/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static inline void igb_set_rah_pool(struct e1000_hw *, int, int);
static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
static void igb_vmm_control(struct igb_adapter *);
static inline void igb_set_vmolr(struct e1000_hw *, int);
static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */
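
/*
 * Usage sketch (illustrative, not part of this file): loading the
 * driver with
 *
 *	modprobe igb max_vfs=2
 *
 * asks each 82576 physical function to create two SR-IOV virtual
 * functions at probe time; the default of 0 leaves SR-IOV disabled.
 */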

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
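
/*
 * Worked example (illustrative): with the values above, the increment
 * programmed into TIMINCA is 16 * 2^19 = 8388608 = 0x800000, which
 * fits in 24 bits (0x800000 < 2^24), so the #error above stays quiet.
 * A 100 ppm rate correction then amounts to roughly
 * 8388608 * 100 / 10^6 ~= 839 counts per tick, whereas with the
 * unscaled increment of 16 the same correction would be a fractional
 * 0.0016 and impossible to express in an integer register.
 */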

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
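
/*
 * Illustrative reading (not part of the driver): if SYSTIMH holds
 * 0x00000001 and SYSTIML holds 0x80000000, igb_read_clock() returns
 * 0x0000000180000000, i.e. the two 32-bit registers are simply
 * concatenated into one 64-bit cycle count for the timecounter.
 */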

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_desc_unused - calculate the number of unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
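
/*
 * Worked example (illustrative): on a 256-descriptor ring with
 * next_to_use = 10 and next_to_clean = 200, the first branch returns
 * 200 - 10 - 1 = 189 free slots; after wrapping, say next_to_use = 250
 * and next_to_clean = 10, the second branch returns
 * 256 + 10 - 250 - 1 = 15.  The "- 1" keeps one slot permanently
 * unused so that next_to_use == next_to_clean always means "empty",
 * never "full".
 */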

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	unsigned int rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}

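/*
 * Illustrative mapping (derived from the macro above, with
 * rbase_offset == 0): Q_IDX_82576 interleaves ring indices across the
 * two halves of the 82576 queue space,
 *
 *	i:              0  1  2  3  4  5  6  7
 *	Q_IDX_82576(i): 0  8  1  9  2 10  3 11
 *
 * leaving each low queue paired with its high counterpart exactly the
 * way the VF allocation described above expects.
 */
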
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	adapter->rx_ring->buddy = adapter->tx_ring;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}

	igb_cache_ring_register(adapter);
	return 0;
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		netif_napi_del(&adapter->rx_ring[i].napi);

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE) {
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
			adapter->rx_ring[rx_queue].eims_value = msixbm;
		}
		if (tx_queue > IGB_N0_QUEUE) {
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
			adapter->tx_ring[tx_queue].eims_value =
				  E1000_EICR_TX_QUEUE0 << tx_queue;
		}
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		break;
	default:
		BUG();
		break;
	}
}
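
/*
 * Illustrative IVAR0 layout as implied by the code above (a sketch,
 * not an authoritative register description): each 32-bit entry packs
 * four 8-bit vector fields,
 *
 *	bits  7:0   rx queue (2 * index)
 *	bits 15:8   tx queue (2 * index)
 *	bits 23:16  rx queue (2 * index + 1)
 *	bits 31:24  tx queue (2 * index + 1)
 *
 * so mapping rx queue 3 to MSI-X vector 5 (with no VFs allocated)
 * touches IVAR0[1] bits 23:16 and writes 0x85, assuming
 * E1000_IVAR_VALID is the 0x80 "valid" bit.
 */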

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;
	if (hw->mac.type == e1000_82576)
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
		if (tx_ring->itr_val)
			writel(tx_ring->itr_val,
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		rx_ring->buddy = NULL;
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(rx_ring->itr_val,
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);

		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);
		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		tmp = (vector++ | E1000_IVAR_VALID) << 8;
		wr32(E1000_IVAR_MISC, tmp);

		adapter->eims_enable_mask = (1 << (vector)) - 1;
		adapter->eims_other = 1 << (vector - 1);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */
	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;

	vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = 976; /* ~4000 ints/sec */
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);
	return;
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
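
/*
 * Worked example (illustrative): on a 4-CPU system with at least four
 * queues supported, the function above requests 4 tx + 4 rx + 1
 * "other" = 9 MSI-X vectors; if pci_enable_msix() cannot grant them
 * all, the driver falls back to a single rx/tx queue pair on plain
 * MSI or, failing that, legacy INTx.
 */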

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
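
/*
 * Register-role sketch (an assumption from the 82575/82576 register
 * naming, not stated in this file): EIMS/EIMC set and clear bits in
 * the extended interrupt mask, EIAC selects causes that auto-clear on
 * read, and EIAM selects causes that are auto-masked while being
 * serviced; IMS/IMC/IAM play the same roles for the legacy
 * (non-MSI-X) interrupt path used in the else branch above.
 */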

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/

int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
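
/*
 * Worked example (illustrative, assuming an 82575 with pba =
 * E1000_PBA_34K and a 1522-byte max frame): pba << 10 = 34816 bytes,
 * so hwm = min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772)
 * = 31334; masking to 8-byte granularity then gives high_water =
 * 31328 and low_water = 31320.
 */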

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_multicast_list	= igb_set_multi,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
			"0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		/* 82576 supports a maximum of 7 VFs in addition to the PF */
		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
		int i;
		unsigned char mac_addr[ETH_ALEN];

		if (num_vfs) {
			adapter->vf_data = kcalloc(num_vfs,
						   sizeof(struct vf_data_storage),
						   GFP_KERNEL);
			if (!adapter->vf_data) {
				dev_err(&pdev->dev,
					"Could not allocate VF private data - "
					"IOV enable failed\n");
			} else {
				err = pci_enable_sriov(pdev, num_vfs);
				if (!err) {
					adapter->vfs_allocated_count = num_vfs;
					dev_info(&pdev->dev,
						 "%d vfs allocated\n",
						 num_vfs);
					for (i = 0;
					     i < adapter->vfs_allocated_count;
					     i++) {
						random_ether_addr(mac_addr);
						igb_set_vf_mac(adapter, i,
							       mac_addr);
					}
				} else {
					kfree(adapter->vf_data);
					adapter->vf_data = NULL;
				}
			}
		}
	}

#endif
	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	case e1000_82576:
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;

	/* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		igb_setup_dca(adapter);
	}
#endif

	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1<<24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif
	wrfl();
	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);

	igb_free_queues(adapter);
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* flush_scheduled_work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
			"pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
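
/*
 * Worked example (illustrative): with the default MTU of 1500,
 * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518
 * bytes, and min_frame_size = ETH_ZLEN (60) + 4 = 64 bytes, the
 * classic minimum Ethernet frame length on the wire.
 */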
1677
1678/**
1679 * igb_open - Called when a network interface is made active
1680 * @netdev: network interface device structure
1681 *
1682 * Returns 0 on success, negative value on failure
1683 *
1684 * The open entry point is called when a network interface is made
1685 * active by the system (IFF_UP). At this point all resources needed
1686 * for transmit and receive operations are allocated, the interrupt
1687 * handler is registered with the OS, the watchdog timer is started,
1688 * and the stack is notified that the interface is ready.
1689 **/
1690 static int igb_open(struct net_device *netdev)
1691 {
1692 struct igb_adapter *adapter = netdev_priv(netdev);
1693 struct e1000_hw *hw = &adapter->hw;
1694 int err;
1695 int i;
1696
1697 /* disallow open during test */
1698 if (test_bit(__IGB_TESTING, &adapter->state))
1699 return -EBUSY;
1700
1701 netif_carrier_off(netdev);
1702
1703 /* allocate transmit descriptors */
1704 err = igb_setup_all_tx_resources(adapter);
1705 if (err)
1706 goto err_setup_tx;
1707
1708 /* allocate receive descriptors */
1709 err = igb_setup_all_rx_resources(adapter);
1710 if (err)
1711 goto err_setup_rx;
1712
1713 /* e1000_power_up_phy(adapter); */
1714
1715 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1716 if ((adapter->hw.mng_cookie.status &
1717 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1718 igb_update_mng_vlan(adapter);
1719
1720 /* before we allocate an interrupt, we must be ready to handle it.
1721 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1722 * as soon as we call pci_request_irq, so we have to setup our
1723 * clean_rx handler before we do so. */
1724 igb_configure(adapter);
1725
1726 igb_vmm_control(adapter);
1727 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
1728 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1729
1730 err = igb_request_irq(adapter);
1731 if (err)
1732 goto err_req_irq;
1733
1734 /* From here on the code is the same as igb_up() */
1735 clear_bit(__IGB_DOWN, &adapter->state);
1736
1737 for (i = 0; i < adapter->num_rx_queues; i++)
1738 napi_enable(&adapter->rx_ring[i].napi);
1739
1740 /* Clear any pending interrupts. */
1741 rd32(E1000_ICR);
1742
1743 igb_irq_enable(adapter);
1744
1745 netif_tx_start_all_queues(netdev);
1746
1747 /* Fire a link status change interrupt to start the watchdog. */
1748 wr32(E1000_ICS, E1000_ICS_LSC);
1749
1750 return 0;
1751
1752 err_req_irq:
1753 igb_release_hw_control(adapter);
1754 /* e1000_power_down_phy(adapter); */
1755 igb_free_all_rx_resources(adapter);
1756 err_setup_rx:
1757 igb_free_all_tx_resources(adapter);
1758 err_setup_tx:
1759 igb_reset(adapter);
1760
1761 return err;
1762}
1763
1764 /**
1765 * igb_close - Disables a network interface
1766 * @netdev: network interface device structure
1767 *
1768 * Returns 0, this is not allowed to fail
1769 *
1770 * The close entry point is called when an interface is de-activated
1771 * by the OS. The hardware is still under the driver's control, but
1772 * needs to be disabled. A global MAC reset is issued to stop the
1773 * hardware, and all transmit and receive resources are freed.
1774 **/
1775 static int igb_close(struct net_device *netdev)
1776 {
1777 struct igb_adapter *adapter = netdev_priv(netdev);
1778
1779 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1780 igb_down(adapter);
1781
1782 igb_free_irq(adapter);
1783
1784 igb_free_all_tx_resources(adapter);
1785 igb_free_all_rx_resources(adapter);
1786
1787 /* kill manageability vlan ID if supported, but not if a vlan with
1788 * the same ID is registered on the host OS (let 8021q kill it) */
1789 if ((adapter->hw.mng_cookie.status &
1790 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1791 !(adapter->vlgrp &&
1792 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1793 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1794
1795 return 0;
1796}
1797
1798 /**
1799 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1800 * @adapter: board private structure
1801 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1802 *
1803 * Return 0 on success, negative on failure
1804 **/
1805 int igb_setup_tx_resources(struct igb_adapter *adapter,
1806 struct igb_ring *tx_ring)
1807{
1808 struct pci_dev *pdev = adapter->pdev;
1809 int size;
1810
1811 size = sizeof(struct igb_buffer) * tx_ring->count;
1812 tx_ring->buffer_info = vmalloc(size);
1813 if (!tx_ring->buffer_info)
1814 goto err;
1815 memset(tx_ring->buffer_info, 0, size);
1816
1817 /* round up to nearest 4K */
1818 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1819 tx_ring->size = ALIGN(tx_ring->size, 4096);
1820
1821 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1822 &tx_ring->dma);
1823
1824 if (!tx_ring->desc)
1825 goto err;
1826
1827 tx_ring->adapter = adapter;
1828 tx_ring->next_to_use = 0;
1829 tx_ring->next_to_clean = 0;
1830 return 0;
1831
1832err:
1833 vfree(tx_ring->buffer_info);
1834 dev_err(&adapter->pdev->dev,
1835 "Unable to allocate memory for the transmit descriptor ring\n");
1836 return -ENOMEM;
1837}
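/*
 * Sizing example (illustrative, assuming the default IGB_DEFAULT_TXD of
 * 256): a call such as
 *     igb_setup_tx_resources(adapter, &adapter->tx_ring[0]);
 * allocates 256 * sizeof(union e1000_adv_tx_desc) = 256 * 16 = 4096
 * bytes, already 4K aligned, so ALIGN() leaves the size unchanged; a
 * 1024-entry ring would take four pages.
 */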
1838
1839 /**
1840 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
1841 * (Descriptors) for all queues
1842 * @adapter: board private structure
1843 *
1844 * Return 0 on success, negative on failure
1845 **/
1846 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1847 {
1848 int i, err = 0;
1849 int r_idx;
1850
1851 for (i = 0; i < adapter->num_tx_queues; i++) {
1852 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1853 if (err) {
1854 dev_err(&adapter->pdev->dev,
1855 "Allocation for Tx Queue %u failed\n", i);
1856 for (i--; i >= 0; i--)
1857 igb_free_tx_resources(&adapter->tx_ring[i]);
1858 break;
1859 }
1860 }
1861
1862 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1863 r_idx = i % adapter->num_tx_queues;
1864 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1865 }
1866 return err;
1867}
1868
1869 /**
1870 * igb_configure_tx - Configure transmit Unit after Reset
1871 * @adapter: board private structure
1872 *
1873 * Configure the Tx unit of the MAC after a reset.
1874 **/
1875 static void igb_configure_tx(struct igb_adapter *adapter)
1876 {
1877 u64 tdba;
1878 struct e1000_hw *hw = &adapter->hw;
1879 u32 tctl;
1880 u32 txdctl, txctrl;
1881 int i, j;
1882
1883 for (i = 0; i < adapter->num_tx_queues; i++) {
1884 struct igb_ring *ring = &adapter->tx_ring[i];
1885 j = ring->reg_idx;
1886 wr32(E1000_TDLEN(j),
1887 ring->count * sizeof(union e1000_adv_tx_desc));
1888 tdba = ring->dma;
1889 wr32(E1000_TDBAL(j),
1890 tdba & 0x00000000ffffffffULL);
1891 wr32(E1000_TDBAH(j), tdba >> 32);
1892
1893 ring->head = E1000_TDH(j);
1894 ring->tail = E1000_TDT(j);
1895 writel(0, hw->hw_addr + ring->tail);
1896 writel(0, hw->hw_addr + ring->head);
1897 txdctl = rd32(E1000_TXDCTL(j));
1898 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1899 wr32(E1000_TXDCTL(j), txdctl);
1900
1901 /* Turn off Relaxed Ordering on head write-backs. The
1902 * writebacks MUST be delivered in order or it will
1903 * completely screw up our bookkeeping.
1904 */
1905 txctrl = rd32(E1000_DCA_TXCTRL(j));
1906 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1907 wr32(E1000_DCA_TXCTRL(j), txctrl);
1908 }
1909
1910 /* disable queue 0 to prevent tail bump w/o re-configuration */
1911 if (adapter->vfs_allocated_count)
1912 wr32(E1000_TXDCTL(0), 0);
1913
1914 /* Program the Transmit Control Register */
1915 tctl = rd32(E1000_TCTL);
1916 tctl &= ~E1000_TCTL_CT;
1917 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1918 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1919
1920 igb_config_collision_dist(hw);
1921
1922 /* Setup Transmit Descriptor Settings for eop descriptor */
1923 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1924
1925 /* Enable transmits */
1926 tctl |= E1000_TCTL_EN;
1927
1928 wr32(E1000_TCTL, tctl);
1929}
1930
1931 /**
1932 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1933 * @adapter: board private structure
1934 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1935 *
1936 * Returns 0 on success, negative on failure
1937 **/
1938 int igb_setup_rx_resources(struct igb_adapter *adapter,
1939 struct igb_ring *rx_ring)
1940{
1941 struct pci_dev *pdev = adapter->pdev;
1942 int size, desc_len;
1943
1944 size = sizeof(struct igb_buffer) * rx_ring->count;
1945 rx_ring->buffer_info = vmalloc(size);
1946 if (!rx_ring->buffer_info)
1947 goto err;
1948 memset(rx_ring->buffer_info, 0, size);
1949
1950 desc_len = sizeof(union e1000_adv_rx_desc);
1951
1952 /* Round up to nearest 4K */
1953 rx_ring->size = rx_ring->count * desc_len;
1954 rx_ring->size = ALIGN(rx_ring->size, 4096);
1955
1956 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1957 &rx_ring->dma);
1958
1959 if (!rx_ring->desc)
1960 goto err;
1961
1962 rx_ring->next_to_clean = 0;
1963 rx_ring->next_to_use = 0;
1964
1965 rx_ring->adapter = adapter;
1966
1967 return 0;
1968
1969err:
1970 vfree(rx_ring->buffer_info);
1971 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1972 "the receive descriptor ring\n");
1973 return -ENOMEM;
1974}
1975
1976 /**
1977 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
1978 * (Descriptors) for all queues
1979 * @adapter: board private structure
1980 *
1981 * Return 0 on success, negative on failure
1982 **/
1983 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
1984 {
1985 int i, err = 0;
1986
1987 for (i = 0; i < adapter->num_rx_queues; i++) {
1988 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1989 if (err) {
1990 dev_err(&adapter->pdev->dev,
1991 "Allocation for Rx Queue %u failed\n", i);
1992 for (i--; i >= 0; i--)
1993 igb_free_rx_resources(&adapter->rx_ring[i]);
1994 break;
1995 }
1996 }
1997
1998 return err;
1999}
2000
2001 /**
2002 * igb_setup_rctl - configure the receive control registers
2003 * @adapter: Board private structure
2004 **/
2005 static void igb_setup_rctl(struct igb_adapter *adapter)
2006 {
2007 struct e1000_hw *hw = &adapter->hw;
2008 u32 rctl;
2009 u32 srrctl = 0;
2010 int i, j;
2011
2012 rctl = rd32(E1000_RCTL);
2013
2014 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2015 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2016
2017 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2018 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2019
2020 /*
2021 * enable stripping of CRC. It's unlikely this will break BMC
2022 * redirection as it did with e1000. Newer features require
2023 * that the HW strips the CRC.
2024 */
2025 rctl |= E1000_RCTL_SECRC;
2026
2027 /*
2028 * disable store bad packets and clear size bits.
2029 */
2030 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2031
2032 /* enable LPE to prevent packets larger than max_frame_size */
2033 rctl |= E1000_RCTL_LPE;
2034
2035 /* Setup buffer sizes */
2036 switch (adapter->rx_buffer_len) {
2037 case IGB_RXBUFFER_256:
2038 rctl |= E1000_RCTL_SZ_256;
2039 break;
2040 case IGB_RXBUFFER_512:
2041 rctl |= E1000_RCTL_SZ_512;
2042 break;
2043 default:
2044 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
2045 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2046 break;
2047 }
2048
2049 /* 82575 and greater support packet-split where the protocol
2050 * header is placed in skb->data and the packet data is
2051 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2052 * In the case of a non-split, skb->data is linearly filled,
2053 * followed by the page buffers. Therefore, skb->data is
2054 * sized to hold the largest protocol header.
2055 */
2056 /* allocations using alloc_page take too long for regular MTU
2057 * so only enable packet split for jumbo frames */
2058 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2059 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
2060 srrctl |= adapter->rx_ps_hdr_size <<
2061 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2062 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2063 } else {
2064 adapter->rx_ps_hdr_size = 0;
2065 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2066 }
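/*
 * Example (illustrative): with a 9000-byte jumbo MTU the branch above
 * enables packet split, so up to IGB_RXBUFFER_128 (128) bytes of
 * protocol headers land in the skb's linear data and the payload goes
 * into half-page buffers hung off skb_shinfo(skb)->nr_frags; with a
 * standard 1500-byte MTU the whole frame uses one linear buffer.
 */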
2067
2068 /* Attention!!! For SR-IOV PF driver operations you must enable
2069 * queue drop for all VF and PF queues to prevent head of line blocking
2070 * if an un-trusted VF does not provide descriptors to hardware.
2071 */
2072 if (adapter->vfs_allocated_count) {
2073 u32 vmolr;
2074
2075 j = adapter->rx_ring[0].reg_idx;
2076
2077 /* set all queue drop enable bits */
2078 wr32(E1000_QDE, ALL_QUEUES);
2079 srrctl |= E1000_SRRCTL_DROP_EN;
2080
2081 /* disable queue 0 to prevent tail write w/o re-config */
2082 wr32(E1000_RXDCTL(0), 0);
2083
2084 vmolr = rd32(E1000_VMOLR(j));
2085 if (rctl & E1000_RCTL_LPE)
2086 vmolr |= E1000_VMOLR_LPE;
2087 if (adapter->num_rx_queues > 0)
2088 vmolr |= E1000_VMOLR_RSSE;
2089 wr32(E1000_VMOLR(j), vmolr);
2090 }
2091
2092 for (i = 0; i < adapter->num_rx_queues; i++) {
2093 j = adapter->rx_ring[i].reg_idx;
2094 wr32(E1000_SRRCTL(j), srrctl);
2095 }
2096
2097 wr32(E1000_RCTL, rctl);
2098}
2099
2100 /**
2101 * igb_rlpml_set - set maximum receive packet size
2102 * @adapter: board private structure
2103 *
2104 * Configure maximum receivable packet size.
2105 **/
2106 static void igb_rlpml_set(struct igb_adapter *adapter)
2107 {
2108 u32 max_frame_size = adapter->max_frame_size;
2109 struct e1000_hw *hw = &adapter->hw;
2110 u16 pf_id = adapter->vfs_allocated_count;
2111
2112 if (adapter->vlgrp)
2113 max_frame_size += VLAN_TAG_SIZE;
2114
2115 /* if vfs are enabled we set RLPML to the largest possible request
2116 * size and set the VMOLR RLPML to the size we need */
2117 if (pf_id) {
2118 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2119 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2120 }
2121
2122 wr32(E1000_RLPML, max_frame_size);
2123}
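/*
 * Worked example (illustrative): with a 1500-byte MTU and no VFs,
 * max_frame_size is 1518; if a vlan group is registered the function
 * adds VLAN_TAG_SIZE (4) and writes 1522 to E1000_RLPML. With VFs
 * enabled, per-pool limits are set through igb_set_vf_rlpml() and the
 * global register is opened up to MAX_STD_JUMBO_FRAME_SIZE plus the tag.
 */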
2124
2125 /**
2126 * igb_configure_vt_default_pool - Configure VT default pool
2127 * @adapter: board private structure
2128 *
2129 * Configure the default pool
2130 **/
2131 static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2132 {
2133 struct e1000_hw *hw = &adapter->hw;
2134 u16 pf_id = adapter->vfs_allocated_count;
2135 u32 vtctl;
2136
2137 /* not in sr-iov mode - do nothing */
2138 if (!pf_id)
2139 return;
2140
2141 vtctl = rd32(E1000_VT_CTL);
2142 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2143 E1000_VT_CTL_DISABLE_DEF_POOL);
2144 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2145 wr32(E1000_VT_CTL, vtctl);
2146}
2147
2148 /**
2149 * igb_configure_rx - Configure receive Unit after Reset
2150 * @adapter: board private structure
2151 *
2152 * Configure the Rx unit of the MAC after a reset.
2153 **/
2154 static void igb_configure_rx(struct igb_adapter *adapter)
2155 {
2156 u64 rdba;
2157 struct e1000_hw *hw = &adapter->hw;
2158 u32 rctl, rxcsum;
2159 u32 rxdctl;
2160 int i;
2161
2162 /* disable receives while setting up the descriptors */
2163 rctl = rd32(E1000_RCTL);
2164 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2165 wrfl();
2166 mdelay(10);
2167
2168 if (adapter->itr_setting > 3)
2169 wr32(E1000_ITR, adapter->itr);
2170
2171 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2172 * the Base and Length of the Rx Descriptor Ring */
2173 for (i = 0; i < adapter->num_rx_queues; i++) {
2174 struct igb_ring *ring = &adapter->rx_ring[i];
2175 int j = ring->reg_idx;
2176 rdba = ring->dma;
2177 wr32(E1000_RDBAL(j),
2178 rdba & 0x00000000ffffffffULL);
2179 wr32(E1000_RDBAH(j), rdba >> 32);
2180 wr32(E1000_RDLEN(j),
2181 ring->count * sizeof(union e1000_adv_rx_desc));
2182
2183 ring->head = E1000_RDH(j);
2184 ring->tail = E1000_RDT(j);
2185 writel(0, hw->hw_addr + ring->tail);
2186 writel(0, hw->hw_addr + ring->head);
2187
2188 rxdctl = rd32(E1000_RXDCTL(j));
2189 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2190 rxdctl &= 0xFFF00000;
2191 rxdctl |= IGB_RX_PTHRESH;
2192 rxdctl |= IGB_RX_HTHRESH << 8;
2193 rxdctl |= IGB_RX_WTHRESH << 16;
2194 wr32(E1000_RXDCTL(j), rxdctl);
2195 }
2196
2197 if (adapter->num_rx_queues > 1) {
2198 u32 random[10];
2199 u32 mrqc;
2200 u32 j, shift;
2201 union e1000_reta {
2202 u32 dword;
2203 u8 bytes[4];
2204 } reta;
2205
2206 get_random_bytes(&random[0], 40);
2207
2208 if (hw->mac.type >= e1000_82576)
2209 shift = 0;
2210 else
2211 shift = 6;
2212 for (j = 0; j < (32 * 4); j++) {
2213 reta.bytes[j & 3] =
2214 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2215 if ((j & 3) == 3)
2216 writel(reta.dword,
2217 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2218 }
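/*
 * RETA example (illustrative): the 128-entry redirection table is
 * written one dword (four entries) at a time, so with four rx queues
 * the bytes cycle 0,1,2,3,0,1,... On 82575 (shift == 6) each queue
 * index is placed in bits 7:6 of its byte, while 82576 and later
 * (shift == 0) use the low bits directly.
 */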
2219 if (adapter->vfs_allocated_count)
2220 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2221 else
2222 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2223
2224 /* Fill out hash function seeds */
2225 for (j = 0; j < 10; j++)
2226 array_wr32(E1000_RSSRK(0), j, random[j]);
2227
2228 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2229 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2230 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2231 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2232 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2233 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2234 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2235 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2236
2237
2238 wr32(E1000_MRQC, mrqc);
2239
2240 /* Multiqueue and raw packet checksumming are mutually
2241 * exclusive. Note that this not the same as TCP/IP
2242 * checksumming, which works fine. */
2243 rxcsum = rd32(E1000_RXCSUM);
2244 rxcsum |= E1000_RXCSUM_PCSD;
2245 wr32(E1000_RXCSUM, rxcsum);
2246 } else {
2247 /* Enable multi-queue for sr-iov */
2248 if (adapter->vfs_allocated_count)
2249 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2250 /* Enable Receive Checksum Offload for TCP and UDP */
2251 rxcsum = rd32(E1000_RXCSUM);
2252 if (adapter->rx_csum)
2253 rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
2254 else
2255 rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
2256
2257 wr32(E1000_RXCSUM, rxcsum);
2258 }
2259
2260 /* Set the default pool for the PF's first queue */
2261 igb_configure_vt_default_pool(adapter);
2262
2263 igb_rlpml_set(adapter);
2264
2265 /* Enable Receives */
2266 wr32(E1000_RCTL, rctl);
2267}
2268
2269 /**
2270 * igb_free_tx_resources - Free Tx Resources per Queue
2271 * @tx_ring: Tx descriptor ring for a specific queue
2272 *
2273 * Free all transmit software resources
2274 **/
2275 void igb_free_tx_resources(struct igb_ring *tx_ring)
2276 {
2277 struct pci_dev *pdev = tx_ring->adapter->pdev;
2278
2279 igb_clean_tx_ring(tx_ring);
2280
2281 vfree(tx_ring->buffer_info);
2282 tx_ring->buffer_info = NULL;
2283
2284 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2285
2286 tx_ring->desc = NULL;
2287}
2288
2289 /**
2290 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2291 * @adapter: board private structure
2292 *
2293 * Free all transmit software resources
2294 **/
2295 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2296 {
2297 int i;
2298
2299 for (i = 0; i < adapter->num_tx_queues; i++)
2300 igb_free_tx_resources(&adapter->tx_ring[i]);
2301}
2302
2303 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2304 struct igb_buffer *buffer_info)
2305 {
2306 buffer_info->dma = 0;
2307 if (buffer_info->skb) {
2308 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
2309 DMA_TO_DEVICE);
2310 dev_kfree_skb_any(buffer_info->skb);
2311 buffer_info->skb = NULL;
2312 }
2313 buffer_info->time_stamp = 0;
2314 /* buffer_info must be completely set up in the transmit path */
2315}
2316
2317 /**
2318 * igb_clean_tx_ring - Free Tx Buffers
2319 * @tx_ring: ring to be cleaned
2320 **/
2321 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2322 {
2323 struct igb_adapter *adapter = tx_ring->adapter;
2324 struct igb_buffer *buffer_info;
2325 unsigned long size;
2326 unsigned int i;
2327
2328 if (!tx_ring->buffer_info)
2329 return;
2330 /* Free all the Tx ring sk_buffs */
2331
2332 for (i = 0; i < tx_ring->count; i++) {
2333 buffer_info = &tx_ring->buffer_info[i];
2334 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2335 }
2336
2337 size = sizeof(struct igb_buffer) * tx_ring->count;
2338 memset(tx_ring->buffer_info, 0, size);
2339
2340 /* Zero out the descriptor ring */
2341
2342 memset(tx_ring->desc, 0, tx_ring->size);
2343
2344 tx_ring->next_to_use = 0;
2345 tx_ring->next_to_clean = 0;
2346
2347 writel(0, adapter->hw.hw_addr + tx_ring->head);
2348 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2349}
2350
2351 /**
2352 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2353 * @adapter: board private structure
2354 **/
2355 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2356 {
2357 int i;
2358
2359 for (i = 0; i < adapter->num_tx_queues; i++)
2360 igb_clean_tx_ring(&adapter->tx_ring[i]);
2361}
2362
2363 /**
2364 * igb_free_rx_resources - Free Rx Resources
2365 * @rx_ring: ring to clean the resources from
2366 *
2367 * Free all receive software resources
2368 **/
2369 void igb_free_rx_resources(struct igb_ring *rx_ring)
2370 {
2371 struct pci_dev *pdev = rx_ring->adapter->pdev;
2372
2373 igb_clean_rx_ring(rx_ring);
2374
2375 vfree(rx_ring->buffer_info);
2376 rx_ring->buffer_info = NULL;
2377
2378 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2379
2380 rx_ring->desc = NULL;
2381}
2382
2383 /**
2384 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2385 * @adapter: board private structure
2386 *
2387 * Free all receive software resources
2388 **/
2389 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2390 {
2391 int i;
2392
2393 for (i = 0; i < adapter->num_rx_queues; i++)
2394 igb_free_rx_resources(&adapter->rx_ring[i]);
2395}
2396
2397 /**
2398 * igb_clean_rx_ring - Free Rx Buffers per Queue
2399 * @rx_ring: ring to free buffers from
2400 **/
2401 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2402 {
2403 struct igb_adapter *adapter = rx_ring->adapter;
2404 struct igb_buffer *buffer_info;
2405 struct pci_dev *pdev = adapter->pdev;
2406 unsigned long size;
2407 unsigned int i;
2408
2409 if (!rx_ring->buffer_info)
2410 return;
2411 /* Free all the Rx ring sk_buffs */
2412 for (i = 0; i < rx_ring->count; i++) {
2413 buffer_info = &rx_ring->buffer_info[i];
2414 if (buffer_info->dma) {
2415 if (adapter->rx_ps_hdr_size)
2416 pci_unmap_single(pdev, buffer_info->dma,
2417 adapter->rx_ps_hdr_size,
2418 PCI_DMA_FROMDEVICE);
2419 else
2420 pci_unmap_single(pdev, buffer_info->dma,
2421 adapter->rx_buffer_len,
2422 PCI_DMA_FROMDEVICE);
2423 buffer_info->dma = 0;
2424 }
2425
2426 if (buffer_info->skb) {
2427 dev_kfree_skb(buffer_info->skb);
2428 buffer_info->skb = NULL;
2429 }
2430 if (buffer_info->page) {
2431 if (buffer_info->page_dma)
2432 pci_unmap_page(pdev, buffer_info->page_dma,
2433 PAGE_SIZE / 2,
2434 PCI_DMA_FROMDEVICE);
2435 put_page(buffer_info->page);
2436 buffer_info->page = NULL;
2437 buffer_info->page_dma = 0;
2438 buffer_info->page_offset = 0;
2439 }
2440 }
2441
2442 size = sizeof(struct igb_buffer) * rx_ring->count;
2443 memset(rx_ring->buffer_info, 0, size);
2444
2445 /* Zero out the descriptor ring */
2446 memset(rx_ring->desc, 0, rx_ring->size);
2447
2448 rx_ring->next_to_clean = 0;
2449 rx_ring->next_to_use = 0;
2450
2451 writel(0, adapter->hw.hw_addr + rx_ring->head);
2452 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2453}
2454
2455 /**
2456 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2457 * @adapter: board private structure
2458 **/
2459 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2460 {
2461 int i;
2462
2463 for (i = 0; i < adapter->num_rx_queues; i++)
2464 igb_clean_rx_ring(&adapter->rx_ring[i]);
2465}
2466
2467 /**
2468 * igb_set_mac - Change the Ethernet Address of the NIC
2469 * @netdev: network interface device structure
2470 * @p: pointer to an address structure
2471 *
2472 * Returns 0 on success, negative on failure
2473 **/
2474 static int igb_set_mac(struct net_device *netdev, void *p)
2475 {
2476 struct igb_adapter *adapter = netdev_priv(netdev);
2477 struct e1000_hw *hw = &adapter->hw;
2478 struct sockaddr *addr = p;
2479
2480 if (!is_valid_ether_addr(addr->sa_data))
2481 return -EADDRNOTAVAIL;
2482
2483 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2484 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2485
2486 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
2487
2488 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
2489
2490 return 0;
2491}
2492
2493 /**
2494 * igb_set_multi - Multicast and Promiscuous mode set
2495 * @netdev: network interface device structure
2496 *
2497 * The set_multi entry point is called whenever the multicast address
2498 * list or the network interface flags are updated. This routine is
2499 * responsible for configuring the hardware for proper multicast,
2500 * promiscuous mode, and all-multi behavior.
2501 **/
2502 static void igb_set_multi(struct net_device *netdev)
2503 {
2504 struct igb_adapter *adapter = netdev_priv(netdev);
2505 struct e1000_hw *hw = &adapter->hw;
2506 struct e1000_mac_info *mac = &hw->mac;
2507 struct dev_mc_list *mc_ptr;
2508 u8 *mta_list = NULL;
2509 u32 rctl;
2510 int i;
2511
2512 /* Check for Promiscuous and All Multicast modes */
2513
2514 rctl = rd32(E1000_RCTL);
2515
2516 if (netdev->flags & IFF_PROMISC) {
2517 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2518 rctl &= ~E1000_RCTL_VFE;
2519 } else {
2520 if (netdev->flags & IFF_ALLMULTI) {
2521 rctl |= E1000_RCTL_MPE;
2522 rctl &= ~E1000_RCTL_UPE;
2523 } else
2524 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2525 rctl |= E1000_RCTL_VFE;
2526 }
2527 wr32(E1000_RCTL, rctl);
2528
2529 if (netdev->mc_count) {
2530 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2531 if (!mta_list) {
2532 dev_err(&adapter->pdev->dev,
2533 "failed to allocate multicast filter list\n");
2534 return;
2535 }
2536 }
2537
2538 /* The shared function expects a packed array of only addresses. */
2539 mc_ptr = netdev->mc_list;
2540
2541 for (i = 0; i < netdev->mc_count; i++) {
2542 if (!mc_ptr)
2543 break;
2544 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2545 mc_ptr = mc_ptr->next;
2546 }
2547 igb_update_mc_addr_list(hw, mta_list, i,
2548 adapter->vfs_allocated_count + 1,
2549 mac->rar_entry_count);
2550
2551 igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
2552 igb_restore_vf_multicasts(adapter);
2553
2554 kfree(mta_list);
2555}
2556
2557/* Need to wait a few seconds after link up to get diagnostic information from
2558 * the phy */
2559 static void igb_update_phy_info(unsigned long data)
2560 {
2561 struct igb_adapter *adapter = (struct igb_adapter *) data;
2562 igb_get_phy_info(&adapter->hw);
2563}
2564
2565 /**
2566 * igb_has_link - check shared code for link and determine up/down
2567 * @adapter: pointer to driver private info
2568 **/
2569 static bool igb_has_link(struct igb_adapter *adapter)
2570 {
2571 struct e1000_hw *hw = &adapter->hw;
2572 bool link_active = false;
2573 s32 ret_val = 0;
2574
2575 /* get_link_status is set on LSC (link status) interrupt or
2576 * rx sequence error interrupt. get_link_status will stay
2577 * false until the e1000_check_for_link establishes link
2578 * for copper adapters ONLY
2579 */
2580 switch (hw->phy.media_type) {
2581 case e1000_media_type_copper:
2582 if (hw->mac.get_link_status) {
2583 ret_val = hw->mac.ops.check_for_link(hw);
2584 link_active = !hw->mac.get_link_status;
2585 } else {
2586 link_active = true;
2587 }
2588 break;
2589 case e1000_media_type_fiber:
2590 ret_val = hw->mac.ops.check_for_link(hw);
2591 link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
2592 break;
2593 case e1000_media_type_internal_serdes:
2594 ret_val = hw->mac.ops.check_for_link(hw);
2595 link_active = hw->mac.serdes_has_link;
2596 break;
2597 default:
2598 case e1000_media_type_unknown:
2599 break;
2600 }
2601
2602 return link_active;
2603}
2604
2605 /**
2606 * igb_watchdog - Timer Call-back
2607 * @data: pointer to adapter cast into an unsigned long
2608 **/
2609 static void igb_watchdog(unsigned long data)
2610 {
2611 struct igb_adapter *adapter = (struct igb_adapter *)data;
2612 /* Do the rest outside of interrupt context */
2613 schedule_work(&adapter->watchdog_task);
2614}
2615
2616 static void igb_watchdog_task(struct work_struct *work)
2617 {
2618 struct igb_adapter *adapter = container_of(work,
2619 struct igb_adapter, watchdog_task);
2620 struct e1000_hw *hw = &adapter->hw;
2621 struct net_device *netdev = adapter->netdev;
2622 struct igb_ring *tx_ring = adapter->tx_ring;
2623 u32 link;
2624 u32 eics = 0;
2625 int i;
2626
2627 link = igb_has_link(adapter);
2628 if ((netif_carrier_ok(netdev)) && link)
2629 goto link_up;
2630
2631 if (link) {
2632 if (!netif_carrier_ok(netdev)) {
2633 u32 ctrl;
2634 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2635 &adapter->link_speed,
2636 &adapter->link_duplex);
2637
2638 ctrl = rd32(E1000_CTRL);
2639 /* Links status message must follow this format */
2640 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2641 "Flow Control: %s\n",
2642 netdev->name,
2643 adapter->link_speed,
2644 adapter->link_duplex == FULL_DUPLEX ?
2645 "Full Duplex" : "Half Duplex",
2646 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2647 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2648 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2649 E1000_CTRL_TFCE) ? "TX" : "None")));
2650
2651 /* tweak tx_queue_len according to speed/duplex and
2652 * adjust the timeout factor */
2653 netdev->tx_queue_len = adapter->tx_queue_len;
2654 adapter->tx_timeout_factor = 1;
2655 switch (adapter->link_speed) {
2656 case SPEED_10:
2657 netdev->tx_queue_len = 10;
2658 adapter->tx_timeout_factor = 14;
2659 break;
2660 case SPEED_100:
2661 netdev->tx_queue_len = 100;
2662 /* maybe add some timeout factor ? */
2663 break;
2664 }
2665
2666 netif_carrier_on(netdev);
2667 netif_tx_wake_all_queues(netdev);
2668
2669 igb_ping_all_vfs(adapter);
2670
2671 /* link state has changed, schedule phy info update */
2672 if (!test_bit(__IGB_DOWN, &adapter->state))
2673 mod_timer(&adapter->phy_info_timer,
2674 round_jiffies(jiffies + 2 * HZ));
2675 }
2676 } else {
2677 if (netif_carrier_ok(netdev)) {
2678 adapter->link_speed = 0;
2679 adapter->link_duplex = 0;
2680 /* Links status message must follow this format */
2681 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2682 netdev->name);
2683 netif_carrier_off(netdev);
2684 netif_tx_stop_all_queues(netdev);
2685
2686 igb_ping_all_vfs(adapter);
2687
2688 /* link state has changed, schedule phy info update */
2689 if (!test_bit(__IGB_DOWN, &adapter->state))
2690 mod_timer(&adapter->phy_info_timer,
2691 round_jiffies(jiffies + 2 * HZ));
2692 }
2693 }
2694
2695 link_up:
2696 igb_update_stats(adapter);
2697
2698 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2699 adapter->tpt_old = adapter->stats.tpt;
2700 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2701 adapter->colc_old = adapter->stats.colc;
2702
2703 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2704 adapter->gorc_old = adapter->stats.gorc;
2705 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2706 adapter->gotc_old = adapter->stats.gotc;
2707
2708 igb_update_adaptive(&adapter->hw);
2709
2710 if (!netif_carrier_ok(netdev)) {
2711 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2712 /* We've lost link, so the controller stops DMA,
2713 * but we've got queued Tx work that's never going
2714 * to get done, so reset controller to flush Tx.
2715 * (Do the reset outside of interrupt context). */
2716 adapter->tx_timeout_count++;
2717 schedule_work(&adapter->reset_task);
2718 }
2719 }
2720
2721 /* Cause software interrupt to ensure rx ring is cleaned */
2722 if (adapter->msix_entries) {
2723 for (i = 0; i < adapter->num_rx_queues; i++)
2724 eics |= adapter->rx_ring[i].eims_value;
2725 wr32(E1000_EICS, eics);
2726 } else {
2727 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2728 }
2729
2730 /* Force detection of hung controller every watchdog period */
2731 tx_ring->detect_tx_hung = true;
2732
2733 /* Reset the timer */
2734 if (!test_bit(__IGB_DOWN, &adapter->state))
2735 mod_timer(&adapter->watchdog_timer,
2736 round_jiffies(jiffies + 2 * HZ));
2737}
2738
2739 enum latency_range {
2740 lowest_latency = 0,
2741 low_latency = 1,
2742 bulk_latency = 2,
2743 latency_invalid = 255
2744};
2745
2746
2747 /**
2748 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2749 *
2750 * Stores a new ITR value based strictly on packet size. This
2751 * algorithm is less sophisticated than that used in igb_update_itr,
2752 * due to the difficulty of synchronizing statistics across multiple
2753 * receive rings. The divisors and thresholds used by this function
2754 * were determined based on theoretical maximum wire speed and testing
2755 * data, in order to minimize response time while increasing bulk
2756 * throughput.
2757 * This functionality is controlled by the InterruptThrottleRate module
2758 * parameter (see igb_param.c)
2759 * NOTE: This function is called only when operating in a multiqueue
2760 * receive environment.
2761 * @rx_ring: pointer to ring
2762 **/
2763 static void igb_update_ring_itr(struct igb_ring *rx_ring)
2764 {
2765 int new_val = rx_ring->itr_val;
2766 int avg_wire_size = 0;
2767 struct igb_adapter *adapter = rx_ring->adapter;
2768
2769 if (!rx_ring->total_packets)
2770 goto clear_counts; /* no packets, so don't do anything */
2771
2772 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2773 * ints/sec - ITR timer value of 120 ticks.
2774 */
2775 if (adapter->link_speed != SPEED_1000) {
2776 new_val = 120;
2777 goto set_itr_val;
2778 }
2779 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
2780
2781 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2782 avg_wire_size += 24;
2783
2784 /* Don't starve jumbo frames */
2785 avg_wire_size = min(avg_wire_size, 3000);
2786
2787 /* Give a little boost to mid-size frames */
2788 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2789 new_val = avg_wire_size / 3;
2790 else
2791 new_val = avg_wire_size / 2;
2792
2793 set_itr_val:
2794 if (new_val != rx_ring->itr_val) {
2795 rx_ring->itr_val = new_val;
2796 rx_ring->set_itr = 1;
2797 }
2798 clear_counts:
2799 rx_ring->total_bytes = 0;
2800 rx_ring->total_packets = 0;
2801}
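/*
 * Worked example (illustrative): a stream of 1500-byte frames gives
 * avg_wire_size = 1500 + 24 = 1524, which is outside the 300-1200
 * mid-size band, so new_val = 1524 / 2 = 762 ticks; 600-byte frames
 * would take the boost divisor instead: (600 + 24) / 3 = 208.
 */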
2802
2803 /**
2804 * igb_update_itr - update the dynamic ITR value based on statistics
2805 * Stores a new ITR value based on packets and byte
2806 * counts during the last interrupt. The advantage of per interrupt
2807 * computation is faster updates and more accurate ITR for the current
2808 * traffic pattern. Constants in this function were computed
2809 * based on theoretical maximum wire speed and thresholds were set based
2810 * on testing data as well as attempting to minimize response time
2811 * while increasing bulk throughput.
2812 * this functionality is controlled by the InterruptThrottleRate module
2813 * parameter (see igb_param.c)
2814 * NOTE: These calculations are only valid when operating in a single-
2815 * queue environment.
2816 * @adapter: pointer to adapter
2817 * @itr_setting: current adapter->itr
2818 * @packets: the number of packets during this measurement interval
2819 * @bytes: the number of bytes during this measurement interval
2820 **/
2821 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2822 int packets, int bytes)
2823 {
2824 unsigned int retval = itr_setting;
2825
2826 if (packets == 0)
2827 goto update_itr_done;
2828
2829 switch (itr_setting) {
2830 case lowest_latency:
2831 /* handle TSO and jumbo frames */
2832 if (bytes/packets > 8000)
2833 retval = bulk_latency;
2834 else if ((packets < 5) && (bytes > 512))
2835 retval = low_latency;
2836 break;
2837 case low_latency: /* 50 usec aka 20000 ints/s */
2838 if (bytes > 10000) {
2839 /* this if handles the TSO accounting */
2840 if (bytes/packets > 8000) {
2841 retval = bulk_latency;
2842 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2843 retval = bulk_latency;
2844 } else if ((packets > 35)) {
2845 retval = lowest_latency;
2846 }
2847 } else if (bytes/packets > 2000) {
2848 retval = bulk_latency;
2849 } else if (packets <= 2 && bytes < 512) {
2850 retval = lowest_latency;
2851 }
2852 break;
2853 case bulk_latency: /* 250 usec aka 4000 ints/s */
2854 if (bytes > 25000) {
2855 if (packets > 35)
2856 retval = low_latency;
2857 } else if (bytes < 1500) {
2858 retval = low_latency;
2859 }
2860 break;
2861 }
2862
2863 update_itr_done:
2864 return retval;
2865}
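/*
 * Worked example (illustrative): from the low_latency state, an
 * interval of 40 packets totalling 20000 bytes has bytes > 10000, a
 * small 500-byte average, and packets > 35, so the state steps up to
 * lowest_latency; five 9000-byte TSO-style packets (bytes/packets >
 * 8000) would instead drop it to bulk_latency.
 */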
2866
2867 static void igb_set_itr(struct igb_adapter *adapter)
2868 {
2869 u16 current_itr;
2870 u32 new_itr = adapter->itr;
2871
2872 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2873 if (adapter->link_speed != SPEED_1000) {
2874 current_itr = 0;
2875 new_itr = 4000;
2876 goto set_itr_now;
2877 }
2878
2879 adapter->rx_itr = igb_update_itr(adapter,
2880 adapter->rx_itr,
2881 adapter->rx_ring->total_packets,
2882 adapter->rx_ring->total_bytes);
2883
2884 if (adapter->rx_ring->buddy) {
2885 adapter->tx_itr = igb_update_itr(adapter,
2886 adapter->tx_itr,
2887 adapter->tx_ring->total_packets,
2888 adapter->tx_ring->total_bytes);
2889 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2890 } else {
2891 current_itr = adapter->rx_itr;
2892 }
2893
2894 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2895 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
2896 current_itr = low_latency;
2897
2898 switch (current_itr) {
2899 /* counts and packets in update_itr are dependent on these numbers */
2900 case lowest_latency:
2901 new_itr = 70000;
2902 break;
2903 case low_latency:
2904 new_itr = 20000; /* aka hwitr = ~200 */
2905 break;
2906 case bulk_latency:
2907 new_itr = 4000;
2908 break;
2909 default:
2910 break;
2911 }
2912
2913 set_itr_now:
2914 adapter->rx_ring->total_bytes = 0;
2915 adapter->rx_ring->total_packets = 0;
2916 if (adapter->rx_ring->buddy) {
2917 adapter->rx_ring->buddy->total_bytes = 0;
2918 adapter->rx_ring->buddy->total_packets = 0;
2919 }
2920
2921 if (new_itr != adapter->itr) {
2922 /* this attempts to bias the interrupt rate towards Bulk
2923 * by adding intermediate steps when interrupt rate is
2924 * increasing */
2925 new_itr = new_itr > adapter->itr ?
2926 min(adapter->itr + (new_itr >> 2), new_itr) :
2927 new_itr;
2928 /* Don't write the value here; it resets the adapter's
2929 * internal timer, and causes us to delay far longer than
2930 * we should between interrupts. Instead, we write the ITR
2931 * value at the beginning of the next interrupt so the timing
2932 * ends up being correct.
2933 */
2934 adapter->itr = new_itr;
2935 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2936 adapter->rx_ring->set_itr = 1;
2937 }
2938
2939 return;
2940}
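/*
 * Unit conversion example (illustrative): itr_val is kept in 256 ns
 * units, so for new_itr = 20000 ints/s the assignment above computes
 * 1000000000 / (20000 * 256) = ~195 ticks, i.e. a minimum gap of about
 * 195 * 256 ns = ~50 us between interrupts.
 */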
2941
2942
2943 #define IGB_TX_FLAGS_CSUM 0x00000001
2944 #define IGB_TX_FLAGS_VLAN 0x00000002
2945 #define IGB_TX_FLAGS_TSO 0x00000004
2946 #define IGB_TX_FLAGS_IPV4 0x00000008
2947 #define IGB_TX_FLAGS_TSTAMP 0x00000010
2948 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2949 #define IGB_TX_FLAGS_VLAN_SHIFT 16
2950
2951 static inline int igb_tso_adv(struct igb_adapter *adapter,
2952 struct igb_ring *tx_ring,
2953 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2954{
2955 struct e1000_adv_tx_context_desc *context_desc;
2956 unsigned int i;
2957 int err;
2958 struct igb_buffer *buffer_info;
2959 u32 info = 0, tu_cmd = 0;
2960 u32 mss_l4len_idx, l4len;
2961 *hdr_len = 0;
2962
2963 if (skb_header_cloned(skb)) {
2964 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2965 if (err)
2966 return err;
2967 }
2968
2969 l4len = tcp_hdrlen(skb);
2970 *hdr_len += l4len;
2971
2972 if (skb->protocol == htons(ETH_P_IP)) {
2973 struct iphdr *iph = ip_hdr(skb);
2974 iph->tot_len = 0;
2975 iph->check = 0;
2976 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2977 iph->daddr, 0,
2978 IPPROTO_TCP,
2979 0);
2980 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2981 ipv6_hdr(skb)->payload_len = 0;
2982 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2983 &ipv6_hdr(skb)->daddr,
2984 0, IPPROTO_TCP, 0);
2985 }
2986
2987 i = tx_ring->next_to_use;
2988
2989 buffer_info = &tx_ring->buffer_info[i];
2990 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2991 /* VLAN MACLEN IPLEN */
2992 if (tx_flags & IGB_TX_FLAGS_VLAN)
2993 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2994 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2995 *hdr_len += skb_network_offset(skb);
2996 info |= skb_network_header_len(skb);
2997 *hdr_len += skb_network_header_len(skb);
2998 context_desc->vlan_macip_lens = cpu_to_le32(info);
2999
3000 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3001 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3002
3003 if (skb->protocol == htons(ETH_P_IP))
3004 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3005 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3006
3007 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3008
3009 /* MSS L4LEN IDX */
3010 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3011 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3012
3013 /* For 82575, context index must be unique per ring. */
3014 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3015 mss_l4len_idx |= tx_ring->queue_index << 4;
3016
3017 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3018 context_desc->seqnum_seed = 0;
3019
3020 buffer_info->time_stamp = jiffies;
3021 buffer_info->next_to_watch = i;
3022 buffer_info->dma = 0;
3023 i++;
3024 if (i == tx_ring->count)
3025 i = 0;
3026
3027 tx_ring->next_to_use = i;
3028
3029 return true;
3030}
3031
3032 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3033 struct igb_ring *tx_ring,
3034 struct sk_buff *skb, u32 tx_flags)
3035{
3036 struct e1000_adv_tx_context_desc *context_desc;
3037 unsigned int i;
3038 struct igb_buffer *buffer_info;
3039 u32 info = 0, tu_cmd = 0;
3040
3041 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3042 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3043 i = tx_ring->next_to_use;
3044 buffer_info = &tx_ring->buffer_info[i];
3045 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3046
3047 if (tx_flags & IGB_TX_FLAGS_VLAN)
3048 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3049 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3050 if (skb->ip_summed == CHECKSUM_PARTIAL)
3051 info |= skb_network_header_len(skb);
3052
3053 context_desc->vlan_macip_lens = cpu_to_le32(info);
3054
3055 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3056
3057 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3058 __be16 protocol;
3059
3060 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3061 const struct vlan_ethhdr *vhdr =
3062 (const struct vlan_ethhdr*)skb->data;
3063
3064 protocol = vhdr->h_vlan_encapsulated_proto;
3065 } else {
3066 protocol = skb->protocol;
3067 }
3068
3069 switch (protocol) {
3070 case cpu_to_be16(ETH_P_IP):
3071 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3072 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3073 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3074 break;
3075 case cpu_to_be16(ETH_P_IPV6):
3076 /* XXX what about other V6 headers?? */
3077 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3078 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3079 break;
3080 default:
3081 if (unlikely(net_ratelimit()))
3082 dev_warn(&adapter->pdev->dev,
3083 "partial checksum but proto=%x!\n",
3084 skb->protocol);
3085 break;
3086 }
3087 }
3088
3089 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3090 context_desc->seqnum_seed = 0;
3091 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3092 context_desc->mss_l4len_idx =
3093 cpu_to_le32(tx_ring->queue_index << 4);
3094 else
3095 context_desc->mss_l4len_idx = 0;
3096
3097 buffer_info->time_stamp = jiffies;
3098 buffer_info->next_to_watch = i;
3099 buffer_info->dma = 0;
3100
3101 i++;
3102 if (i == tx_ring->count)
3103 i = 0;
3104 tx_ring->next_to_use = i;
3105
3106 return true;
3107 }
3108 return false;
3109}
3110
3111 #define IGB_MAX_TXD_PWR 16
3112 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3113
3114 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3115 struct igb_ring *tx_ring, struct sk_buff *skb,
3116 unsigned int first)
3117 {
3118 struct igb_buffer *buffer_info;
3119 unsigned int len = skb_headlen(skb);
3120 unsigned int count = 0, i;
3121 unsigned int f;
3122 dma_addr_t *map;
3123
3124 i = tx_ring->next_to_use;
3125
3126 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
3127 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
3128 return 0;
3129 }
3130
3131 map = skb_shinfo(skb)->dma_maps;
3132
3133 buffer_info = &tx_ring->buffer_info[i];
3134 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3135 buffer_info->length = len;
3136 /* set time_stamp *before* dma to help avoid a possible race */
3137 buffer_info->time_stamp = jiffies;
3138 buffer_info->next_to_watch = i;
3139 buffer_info->dma = map[count];
3140 count++;
3141
3142 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3143 struct skb_frag_struct *frag;
3144
3145 i++;
3146 if (i == tx_ring->count)
3147 i = 0;
3148
3149 frag = &skb_shinfo(skb)->frags[f];
3150 len = frag->size;
3151
3152 buffer_info = &tx_ring->buffer_info[i];
3153 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3154 buffer_info->length = len;
3155 buffer_info->time_stamp = jiffies;
3156 buffer_info->next_to_watch = i;
3157 buffer_info->dma = map[count];
3158 count++;
3159 }
3160
3161 tx_ring->buffer_info[i].skb = skb;
3162 tx_ring->buffer_info[first].next_to_watch = i;
3163
3164 return count;
3165}
3166
3167 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3168 struct igb_ring *tx_ring,
3169 int tx_flags, int count, u32 paylen,
3170 u8 hdr_len)
3171{
3172 union e1000_adv_tx_desc *tx_desc = NULL;
3173 struct igb_buffer *buffer_info;
3174 u32 olinfo_status = 0, cmd_type_len;
3175 unsigned int i;
3176
3177 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3178 E1000_ADVTXD_DCMD_DEXT);
3179
3180 if (tx_flags & IGB_TX_FLAGS_VLAN)
3181 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3182
3183 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3184 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3185
3186 if (tx_flags & IGB_TX_FLAGS_TSO) {
3187 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3188
3189 /* insert tcp checksum */
3190 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3191
3192 /* insert ip checksum */
3193 if (tx_flags & IGB_TX_FLAGS_IPV4)
3194 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3195
3196 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3197 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3198 }
3199
3200 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
3201 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
3202 IGB_TX_FLAGS_VLAN)))
3203 olinfo_status |= tx_ring->queue_index << 4;
3204
3205 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3206
3207 i = tx_ring->next_to_use;
3208 while (count--) {
3209 buffer_info = &tx_ring->buffer_info[i];
3210 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3211 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3212 tx_desc->read.cmd_type_len =
3213 cpu_to_le32(cmd_type_len | buffer_info->length);
3214 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3215 i++;
3216 if (i == tx_ring->count)
3217 i = 0;
3218 }
3219
3220 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
3221 /* Force memory writes to complete before letting h/w
3222 * know there are new descriptors to fetch. (Only
3223 * applicable for weak-ordered memory model archs,
3224 * such as IA-64). */
3225 wmb();
3226
3227 tx_ring->next_to_use = i;
3228 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3229 /* we need this if more than one processor can write to our tail
3230 * at a time, it synchronizes IO on IA64/Altix systems */
3231 mmiowb();
3232}
3233
3234 static int __igb_maybe_stop_tx(struct net_device *netdev,
3235 struct igb_ring *tx_ring, int size)
3236{
3237 struct igb_adapter *adapter = netdev_priv(netdev);
3238
3239 netif_stop_subqueue(netdev, tx_ring->queue_index);
3240
3241 /* Herbert's original patch had:
3242 * smp_mb__after_netif_stop_queue();
3243 * but since that doesn't exist yet, just open code it. */
3244 smp_mb();
3245
3246 /* We need to check again in a case another CPU has just
3247 * made room available. */
3248 if (igb_desc_unused(tx_ring) < size)
3249 return -EBUSY;
3250
3251 /* A reprieve! */
3252 netif_wake_subqueue(netdev, tx_ring->queue_index);
3253 ++adapter->restart_queue;
3254 return 0;
3255}
3256
3257 static int igb_maybe_stop_tx(struct net_device *netdev,
3258 struct igb_ring *tx_ring, int size)
3259 {
3260 if (igb_desc_unused(tx_ring) >= size)
3261 return 0;
3262 return __igb_maybe_stop_tx(netdev, tx_ring, size);
3263}
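/*
 * Note (illustrative): this stop-then-recheck sequence is the usual
 * lock-free producer pattern; the queue is stopped first, smp_mb()
 * orders that store against the re-read of the free-descriptor count,
 * and if the cleanup path made room in the meantime the queue is woken
 * immediately instead of stalling until the next Tx completion.
 */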
3264
3265 static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
3266 struct net_device *netdev,
3267 struct igb_ring *tx_ring)
3268{
3269 struct igb_adapter *adapter = netdev_priv(netdev);
3270 unsigned int first;
3271 unsigned int tx_flags = 0;
3272 u8 hdr_len = 0;
3273 int count = 0;
3274 int tso = 0;
3275 union skb_shared_tx *shtx;
3276
3277 if (test_bit(__IGB_DOWN, &adapter->state)) {
3278 dev_kfree_skb_any(skb);
3279 return NETDEV_TX_OK;
3280 }
3281
3282 if (skb->len <= 0) {
3283 dev_kfree_skb_any(skb);
3284 return NETDEV_TX_OK;
3285 }
3286
3287 /* need: 1 descriptor per page,
3288 * + 2 desc gap to keep tail from touching head,
3289 * + 1 desc for skb->data,
3290 * + 1 desc for context descriptor,
3291 * otherwise try next time */
3292 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3293 /* this is a hard error */
3294 return NETDEV_TX_BUSY;
3295 }
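/*
 * Example (illustrative): an skb whose payload spans three page
 * fragments reserves 3 + 4 = 7 descriptors here: one per fragment, one
 * for skb->data, one for the context descriptor, and a two-descriptor
 * gap so the tail never touches the head.
 */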
3296
3297 /*
3298 * TODO: check that there currently is no other packet with
3299 * time stamping in the queue
3300 *
3301 * When doing time stamping, keep the connection to the socket
3302 * a while longer: it is still needed by skb_hwtstamp_tx(),
3303 * called either in igb_tx_hwtstamp() or by our caller when
3304 * doing software time stamping.
3305 */
3306 shtx = skb_tx(skb);
3307 if (unlikely(shtx->hardware)) {
3308 shtx->in_progress = 1;
3309 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3310 }
3311
3312 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3313 tx_flags |= IGB_TX_FLAGS_VLAN;
3314 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3315 }
3316
3317 if (skb->protocol == htons(ETH_P_IP))
3318 tx_flags |= IGB_TX_FLAGS_IPV4;
3319
3320 first = tx_ring->next_to_use;
3321 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
3322 &hdr_len) : 0;
3323
3324 if (tso < 0) {
3325 dev_kfree_skb_any(skb);
3326 return NETDEV_TX_OK;
3327 }
3328
3329 if (tso)
3330 tx_flags |= IGB_TX_FLAGS_TSO;
3331 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3332 (skb->ip_summed == CHECKSUM_PARTIAL))
3333 tx_flags |= IGB_TX_FLAGS_CSUM;
3334
3335 /*
3336 * count reflects descriptors mapped, if 0 then mapping error
3337 * has occurred and we need to rewind the descriptor queue
3338 */
3339 count = igb_tx_map_adv(adapter, tx_ring, skb, first);
3340
3341 if (count) {
3342 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3343 skb->len, hdr_len);
3344 netdev->trans_start = jiffies;
3345 /* Make sure there is space in the ring for the next send. */
3346 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3347 } else {
3348 dev_kfree_skb_any(skb);
3349 tx_ring->buffer_info[first].time_stamp = 0;
3350 tx_ring->next_to_use = first;
3351 }
3352
3353 return NETDEV_TX_OK;
3354}
3355
3356 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3357{
3358 struct igb_adapter *adapter = netdev_priv(netdev);
3359 struct igb_ring *tx_ring;
3360
3361 int r_idx = 0;
3362 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3363 tx_ring = adapter->multi_tx_table[r_idx];
3364
3365 /* This goes back to the question of how to logically map a tx queue
3366 * to a flow. Right now, performance is impacted slightly negatively
3367 * if using multiple tx queues. If the stack breaks away from a
3368 * single qdisc implementation, we can look at this again. */
3369 return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
3370}
3371
3372/**
3373 * igb_tx_timeout - Respond to a Tx Hang
3374 * @netdev: network interface device structure
3375 **/
3376 static void igb_tx_timeout(struct net_device *netdev)
3377 {
3378 struct igb_adapter *adapter = netdev_priv(netdev);
3379 struct e1000_hw *hw = &adapter->hw;
3380
3381 /* Do the reset outside of interrupt context */
3382 adapter->tx_timeout_count++;
3383 schedule_work(&adapter->reset_task);
3384 wr32(E1000_EICS,
3385 (adapter->eims_enable_mask & ~adapter->eims_other));
3386}
3387
3388 static void igb_reset_task(struct work_struct *work)
3389 {
3390 struct igb_adapter *adapter;
3391 adapter = container_of(work, struct igb_adapter, reset_task);
3392
3393 igb_reinit_locked(adapter);
3394}
3395
3396/**
3397 * igb_get_stats - Get System Network Statistics
3398 * @netdev: network interface device structure
3399 *
3400 * Returns the address of the device statistics structure.
3401 * The statistics are actually updated from the timer callback.
3402 **/
3403 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3404 {
3405 struct igb_adapter *adapter = netdev_priv(netdev);
3406
3407 /* only return the current stats */
3408 return &adapter->net_stats;
3409}
3410
3411/**
3412 * igb_change_mtu - Change the Maximum Transfer Unit
3413 * @netdev: network interface device structure
3414 * @new_mtu: new value for maximum frame size
3415 *
3416 * Returns 0 on success, negative on failure
3417 **/
3418 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3419 {
3420 struct igb_adapter *adapter = netdev_priv(netdev);
3421 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3422
3423 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3424 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3425 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3426 return -EINVAL;
3427 }
3428
3429 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3430 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3431 return -EINVAL;
3432 }
3433
3434 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3435 msleep(1);
73cd78f1 3436
9d5c8243
AK
3437 /* igb_down has a dependency on max_frame_size */
3438 adapter->max_frame_size = max_frame;
3439 if (netif_running(netdev))
3440 igb_down(adapter);
3441
3442 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3443 * means we reserve 2 more, this pushes us to allocate from the next
3444 * larger slab size.
3445 * i.e. RXBUFFER_2048 --> size-4096 slab
3446 */
3447
3448 if (max_frame <= IGB_RXBUFFER_256)
3449 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3450 else if (max_frame <= IGB_RXBUFFER_512)
3451 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3452 else if (max_frame <= IGB_RXBUFFER_1024)
3453 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3454 else if (max_frame <= IGB_RXBUFFER_2048)
3455 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3456 else
bf36c1a0
AD
3457#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3458 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3459#else
3460 adapter->rx_buffer_len = PAGE_SIZE / 2;
3461#endif
e1739522
AD
3462
3463 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3464 if (adapter->vfs_allocated_count &&
3465 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3466 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3467
9d5c8243
AK
3468 /* adjust allocation if LPE protects us, and we aren't using SBP */
3469 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3470 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3471 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3472
3473 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3474 netdev->mtu, new_mtu);
3475 netdev->mtu = new_mtu;
3476
3477 if (netif_running(netdev))
3478 igb_up(adapter);
3479 else
3480 igb_reset(adapter);
3481
3482 clear_bit(__IGB_RESETTING, &adapter->state);
3483
3484 return 0;
3485}
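
/*
 * Worked example of the buffer sizing above (assuming the standard
 * ETH_HLEN/ETH_FCS_LEN of 14/4 bytes): an MTU of 1500 gives
 * max_frame 1518, which matches ETH_FRAME_LEN + ETH_FCS_LEN and is
 * trimmed to MAXIMUM_ETHERNET_VLAN_SIZE (1522); an MTU of 9000 gives
 * max_frame 9018 and falls through to the half-page (or
 * IGB_RXBUFFER_16384) case.
 */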

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->net_stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igb_msix_tx(int irq, void *data)
{
	struct igb_ring *tx_ring = data;
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_tx_dca(tx_ring);
#endif

	tx_ring->total_bytes = 0;
	tx_ring->total_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igb_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		wr32(E1000_EICS, tx_ring->eims_value);
	else
		wr32(E1000_EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_ring *ring)
{
	struct e1000_hw *hw = &ring->adapter->hw;
	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
		switch (hw->mac.type) {
		case e1000_82576:
			wr32(ring->itr_register, ring->itr_val |
			     0x80000000);
			break;
		default:
			wr32(ring->itr_register, ring->itr_val |
			     (ring->itr_val << 16));
			break;
		}
		ring->set_itr = 0;
	}
}

static irqreturn_t igb_msix_rx(int irq, void *data)
{
	struct igb_ring *rx_ring = data;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */

	igb_write_itr(rx_ring);

	if (napi_schedule_prep(&rx_ring->napi))
		__napi_schedule(&rx_ring->napi);

#ifdef CONFIG_IGB_DCA
	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_rx_dca(rx_ring);
#endif
	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *rx_ring)
{
	u32 dca_rxctrl;
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
	int q = rx_ring->reg_idx;

	if (rx_ring->cpu != cpu) {
		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82576) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void igb_update_tx_dca(struct igb_ring *tx_ring)
{
	u32 dca_txctrl;
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
	int q = tx_ring->reg_idx;

	if (tx_ring->cpu != cpu) {
		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82576) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		igb_update_tx_dca(&adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		igb_update_rx_dca(&adapter->rx_ring[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].clear_to_send)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* VFs are limited to using the MTA hash table for their multicast
	 * addresses */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_multi(adapter->netdev);

	return 0;
}
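
/*
 * Layout of the E1000_VF_SET_MULTICAST mailbox message handled above,
 * as implied by the parsing code (not a hardware spec quote):
 * msgbuf[0] carries the message type in its low 16 bits and the hash
 * count in the E1000_VT_MSGINFO field, while msgbuf[1..] pack the
 * 16-bit MTA hash values two per 32-bit word, which is why hash_list
 * views the buffer through a u16 pointer.
 */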

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the vlan id before clearing the entry,
			 * otherwise we would always clear vid 0 */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
						     adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);
			return 0;
		}
	}
	return -1;
}
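
/*
 * VLVF entry layout as used by igb_vlvf_set() above, derived from the
 * masks in the code: the low bits hold the VLAN id
 * (E1000_VLVF_VLANID_MASK), a per-pool select bitmap starts at
 * E1000_VLVF_POOLSEL_SHIFT (one bit per VF plus one for the PF), and
 * E1000_VLVF_VLANID_ENABLE marks the entry as in use.  An entry is
 * released, and the matching VFTA bit cleared, once its pool bitmap
 * is empty.
 */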

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_multi(adapter->netdev);
}

static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set(hw, vf_mac, vf + 1);
	igb_set_rah_pool(hw, vf, vf + 1);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
			"Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}
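
/*
 * Summary of the PF side of the mailbox handshake implemented above:
 * a VF must first send E1000_VF_RESET, to which the PF replies with
 * ACK plus the VF's MAC address and sets clear_to_send.  Until that
 * happens every other request is NACKed.  Replies always carry
 * E1000_VT_MSGTYPE_CTS so the VF knows further configuration
 * messages will be accepted.
 */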

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(adapter->rx_ring);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&adapter->rx_ring[0].napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(adapter->rx_ring);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&adapter->rx_ring[0].napi);

	return IRQ_HANDLED;
}

static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (adapter->num_rx_queues == 1)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(rx_ring);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, rx_ring->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_rx_dca(rx_ring);
#endif
	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);

	if (rx_ring->buddy) {
#ifdef CONFIG_IGB_DCA
		if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
			igb_update_tx_dca(rx_ring->buddy);
#endif
		if (!igb_clean_tx_irq(rx_ring->buddy))
			work_done = budget;
	}

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_rx_irq_enable(rx_ring);
	}

	return work_done;
}

/**
 * igb_tx_hwtstamp - utility function which checks for a TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
						  regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: the ring whose used descriptors should be reclaimed
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) &
			 E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&adapter->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(adapter->hw.hw_addr + tx_ring->head),
				readl(adapter->hw.hw_addr + tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @ring: pointer to receive ring receiving this packet
 * @status: descriptor status field as written by hardware
 * @rx_desc: receive descriptor containing vlan and type information.
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igb_receive_skb(struct igb_ring *ring, u8 status,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = ring->adapter;
	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));

	skb_record_rx_queue(skb, ring->queue_index);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (vlan_extracted)
			vlan_gro_receive(&ring->napi, adapter->vlgrp,
					 le16_to_cpu(rx_desc->wb.upper.vlan),
					 skb);
		else
			napi_gro_receive(&ring->napi, skb);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->wb.upper.vlan));
		else
			netif_receive_skb(skb);
	}
}

static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
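
/*
 * Note on the ip_summed values used above: CHECKSUM_NONE asks the
 * stack to verify the checksum itself, while CHECKSUM_UNNECESSARY
 * tells it the hardware already validated the TCP/UDP checksum, so
 * packets with the TCPCS/UDPCS status bits set skip software
 * verification entirely.
 */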

static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
				 int *work_done, int budget)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (!adapter->rx_ps_hdr_size) {
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_buffer_len +
					 NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, length);
			goto send_up;
		}

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
			E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		if (!skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;

			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
				skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
				timecompare_transform(&adapter->compare, ns);
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igb_receive_skb(rx_ring, staterr, rx_desc, skb);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of the ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
				     int cleaned_count)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;
	bufsz += NET_IP_ALIGN;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
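
/*
 * Descriptor programming sketch for the packet-split path above: in
 * split mode each advanced RX descriptor is given two DMA addresses,
 * hdr_addr pointing at the small skb data buffer (rx_ps_hdr_size
 * bytes) and pkt_addr pointing at a half-page for any payload that
 * spills past the header; in non-split mode only pkt_addr is used
 * and hdr_addr is zeroed.
 */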

/**
 * igb_mii_ioctl - MII register access via ioctl
 * @netdev: network interface device structure
 * @ifr: interface request structure holding the mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request structure holding the hwtstamp_config
 * @cmd: SIOCSHWTSTAMP
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 * (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0<<16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (0x11 /* UDP */
	      | (1<<15) /* VF not compared */
	      | (1<<27) /* Enable Timestamping */
	      | (7<<28) /* only source port filter enabled,
			   source/target address and protocol
			   masked */)
	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
				      enabled */));

	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
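
/*
 * Hypothetical userspace sketch (not part of this driver) of driving
 * the SIOCSHWTSTAMP handler above; the socket fd and interface name
 * are assumptions, and linux/net_tstamp.h plus sys/ioctl.h would be
 * needed:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC,
 *	};
 *	struct ifreq ifr;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */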

/**
 * igb_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}
AK
5030
5031 igb_irq_disable(adapter);
5032 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5033
5034 if (!test_bit(__IGB_DOWN, &adapter->state))
5035 igb_irq_enable(adapter);
5036
5037 if ((adapter->hw.mng_cookie.status &
5038 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
5039 (vid == adapter->mng_vlan_id)) {
5040 /* release control to f/w */
5041 igb_release_hw_control(adapter);
5042 return;
5043 }
5044
4ae196df
AD
5045 /* remove vid from vlvf if sr-iov is enabled,
5046 * if not in vlvf remove from vfta */
5047 if (igb_vlvf_set(adapter, vid, false, pf_id))
5048 igb_vfta_set(hw, vid, false);
9d5c8243
AK
5049}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
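
/*
 * Example of the spddplx encoding accepted above: an ethtool
 * "speed 100 duplex full" request arrives as SPEED_100 + DUPLEX_FULL
 * (100 + 1 == 101, using the ethtool.h constants) and selects
 * ADVERTISE_100_FULL with autoneg disabled; only the gigabit case
 * re-enables autonegotiation.
 */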

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_fiber_serdes_link_82575(hw);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
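/*
 * Note on the wake mask (illustrative, not verbatim driver code):
 * adapter->wol is a bitmask of E1000_WUFC_* wake events, so a caller
 * wanting wake on magic packet plus link change would have set:
 *
 *	adapter->wol = E1000_WUFC_MAG | E1000_WUFC_LNKC;
 *
 * __igb_shutdown() above then strips E1000_WUFC_LNKC whenever link is
 * up at suspend time, before programming the mask into E1000_WUFC.
 */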

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
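/*
 * Design note: pci_prepare_to_sleep() both arms PME and selects the
 * deepest low-power state the platform can still wake from, which is
 * why it is only used on the wake path; the non-wake path disarms PME
 * explicitly and forces plain D3hot.
 */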

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
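/*
 * Design note: igb_shutdown() reuses __igb_shutdown() but only arms
 * wake and drops to D3hot when the machine is actually powering off;
 * on a plain reboot (system_state != SYSTEM_POWER_OFF) the device is
 * left powered so firmware finds it in a sane state.
 */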

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		igb_irq_disable(adapter);
		napi_schedule(&adapter->rx_ring[0].napi);
		return;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		wr32(E1000_EIMC, tx_ring->eims_value);
		igb_clean_tx_irq(tx_ring);
		wr32(E1000_EIMS, tx_ring->eims_value);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		wr32(E1000_EIMC, rx_ring->eims_value);
		napi_schedule(&rx_ring->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
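/*
 * Hookup sketch (assumed, following the usual net_device_ops pattern;
 * the initializer shown is not part of this excerpt): igb_netpoll() is
 * exposed to the stack through the driver's net_device_ops under the
 * same config guard:
 *
 *	#ifdef CONFIG_NET_POLL_CONTROLLER
 *		.ndo_poll_controller = igb_netpoll,
 *	#endif
 *
 * netconsole then calls it to drain the rings when normal interrupt
 * delivery cannot be relied on.
 */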

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
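/*
 * Registration sketch (assumed, following the standard AER pattern;
 * the struct name below is illustrative): the three igb_io_* callbacks
 * above are normally tied together and referenced from the pci_driver:
 *
 *	static struct pci_error_handlers igb_err_handler = {
 *		.error_detected = igb_io_error_detected,
 *		.slot_reset = igb_io_slot_reset,
 *		.resume = igb_io_resume,
 *	};
 *
 * Recovery then flows error_detected -> slot_reset -> resume, with
 * slot_reset deciding between RECOVERED and DISCONNECT.
 */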

static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROPE |	 /* Accept packets matched in UTA */
		    E1000_VMOLR_ROMPE |	 /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |	 /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}
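/*
 * Usage sketch (illustrative, not verbatim driver code): the PF would
 * typically apply these per-pool offload defaults once per VF while
 * bringing SR-IOV up, e.g.:
 *
 *	for (vfn = 0; vfn < adapter->vfs_allocated_count; vfn++)
 *		igb_set_vmolr(hw, vfn);
 */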

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
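/*
 * Usage sketch (illustrative; the surrounding handler is hypothetical):
 * when a VF negotiates a new MTU over the mailbox, the PF recomputes
 * the longest acceptable frame for that pool using the usual Ethernet
 * header and FCS overhead:
 *
 *	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 *
 *	igb_set_vf_rlpml(adapter, max_frame, vf);
 */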

static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
{
	u32 reg_data;

	reg_data = rd32(E1000_RAH(entry));
	reg_data &= ~E1000_RAH_POOL_MASK;
	reg_data |= E1000_RAH_POOL_1 << pool;
	wr32(E1000_RAH(entry), reg_data);
}

static void igb_set_mc_list_pools(struct igb_adapter *adapter,
				  int entry_count, u16 total_rar_filters)
{
	struct e1000_hw *hw = &adapter->hw;
	int i = adapter->vfs_allocated_count + 1;

	if ((i + entry_count) < total_rar_filters)
		total_rar_filters = i + entry_count;

	for (; i < total_rar_filters; i++)
		igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */

	igb_rar_set(hw, mac_addr, rar_entry);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_set_rah_pool(hw, vf, rar_entry);

	return 0;
}
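/*
 * Usage sketch (illustrative, not verbatim driver code): the PF assigns
 * each VF a MAC address before the VF driver loads; an initialization
 * loop might hand out randomly generated addresses:
 *
 *	unsigned char mac[ETH_ALEN];
 *
 *	random_ether_addr(mac);
 *	igb_set_vf_mac(adapter, vf, mac);
 */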

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_data;

	if (!adapter->vfs_allocated_count)
		return;

	/* VFs need the PF reset indication before they
	 * can send/receive mail */
	reg_data = rd32(E1000_CTRL_EXT);
	reg_data |= E1000_CTRL_EXT_PFRSTD;
	wr32(E1000_CTRL_EXT, reg_data);

	igb_vmdq_set_loopback_pf(hw, true);
	igb_vmdq_set_replication_pf(hw, true);
}

/* igb_main.c */