/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
		    __stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
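/* Illustrative usage (not taken from this source): "modprobe igb max_vfs=7"
 * requests seven VFs per physical function; 7 is the most the 82576 can
 * offer, since the PF keeps one of the eight queue pools for itself. */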
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_buffer *buffer_info;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
			   (u64)buffer_info->dma,
			   buffer_info->length,
			   buffer_info->next_to_watch,
			   (u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15      0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			   rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						rx_ring->rx_buffer_len, true);
					if (rx_ring->rx_buffer_len
						< IGB_RXBUFFER_1024)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1,
						  phys_to_virt(
						    buffer_info->page_dma +
						    buffer_info->page_offset),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
	}

exit:
	return;
}
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
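/* Q_IDX_82576() folds a linear queue number into the 82576's interleaved
 * layout: even queues land on register indices 0-3 and odd queues on 8-11,
 * e.g. 0 -> 0, 1 -> 8, 2 -> 1, 3 -> 9. */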
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
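		/* For example, rx queue 2 handled by vector 1 sets bit
		 * (E1000_EICR_RX_QUEUE0 << 2) in the MSIXBM entry for
		 * vector 1. */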
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
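		/* Concretely, IVAR0 entry (queue & 0x7) packs four fields:
		 * byte 0 = Rx queue n, byte 1 = Tx queue n, byte 2 = Rx queue
		 * n + 8, byte 3 = Tx queue n + 8, each a vector number ORed
		 * with E1000_IVAR_VALID. */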
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/* 82580 uses the same table-based approach as 82576 but has
		   fewer entries as a result we carry over for queues greater
		   than 4. */
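		/* Here each IVAR0 entry covers a consecutive queue pair
		 * (index = queue >> 1), with the odd queue of the pair in
		 * the upper half of the register; e.g. queues 4 and 5 share
		 * entry 2. */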
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;
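	/* e.g. 4 RSS queues with queue pairing reserve 4 vectors, one per
	 * Tx/Rx pair; without pairing they reserve 8.  Either way one more
	 * vector is added below for link status. */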
	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}
}
/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));
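	/* e.g. a 34KB Rx FIFO (pba = 34) and 1522-byte frames give
	 * min(31334, 31772) = 31334 bytes, masked down to 31328 below. */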
	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");
	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/*
			 * DMA Coalescing high water mark needs to be higher
			 * than the Rx threshold.  The Rx threshold is
			 * currently pba - 6, so we should use a high water
			 * mark of pba - 4.
			 */
			hwm = (pba - 4) << 10;

			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
			       & E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available..*/
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer= +-1000 usec in 32usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable coalescing(smart fifb)
			 * -UTRESH=0 */
			wr32(E1000_DMCRTRH, 0);

			/* set hwm to PBA - 2 * max frame size */
			wr32(E1000_FCRTC, hwm);

			/*
			 * This sets the time to wait before requesting
			 * transition to low power state to number of usecs
			 * needed to receive 1 512 byte frame at gigabit
			 * line rate
			 */
			reg = rd32(E1000_DMCTLX);
			reg |= IGB_DMCTLX_DCFLUSH_DIS;

			/* Delay 255 usec before entering Lx state. */
			reg |= 0xFF;
			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from DMAC */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size))
			     >> 6);

			/* make low power state decision controlled by DMAC */
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* end if IGB_FLAG_DMAC set */
	}
	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}
static u32 igb_fix_features(struct net_device *netdev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}
static int igb_set_features(struct net_device *netdev, u32 features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 changed = netdev->features ^ features;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (features & NETIF_F_RXCSUM)
			adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
		else
			adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
	}

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->hw_features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_VLAN_RX;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
		                 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	igb_vlan_mode(netdev, netdev->features);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		                                            "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2117 adapter
->msix_entries
? "MSI-X" :
2118 (adapter
->flags
& IGB_FLAG_HAS_MSI
) ? "MSI" : "legacy",
2119 adapter
->num_rx_queues
, adapter
->num_tx_queues
);
2120 switch (hw
->mac
.type
) {
2122 igb_set_eee_i350(hw
);
2130 igb_release_hw_control(adapter
);
2132 if (!igb_check_reset_block(hw
))
2135 if (hw
->flash_address
)
2136 iounmap(hw
->flash_address
);
2138 igb_clear_interrupt_scheme(adapter
);
2139 iounmap(hw
->hw_addr
);
2141 free_netdev(netdev
);
2143 pci_release_selected_regions(pdev
,
2144 pci_select_bars(pdev
, IORESOURCE_MEM
));
2147 pci_disable_device(pdev
);
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
		                           sizeof(struct vf_data_storage),
		                           GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
			        "Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;

		dev_info(&pdev->dev, "%d vfs allocated\n",
		         adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
		/* DMA Coalescing is not supported in IOV mode. */
		if (adapter->flags & IGB_FLAG_DMAC)
			adapter->flags &= ~IGB_FLAG_DMAC;
	}
#endif /* CONFIG_PCI_IOV */
}
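
/*
 * Usage note (illustrative addition, not part of the original source):
 * the requested VF count comes from the "max_vfs" module parameter,
 * e.g. "modprobe igb max_vfs=7".  igb_sw_init() below clamps the value
 * to 7 VFs per PF before igb_probe_vfs() attempts to enable them.
 */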
/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted.  Instead we need to shift
		 * the registers to generate a 64bit timer value.  As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits.  As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		                (1 << E1000_TIMINCA_16NS_SHIFT) |
		                (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}
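
/*
 * Worked example for the 82576 path above (illustrative addition): with
 * adapter->cycles.shift = IGB_82576_TSYNC_SHIFT = 19, every 16 ns NIC
 * tick adds 16 << 19 = 0x00800000 to the shifted cycle counter.  That
 * increment still fits the 24-bit TIMINCA increment field (maximum
 * 0x00FFFFFF), which is why 19 is the largest usable shift.
 */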
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
			         "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
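
/*
 * Queue-pairing example (illustrative addition): on an 8-CPU system
 * rss_queues starts at min_t(u32, IGB_MAX_RX_QUEUES, 8); once more than
 * 4 RSS queues are in play, IGB_FLAG_QUEUE_PAIRS makes each Tx ring
 * share an interrupt vector with its Rx sibling, halving the number of
 * MSI-X vectors igb_init_interrupt_scheme() has to find.
 */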
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev,
	                                   tx_ring->size,
	                                   &tx_ring->dma,
	                                   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
		int r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	                txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	                ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	                tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev,
	                                   rx_ring->size,
	                                   &rx_ring->dma,
	                                   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the receive descriptor"
		" ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 supports 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}
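
/*
 * RETA example (illustrative addition): with num_rx_queues = 4 and
 * shift = 0, the loop above writes the repeating byte pattern
 * 0,1,2,3,0,1,2,3,... across all 128 redirection-table entries, so the
 * low bits of each packet's RSS hash select one of the four rings in
 * round-robin fashion.
 */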
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
                                   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
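
/*
 * Sizing example (illustrative addition): with a standard 1500-byte
 * MTU, igb_sw_init() computed max_frame_size = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) = 1518 bytes; the VLAN_TAG_SIZE (4) added above
 * yields 1522, so a VLAN-tagged full-size frame still passes the RLPML
 * check.
 */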
static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	                rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	                rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	               ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
		         E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}
	if (hw->mac.type == e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
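
/*
 * SRRCTL encoding example (illustrative addition): for a 2048-byte
 * buffer the one-buffer path above programs ALIGN(2048, 1024) >>
 * E1000_SRRCTL_BSIZEPKT_SHIFT, i.e. the packet buffer size in 1 KB
 * units (2), whereas the header-split path expresses the header buffer
 * size in 64-byte units via E1000_SRRCTL_BSIZEHDRSIZE_SHIFT.
 */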
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
	                 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
	                  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
					buffer_info->dma,
					buffer_info->length,
					DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					buffer_info->dma,
					buffer_info->length,
					DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
	                  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
			                 buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
			               buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
	                 adapter->vfs_allocated_count);

	return 0;
}
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8  *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
			                 rar_entries--,
			                 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
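
/*
 * RAR budget example (illustrative addition): with rar_entry_count =
 * 24 (82576) and 7 VFs, entry 0 holds the PF MAC and one entry is
 * reserved per VF, leaving rar_entries = 24 - (7 + 1) = 16 filters for
 * extra unicast addresses before the function returns -ENOMEM and the
 * caller falls back to unicast promiscuous mode.
 */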
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (!(wvbr = rd32(E1000_WVBR)))
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}
#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for(j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				"Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}
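
/*
 * WVBR layout note (illustrative addition): WVBR reports one
 * spoof-event bit per VF queue, with the second queue of VF j sitting
 * IGB_STAGGERED_QUEUE_OFFSET (8) bits above the first; the loop above
 * tests and then clears both bits for each offending VF.
 */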
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}
static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
	                                           struct igb_adapter,
	                                           watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
			                                 &adapter->link_speed,
			                                 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
				 "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
				printk(KERN_INFO "igb: %s The network adapter "
						 "link speed was downshifted "
						 "because it overheated.\n",
						 netdev->name);
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
				printk(KERN_ERR "igb: %s The network adapter "
						"was stopped because it "
						"overheated.\n",
						netdev->name);
			}

			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		tx_ring->detect_tx_hung = true;
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *ring;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	ring = q_vector->rx_ring;
	if (ring) {
		packets = ACCESS_ONCE(ring->total_packets);

		if (packets)
			avg_wire_size = ring->total_bytes / packets;
	}

	ring = q_vector->tx_ring;
	if (ring) {
		packets = ACCESS_ONCE(ring->total_packets);

		if (packets)
			avg_wire_size = max_t(u32, avg_wire_size,
			                      ring->total_bytes / packets);
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* when in itr mode 3 do not exceed 20K ints/sec */
	if (adapter->rx_itr_setting == 3 && new_val < 196)
		new_val = 196;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
                                   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
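
/*
 * Classification example (illustrative addition): starting from
 * low_latency, an interval of 40 packets totalling 20000 bytes has
 * bytes/packets = 500, so none of the bulk tests fire and
 * (packets > 35) promotes the vector to lowest_latency (70K ints/sec);
 * a TSO burst averaging over 8000 bytes per packet would instead drop
 * it to bulk_latency.
 */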
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
	                            adapter->rx_itr,
	                            q_vector->rx_ring->total_packets,
	                            q_vector->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
	                            adapter->tx_itr,
	                            q_vector->tx_ring->total_packets,
	                            q_vector->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	q_vector->rx_ring->total_bytes = 0;
	q_vector->rx_ring->total_packets = 0;
	q_vector->tx_ring->total_bytes = 0;
	q_vector->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
		             max((new_itr * q_vector->itr_val) /
		                 (new_itr + (q_vector->itr_val >> 2)),
		                 new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16

static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx;
	u8 l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                                   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
				          (const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	unsigned int hlen = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = dma_map_page(dev,
						frag->page,
						frag->page_offset,
						len,
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
	tx_ring->buffer_info[i].gso_segs = gso_segs;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    u32 tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	int tso = 0, count;
	u32 tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (adapter->hw.mac.type == e1000_82580)
		max_frame += IGB_TS_HDR_LEN;

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;

	if ((adapter->hw.mac.type == e1000_82580) &&
	    (rx_buffer_len == IGB_RXBUFFER_128))
		rx_buffer_len += IGB_RXBUFFER_64;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer: pointer to igb_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (buffer_info->skb) {
				total_bytes += buffer_info->bytecount;
				/* gso_segs is currently only valid for tcp */
				total_packets += buffer_info->gso_segs;
				igb_tx_hwtstamp(q_vector, buffer_info);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	return count < tx_ring->count;
}
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (staterr & E1000_RXDADV_STAT_TSIP) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			dma_unmap_single(dev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			dma_unmap_page(dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
			igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		if (staterr & E1000_RXD_STAT_VP) {
			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);

			__vlan_hwaccel_put_tag(skb, vid);
		}
		napi_gro_receive(&q_vector->napi, skb);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (unlikely(!buffer_info->page)) {
					u64_stats_update_begin(&rx_ring->rx_syncp);
					rx_ring->rx_stats.alloc_failed++;
					u64_stats_update_end(&rx_ring->rx_syncp);
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(rx_ring->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				u64_stats_update_begin(&rx_ring->rx_syncp);
				rx_ring->rx_stats.alloc_failed++;
				u64_stats_update_end(&rx_ring->rx_syncp);
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (unlikely(!skb)) {
				u64_stats_update_begin(&rx_ring->rx_syncp);
				rx_ring->rx_stats.alloc_failed++;
				u64_stats_update_end(&rx_ring->rx_syncp);
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_single(rx_ring->dev,
							  skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->dma)) {
				buffer_info->dma = 0;
				u64_stats_update_begin(&rx_ring->rx_syncp);
				rx_ring->rx_stats.alloc_failed++;
				u64_stats_update_end(&rx_ring->rx_syncp);
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
6078 * igb_hwtstamp_ioctl - control hardware time stamping
6083 * Outgoing time stamping can be enabled and disabled. Play nice and
6084 * disable it when requested, although it shouldn't case any overhead
6085 * when no packet needs it. At most one packet in the queue may be
6086 * marked for time stamping, otherwise it would be impossible to tell
6087 * for sure to which packet the hardware time stamp belongs.
6089 * Incoming time stamping has to be configured via the hardware
6090 * filters. Not all combinations are supported, in particular event
6091 * type has to be specified. Matching the kind of event packet is
6092 * not supported, with the exception of "all V2 events regardless of
6096 static int igb_hwtstamp_ioctl(struct net_device
*netdev
,
6097 struct ifreq
*ifr
, int cmd
)
6099 struct igb_adapter
*adapter
= netdev_priv(netdev
);
6100 struct e1000_hw
*hw
= &adapter
->hw
;
6101 struct hwtstamp_config config
;
6102 u32 tsync_tx_ctl
= E1000_TSYNCTXCTL_ENABLED
;
6103 u32 tsync_rx_ctl
= E1000_TSYNCRXCTL_ENABLED
;
6104 u32 tsync_rx_cfg
= 0;
6109 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
6112 /* reserved for future extensions */
6116 switch (config
.tx_type
) {
6117 case HWTSTAMP_TX_OFF
:
6119 case HWTSTAMP_TX_ON
:
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	default:
		return -ERANGE;
	}
	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);
#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
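	/*
	 * Note (added for clarity): UDP port 319 is the IEEE 1588 event
	 * port, which carries the Sync and Delay_Req messages that need
	 * hardware timestamps; general messages on port 320 are
	 * deliberately not matched here.
	 */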
	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
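/*
 * Illustrative sketch (an assumption, not driver code): these helpers
 * let shared e1000 code touch PCIe capability registers by offset, e.g.
 * reading the link status word at PCI_EXP_LNKSTA from <linux/pci_regs.h>:
 *
 *	u16 lnksta;
 *
 *	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &lnksta))
 *		pr_info("PCIe link status: 0x%04x\n", lnksta);
 */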
static void igb_vlan_mode(struct net_device *netdev, u32 features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);

	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}
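/*
 * Illustrative usage (an assumption, not driver code): the add/kill_vid
 * callbacks above fire when a VLAN sub-interface is created or removed,
 * e.g. with iproute2:
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *	ip link del eth0.100
 */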
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
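/*
 * Worked example (added for clarity): for the MAC address
 * 00:11:22:33:44:55, the packing above yields rar_low = 0x33221100 and
 * rar_high = 0x5544 (before the valid and pool bits are OR'ed in), i.e.
 * the first four octets land in RAL lowest-byte first.
 */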
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}
static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}
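/*
 * Worked example (added for clarity), assuming the usual 14-bit
 * fractional shift: with link_speed = 1000 and tx_rate = 300,
 * rf_int = 1000 / 300 = 3 and rf_dec = (1000 - 900) * 2^14 / 300 = 5461,
 * so the programmed rate factor is 3 + 5461/2^14, roughly 3.333, which
 * is exactly link_speed / tx_rate.
 */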
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}
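/*
 * Illustrative usage (an assumption, not driver code): this ndo hook is
 * reached from userspace through the rtnetlink VF API, e.g. with
 * iproute2:
 *
 *	ip link set dev eth0 vf 0 rate 300
 *
 * where the rate argument is in Mbps, matching tx_rate here.
 */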
static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);