e1000: fix vlan processing regression
[deliverable/linux.git] drivers/net/ethernet/intel/e1000/e1000_main.c
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
    INTEL_E1000_ETHERNET_DEVICE(0x1000),
    INTEL_E1000_ETHERNET_DEVICE(0x1001),
    INTEL_E1000_ETHERNET_DEVICE(0x1004),
    INTEL_E1000_ETHERNET_DEVICE(0x1008),
    INTEL_E1000_ETHERNET_DEVICE(0x1009),
    INTEL_E1000_ETHERNET_DEVICE(0x100C),
    INTEL_E1000_ETHERNET_DEVICE(0x100D),
    INTEL_E1000_ETHERNET_DEVICE(0x100E),
    INTEL_E1000_ETHERNET_DEVICE(0x100F),
    INTEL_E1000_ETHERNET_DEVICE(0x1010),
    INTEL_E1000_ETHERNET_DEVICE(0x1011),
    INTEL_E1000_ETHERNET_DEVICE(0x1012),
    INTEL_E1000_ETHERNET_DEVICE(0x1013),
    INTEL_E1000_ETHERNET_DEVICE(0x1014),
    INTEL_E1000_ETHERNET_DEVICE(0x1015),
    INTEL_E1000_ETHERNET_DEVICE(0x1016),
    INTEL_E1000_ETHERNET_DEVICE(0x1017),
    INTEL_E1000_ETHERNET_DEVICE(0x1018),
    INTEL_E1000_ETHERNET_DEVICE(0x1019),
    INTEL_E1000_ETHERNET_DEVICE(0x101A),
    INTEL_E1000_ETHERNET_DEVICE(0x101D),
    INTEL_E1000_ETHERNET_DEVICE(0x101E),
    INTEL_E1000_ETHERNET_DEVICE(0x1026),
    INTEL_E1000_ETHERNET_DEVICE(0x1027),
    INTEL_E1000_ETHERNET_DEVICE(0x1028),
    INTEL_E1000_ETHERNET_DEVICE(0x1075),
    INTEL_E1000_ETHERNET_DEVICE(0x1076),
    INTEL_E1000_ETHERNET_DEVICE(0x1077),
    INTEL_E1000_ETHERNET_DEVICE(0x1078),
    INTEL_E1000_ETHERNET_DEVICE(0x1079),
    INTEL_E1000_ETHERNET_DEVICE(0x107A),
    INTEL_E1000_ETHERNET_DEVICE(0x107B),
    INTEL_E1000_ETHERNET_DEVICE(0x107C),
    INTEL_E1000_ETHERNET_DEVICE(0x108A),
    INTEL_E1000_ETHERNET_DEVICE(0x1099),
    INTEL_E1000_ETHERNET_DEVICE(0x10B5),
    INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
    /* required last entry */
    {0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                               struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring,
                               int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                                         struct e1000_rx_ring *rx_ring,
                                         int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
                            netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
                                     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
    "Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
    .error_detected = e1000_io_error_detected,
    .slot_reset = e1000_io_slot_reset,
    .resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
    .name = e1000_driver_name,
    .id_table = e1000_pci_tbl,
    .probe = e1000_probe,
    .remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
    /* Power Management Hooks */
    .suspend = e1000_suspend,
    .resume = e1000_resume,
#endif
    .shutdown = e1000_shutdown,
    .err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
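/* e1000_probe() expands this level into a netif_msg bitmap:
 * adapter->msg_enable = (1 << debug) - 1 */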

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
    struct e1000_adapter *adapter = hw->back;
    return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
    int ret;
    pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

    pr_info("%s\n", e1000_copyright);

    ret = pci_register_driver(&e1000_driver);
    if (copybreak != COPYBREAK_DEFAULT) {
        if (copybreak == 0)
            pr_info("copybreak disabled\n");
        else
            pr_info("copybreak enabled for "
                    "packets <= %u bytes\n", copybreak);
    }
    return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
    pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    irq_handler_t handler = e1000_intr;
    int irq_flags = IRQF_SHARED;
    int err;

    err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                      netdev);
    if (err) {
        e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
    }

    return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;

    free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;

    ew32(IMC, ~0);
    E1000_WRITE_FLUSH();
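    /* make sure any handler that was already running has finished */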
    synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;

    ew32(IMS, IMS_ENABLE_MASK);
    E1000_WRITE_FLUSH();
}

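/* Keep the VLAN id carried in the management (DHCP) cookie registered in
 * the hardware filter while VLANs are in use, and retire the previously
 * tracked id once no host VLAN still needs it. */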
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    struct net_device *netdev = adapter->netdev;
    u16 vid = hw->mng_cookie.vlan_id;
    u16 old_vid = adapter->mng_vlan_id;

    if (!e1000_vlan_used(adapter))
        return;

    if (!test_bit(vid, adapter->active_vlans)) {
        if (hw->mng_cookie.status &
            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
            e1000_vlan_rx_add_vid(netdev, vid);
            adapter->mng_vlan_id = vid;
        } else {
            adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
        }
        if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !test_bit(old_vid, adapter->active_vlans))
            e1000_vlan_rx_kill_vid(netdev, old_vid);
    } else {
        adapter->mng_vlan_id = vid;
    }
}

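/* While the driver owns the interface, ARP frames must reach the host
 * stack, so hardware interception of ARP is disabled here; the mirror
 * image, e1000_release_manageability(), re-enables it on the way out. */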
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;

    if (adapter->en_mng_pt) {
        u32 manc = er32(MANC);

        /* disable hardware interception of ARP */
        manc &= ~(E1000_MANC_ARP_EN);

        ew32(MANC, manc);
    }
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;

    if (adapter->en_mng_pt) {
        u32 manc = er32(MANC);

        /* re-enable hardware interception of ARP */
        manc |= E1000_MANC_ARP_EN;

        ew32(MANC, manc);
    }
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter = private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int i;

    e1000_set_rx_mode(netdev);

    e1000_restore_vlan(adapter);
    e1000_init_manageability(adapter);

    e1000_configure_tx(adapter);
    e1000_setup_rctl(adapter);
    e1000_configure_rx(adapter);
    /* call E1000_DESC_UNUSED which always leaves
     * at least 1 descriptor unused to make sure
     * next_to_use != next_to_clean */
    for (i = 0; i < adapter->num_rx_queues; i++) {
        struct e1000_rx_ring *ring = &adapter->rx_ring[i];
        adapter->alloc_rx_buf(adapter, ring,
                              E1000_DESC_UNUSED(ring));
    }
}

int e1000_up(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;

    /* hardware has been reset, we need to reload some things */
    e1000_configure(adapter);

    clear_bit(__E1000_DOWN, &adapter->flags);

    napi_enable(&adapter->napi);

    e1000_irq_enable(adapter);

    netif_wake_queue(adapter->netdev);

    /* fire a link change interrupt to start the watchdog */
    ew32(ICS, E1000_ICS_LSC);
    return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    u16 mii_reg = 0;

    /* Just clear the power down bit to wake the phy back up */
    if (hw->media_type == e1000_media_type_copper) {
        /* according to the manual, the phy will retain its
         * settings across a power-down/up cycle */
        e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
        mii_reg &= ~MII_CR_POWER_DOWN;
        e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
    }
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;

    /* Power down the PHY so no link is implied when interface is down *
     * The PHY cannot be powered down if any of the following is true *
     * (a) WoL is enabled
     * (b) AMT is active
     * (c) SoL/IDER session is active */
    if (!adapter->wol && hw->mac_type >= e1000_82540 &&
        hw->media_type == e1000_media_type_copper) {
        u16 mii_reg = 0;

        switch (hw->mac_type) {
        case e1000_82540:
        case e1000_82545:
        case e1000_82545_rev_3:
        case e1000_82546:
        case e1000_ce4100:
        case e1000_82546_rev_3:
        case e1000_82541:
        case e1000_82541_rev_2:
        case e1000_82547:
        case e1000_82547_rev_2:
            if (er32(MANC) & E1000_MANC_SMBUS_EN)
                goto out;
            break;
        default:
            goto out;
        }
        e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
        mii_reg |= MII_CR_POWER_DOWN;
        e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
        msleep(1);
    }
out:
    return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
    set_bit(__E1000_DOWN, &adapter->flags);
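    /* with __E1000_DOWN set first, the tasks below cannot reschedule
     * themselves while they are being cancelled */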
    cancel_work_sync(&adapter->reset_task);
    cancel_delayed_work_sync(&adapter->watchdog_task);
    cancel_delayed_work_sync(&adapter->phy_info_task);
    cancel_delayed_work_sync(&adapter->fifo_stall_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    struct net_device *netdev = adapter->netdev;
    u32 rctl, tctl;

    /* disable receives in the hardware */
    rctl = er32(RCTL);
    ew32(RCTL, rctl & ~E1000_RCTL_EN);
    /* flush and sleep below */

    netif_tx_disable(netdev);

    /* disable transmits in the hardware */
    tctl = er32(TCTL);
    tctl &= ~E1000_TCTL_EN;
    ew32(TCTL, tctl);
    /* flush both disables and wait for them to finish */
    E1000_WRITE_FLUSH();
    msleep(10);

    napi_disable(&adapter->napi);

    e1000_irq_disable(adapter);

    /*
     * Setting DOWN must be after irq_disable to prevent
     * a screaming interrupt. Setting DOWN also prevents
     * tasks from rescheduling.
     */
    e1000_down_and_stop(adapter);

    adapter->link_speed = 0;
    adapter->link_duplex = 0;
    netif_carrier_off(netdev);

    e1000_reset(adapter);
    e1000_clean_all_tx_rings(adapter);
    e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
    while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
        msleep(1);
    mutex_lock(&adapter->mutex);
    e1000_down(adapter);
    e1000_up(adapter);
    mutex_unlock(&adapter->mutex);
    clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
    /* if rtnl_lock is not held the call path is bogus */
    ASSERT_RTNL();
    WARN_ON(in_interrupt());
    while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
        msleep(1);
    e1000_down(adapter);
    e1000_up(adapter);
    clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    u32 pba = 0, tx_space, min_tx_space, min_rx_space;
    bool legacy_pba_adjust = false;
    u16 hwm;

    /* Repartition Pba for greater than 9k mtu
     * To take effect CTRL.RST is required.
     */

    switch (hw->mac_type) {
    case e1000_82542_rev2_0:
    case e1000_82542_rev2_1:
    case e1000_82543:
    case e1000_82544:
    case e1000_82540:
    case e1000_82541:
    case e1000_82541_rev_2:
        legacy_pba_adjust = true;
        pba = E1000_PBA_48K;
        break;
    case e1000_82545:
    case e1000_82545_rev_3:
    case e1000_82546:
    case e1000_ce4100:
    case e1000_82546_rev_3:
        pba = E1000_PBA_48K;
        break;
    case e1000_82547:
    case e1000_82547_rev_2:
        legacy_pba_adjust = true;
        pba = E1000_PBA_30K;
        break;
    case e1000_undefined:
    case e1000_num_macs:
        break;
    }

    if (legacy_pba_adjust) {
        if (hw->max_frame_size > E1000_RXBUFFER_8192)
            pba -= 8; /* allocate more FIFO for Tx */

        if (hw->mac_type == e1000_82547) {
            adapter->tx_fifo_head = 0;
            adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
            adapter->tx_fifo_size =
                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
            atomic_set(&adapter->tx_fifo_stall, 0);
        }
    } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
        /* adjust PBA for jumbo frames */
        ew32(PBA, pba);

        /* To maintain wire speed transmits, the Tx FIFO should be
         * large enough to accommodate two full transmit packets,
         * rounded up to the next 1KB and expressed in KB. Likewise,
         * the Rx FIFO should be large enough to accommodate at least
         * one full receive packet and is similarly rounded up and
         * expressed in KB. */
        pba = er32(PBA);
        /* upper 16 bits has Tx packet buffer allocation size in KB */
        tx_space = pba >> 16;
        /* lower 16 bits has Rx packet buffer allocation size in KB */
        pba &= 0xffff;
        /*
         * the tx fifo also stores 16 bytes of information about the tx
         * but don't include ethernet FCS because hardware appends it
         */
        min_tx_space = (hw->max_frame_size +
                        sizeof(struct e1000_tx_desc) -
                        ETH_FCS_LEN) * 2;
        min_tx_space = ALIGN(min_tx_space, 1024);
        min_tx_space >>= 10;
        /* software strips receive CRC, so leave room for it */
        min_rx_space = hw->max_frame_size;
        min_rx_space = ALIGN(min_rx_space, 1024);
        min_rx_space >>= 10;

        /* If current Tx allocation is less than the min Tx FIFO size,
         * and the min Tx FIFO size is less than the current Rx FIFO
         * allocation, take space away from current Rx allocation */
        if (tx_space < min_tx_space &&
            ((min_tx_space - tx_space) < pba)) {
            pba = pba - (min_tx_space - tx_space);

            /* PCI/PCIx hardware has PBA alignment constraints */
            switch (hw->mac_type) {
            case e1000_82545 ... e1000_82546_rev_3:
                pba &= ~(E1000_PBA_8K - 1);
                break;
            default:
                break;
            }

            /* if short on rx space, rx wins and must trump tx
             * adjustment or use Early Receive if available */
            if (pba < min_rx_space)
                pba = min_rx_space;
        }
    }

    ew32(PBA, pba);

    /*
     * flow control settings:
     * The high water mark must be low enough to fit one full frame
     * (or the size used for early receive) above it in the Rx FIFO.
     * Set it to the lower of:
     * - 90% of the Rx FIFO size, and
     * - the full Rx FIFO size minus the early receive size (for parts
     *   with ERT support assuming ERT set to E1000_ERT_2048), or
     * - the full Rx FIFO size minus one full frame
     */
    hwm = min(((pba << 10) * 9 / 10),
              ((pba << 10) - hw->max_frame_size));
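    /* e.g. a 48 KB Rx FIFO and 1518-byte frames:
     * min(49152 * 9 / 10, 49152 - 1518) = 44236, rounded down to
     * 44232 by the 8-byte granularity mask below */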

    hw->fc_high_water = hwm & 0xFFF8;    /* 8-byte granularity */
    hw->fc_low_water = hw->fc_high_water - 8;
    hw->fc_pause_time = E1000_FC_PAUSE_TIME;
    hw->fc_send_xon = 1;
    hw->fc = hw->original_fc;

    /* Allow time for pending master requests to run */
    e1000_reset_hw(hw);
    if (hw->mac_type >= e1000_82544)
        ew32(WUC, 0);

    if (e1000_init_hw(hw))
        e_dev_err("Hardware Error\n");
    e1000_update_mng_vlan(adapter);

    /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
    if (hw->mac_type >= e1000_82544 &&
        hw->autoneg == 1 &&
        hw->autoneg_advertised == ADVERTISE_1000_FULL) {
        u32 ctrl = er32(CTRL);
        /* clear phy power management bit if we are in gig only mode,
         * which if enabled will attempt negotiation to 100Mb, which
         * can cause a loss of link at power off or driver unload */
        ctrl &= ~E1000_CTRL_SWDPIN3;
        ew32(CTRL, ctrl);
    }

    /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
    ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

    e1000_reset_adaptive(hw);
    e1000_phy_get_info(hw, &adapter->phy_info);

    e1000_release_manageability(adapter);
}

/**
 * Dump the eeprom for users having checksum issues
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct ethtool_eeprom eeprom;
    const struct ethtool_ops *ops = netdev->ethtool_ops;
    u8 *data;
    int i;
    u16 csum_old, csum_new = 0;

    eeprom.len = ops->get_eeprom_len(netdev);
    eeprom.offset = 0;

    data = kmalloc(eeprom.len, GFP_KERNEL);
    if (!data)
        return;

    ops->get_eeprom(netdev, &eeprom, data);

    csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
               (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
    for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
        csum_new += data[i] + (data[i + 1] << 8);
    csum_new = EEPROM_SUM - csum_new;

    pr_err("/*********************/\n");
    pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
    pr_err("Calculated              : 0x%04x\n", csum_new);

    pr_err("Offset    Values\n");
    pr_err("========  ======\n");
    print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

    pr_err("Include this output when contacting your support provider.\n");
    pr_err("This is not a software error! Something bad happened to\n");
    pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
    pr_err("result in further problems, possibly loss of data,\n");
    pr_err("corruption or system hangs!\n");
    pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
    pr_err("which is invalid and requires you to set the proper MAC\n");
    pr_err("address manually before continuing to enable this network\n");
    pr_err("device. Please inspect the EEPROM dump and report the\n");
    pr_err("issue to your hardware vendor or Intel Customer Support.\n");
    pr_err("/*********************/\n");

    kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
    switch (pdev->device) {
    case E1000_DEV_ID_82540EM:
    case E1000_DEV_ID_82540EM_LOM:
    case E1000_DEV_ID_82540EP:
    case E1000_DEV_ID_82540EP_LOM:
    case E1000_DEV_ID_82540EP_LP:
    case E1000_DEV_ID_82541EI:
    case E1000_DEV_ID_82541EI_MOBILE:
    case E1000_DEV_ID_82541ER:
    case E1000_DEV_ID_82541ER_LOM:
    case E1000_DEV_ID_82541GI:
    case E1000_DEV_ID_82541GI_LF:
    case E1000_DEV_ID_82541GI_MOBILE:
    case E1000_DEV_ID_82544EI_COPPER:
    case E1000_DEV_ID_82544EI_FIBER:
    case E1000_DEV_ID_82544GC_COPPER:
    case E1000_DEV_ID_82544GC_LOM:
    case E1000_DEV_ID_82545EM_COPPER:
    case E1000_DEV_ID_82545EM_FIBER:
    case E1000_DEV_ID_82546EB_COPPER:
    case E1000_DEV_ID_82546EB_FIBER:
    case E1000_DEV_ID_82546EB_QUAD_COPPER:
        return true;
    default:
        return false;
    }
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
                                            netdev_features_t features)
{
    /*
     * Since there is no support for separate rx/tx vlan accel
     * enable/disable make sure tx flag is always in same state as rx.
     */
    if (features & NETIF_F_HW_VLAN_RX)
        features |= NETIF_F_HW_VLAN_TX;
    else
        features &= ~NETIF_F_HW_VLAN_TX;

    return features;
}

static int e1000_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
    struct e1000_adapter *adapter = netdev_priv(netdev);
    netdev_features_t changed = features ^ netdev->features;

    if (changed & NETIF_F_HW_VLAN_RX)
        e1000_vlan_mode(netdev, features);

    if (!(changed & NETIF_F_RXCSUM))
        return 0;

    adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

    if (netif_running(netdev))
        e1000_reinit_locked(adapter);
    else
        e1000_reset(adapter);

    return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
    .ndo_open = e1000_open,
    .ndo_stop = e1000_close,
    .ndo_start_xmit = e1000_xmit_frame,
    .ndo_get_stats = e1000_get_stats,
    .ndo_set_rx_mode = e1000_set_rx_mode,
    .ndo_set_mac_address = e1000_set_mac,
    .ndo_tx_timeout = e1000_tx_timeout,
    .ndo_change_mtu = e1000_change_mtu,
    .ndo_do_ioctl = e1000_ioctl,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = e1000_netpoll,
#endif
    .ndo_fix_features = e1000_fix_features,
    .ndo_set_features = e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
                                struct e1000_hw *hw)
{
    struct pci_dev *pdev = adapter->pdev;

    /* PCI config space info */
    hw->vendor_id = pdev->vendor;
    hw->device_id = pdev->device;
    hw->subsystem_vendor_id = pdev->subsystem_vendor;
    hw->subsystem_id = pdev->subsystem_device;
    hw->revision_id = pdev->revision;

    pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

    hw->max_frame_size = adapter->netdev->mtu +
                         ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
    hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

    /* identify the MAC */
    if (e1000_set_mac_type(hw)) {
        e_err(probe, "Unknown MAC Type\n");
        return -EIO;
    }

    switch (hw->mac_type) {
    default:
        break;
    case e1000_82541:
    case e1000_82547:
    case e1000_82541_rev_2:
    case e1000_82547_rev_2:
        hw->phy_init_script = 1;
        break;
    }

    e1000_set_media_type(hw);
    e1000_get_bus_info(hw);

    hw->wait_autoneg_complete = false;
    hw->tbi_compatibility_en = true;
    hw->adaptive_ifs = true;

    /* Copper options */

    if (hw->media_type == e1000_media_type_copper) {
        hw->mdix = AUTO_ALL_MODES;
        hw->disable_polarity_correction = false;
        hw->master_slave = E1000_MASTER_SLAVE;
    }

    return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
    struct net_device *netdev;
    struct e1000_adapter *adapter;
    struct e1000_hw *hw;

    static int cards_found = 0;
    static int global_quad_port_a = 0; /* global ksp3 port a indication */
    int i, err, pci_using_dac;
    u16 eeprom_data = 0;
    u16 tmp = 0;
    u16 eeprom_apme_mask = E1000_EEPROM_APME;
    int bars, need_ioport;

    /* do not allocate ioport bars when not needed */
    need_ioport = e1000_is_need_ioport(pdev);
    if (need_ioport) {
        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
        err = pci_enable_device(pdev);
    } else {
        bars = pci_select_bars(pdev, IORESOURCE_MEM);
        err = pci_enable_device_mem(pdev);
    }
    if (err)
        return err;

    err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
    if (err)
        goto err_pci_reg;

    pci_set_master(pdev);
    err = pci_save_state(pdev);
    if (err)
        goto err_alloc_etherdev;

    err = -ENOMEM;
    netdev = alloc_etherdev(sizeof(struct e1000_adapter));
    if (!netdev)
        goto err_alloc_etherdev;

    SET_NETDEV_DEV(netdev, &pdev->dev);

    pci_set_drvdata(pdev, netdev);
    adapter = netdev_priv(netdev);
    adapter->netdev = netdev;
    adapter->pdev = pdev;
    adapter->msg_enable = (1 << debug) - 1;
    adapter->bars = bars;
    adapter->need_ioport = need_ioport;

    hw = &adapter->hw;
    hw->back = adapter;

    err = -EIO;
    hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
    if (!hw->hw_addr)
        goto err_ioremap;

    if (adapter->need_ioport) {
        for (i = BAR_1; i <= BAR_5; i++) {
            if (pci_resource_len(pdev, i) == 0)
                continue;
            if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                hw->io_base = pci_resource_start(pdev, i);
                break;
            }
        }
    }

    /* make ready for any if (hw->...) below */
    err = e1000_init_hw_struct(adapter, hw);
    if (err)
        goto err_sw_init;

    /*
     * there is a workaround being applied below that limits
     * 64-bit DMA addresses to 64-bit hardware. There are some
     * 32-bit adapters that Tx hang when given 64-bit DMA addresses
     */
    pci_using_dac = 0;
    if ((hw->bus_type == e1000_bus_type_pcix) &&
        !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
        /*
         * according to DMA-API-HOWTO, coherent calls will always
         * succeed if the set call did
         */
        dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        pci_using_dac = 1;
    } else {
        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
            pr_err("No usable DMA config, aborting\n");
            goto err_dma;
        }
        dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
    }

    netdev->netdev_ops = &e1000_netdev_ops;
    e1000_set_ethtool_ops(netdev);
    netdev->watchdog_timeo = 5 * HZ;
    netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

    strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

    adapter->bd_number = cards_found;

    /* setup the private structure */

    err = e1000_sw_init(adapter);
    if (err)
        goto err_sw_init;

    err = -EIO;
    if (hw->mac_type == e1000_ce4100) {
        hw->ce4100_gbe_mdio_base_virt =
            ioremap(pci_resource_start(pdev, BAR_1),
                    pci_resource_len(pdev, BAR_1));

        if (!hw->ce4100_gbe_mdio_base_virt)
            goto err_mdio_ioremap;
    }

    if (hw->mac_type >= e1000_82543) {
        netdev->hw_features = NETIF_F_SG |
                              NETIF_F_HW_CSUM |
                              NETIF_F_HW_VLAN_RX;
        netdev->features = NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_FILTER;
    }

    if ((hw->mac_type >= e1000_82544) &&
        (hw->mac_type != e1000_82547))
        netdev->hw_features |= NETIF_F_TSO;

    netdev->priv_flags |= IFF_SUPP_NOFCS;

    netdev->features |= netdev->hw_features;
    netdev->hw_features |= NETIF_F_RXCSUM;
    netdev->hw_features |= NETIF_F_RXFCS;

    if (pci_using_dac) {
        netdev->features |= NETIF_F_HIGHDMA;
        netdev->vlan_features |= NETIF_F_HIGHDMA;
    }

    netdev->vlan_features |= NETIF_F_TSO;
    netdev->vlan_features |= NETIF_F_HW_CSUM;
    netdev->vlan_features |= NETIF_F_SG;

    netdev->priv_flags |= IFF_UNICAST_FLT;

    adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

    /* initialize eeprom parameters */
    if (e1000_init_eeprom_params(hw)) {
        e_err(probe, "EEPROM initialization failed\n");
        goto err_eeprom;
    }

    /* before reading the EEPROM, reset the controller to
     * put the device in a known good starting state */

    e1000_reset_hw(hw);

    /* make sure the EEPROM is good */
    if (e1000_validate_eeprom_checksum(hw) < 0) {
        e_err(probe, "The EEPROM Checksum Is Not Valid\n");
        e1000_dump_eeprom(adapter);
        /*
         * set MAC address to all zeroes to invalidate and temporarily
         * disable this device for the user. This blocks regular
         * traffic while still permitting ethtool ioctls from reaching
         * the hardware as well as allowing the user to run the
         * interface after manually setting a hw addr using
         * `ip set address`
         */
        memset(hw->mac_addr, 0, netdev->addr_len);
    } else {
        /* copy the MAC address out of the EEPROM */
        if (e1000_read_mac_addr(hw))
            e_err(probe, "EEPROM Read Error\n");
    }
    /* don't block initialization here due to bad MAC address */
    memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
    memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

    if (!is_valid_ether_addr(netdev->perm_addr))
        e_err(probe, "Invalid MAC Address\n");

    INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
    INIT_DELAYED_WORK(&adapter->fifo_stall_task,
                      e1000_82547_tx_fifo_stall_task);
    INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
    INIT_WORK(&adapter->reset_task, e1000_reset_task);

    e1000_check_options(adapter);

    /* Initial Wake on LAN setting
     * If APM wake is enabled in the EEPROM,
     * enable the ACPI Magic Packet filter
     */

    switch (hw->mac_type) {
    case e1000_82542_rev2_0:
    case e1000_82542_rev2_1:
    case e1000_82543:
        break;
    case e1000_82544:
        e1000_read_eeprom(hw,
            EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
        eeprom_apme_mask = E1000_EEPROM_82544_APM;
        break;
    case e1000_82546:
    case e1000_82546_rev_3:
        if (er32(STATUS) & E1000_STATUS_FUNC_1) {
            e1000_read_eeprom(hw,
                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
            break;
        }
        /* Fall Through */
    default:
        e1000_read_eeprom(hw,
            EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
        break;
    }
    if (eeprom_data & eeprom_apme_mask)
        adapter->eeprom_wol |= E1000_WUFC_MAG;

    /* now that we have the eeprom settings, apply the special cases
     * where the eeprom may be wrong or the board simply won't support
     * wake on lan on a particular port */
    switch (pdev->device) {
    case E1000_DEV_ID_82546GB_PCIE:
        adapter->eeprom_wol = 0;
        break;
    case E1000_DEV_ID_82546EB_FIBER:
    case E1000_DEV_ID_82546GB_FIBER:
        /* Wake events only supported on port A for dual fiber
         * regardless of eeprom setting */
        if (er32(STATUS) & E1000_STATUS_FUNC_1)
            adapter->eeprom_wol = 0;
        break;
    case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
        /* if quad port adapter, disable WoL on all but port A */
        if (global_quad_port_a != 0)
            adapter->eeprom_wol = 0;
        else
            adapter->quad_port_a = true;
        /* Reset for multiple quad port adapters */
        if (++global_quad_port_a == 4)
            global_quad_port_a = 0;
        break;
    }

    /* initialize the wol settings based on the eeprom settings */
    adapter->wol = adapter->eeprom_wol;
    device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

    /* Auto detect PHY address */
    if (hw->mac_type == e1000_ce4100) {
        for (i = 0; i < 32; i++) {
            hw->phy_addr = i;
            e1000_read_phy_reg(hw, PHY_ID2, &tmp);
            if (tmp == 0 || tmp == 0xFF) {
                if (i == 31)
                    goto err_eeprom;
                continue;
            } else
                break;
        }
    }

    /* reset the hardware with the new settings */
    e1000_reset(adapter);

    strcpy(netdev->name, "eth%d");
    err = register_netdev(netdev);
    if (err)
        goto err_register;

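    /* leave VLAN filtering off until a VLAN id is actually registered */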
    e1000_vlan_filter_on_off(adapter, false);

    /* print bus type/speed/width info */
    e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
           ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
           ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
            (hw->bus_speed == e1000_bus_speed_120) ? 120 :
            (hw->bus_speed == e1000_bus_speed_100) ? 100 :
            (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
           ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
           netdev->dev_addr);

    /* carrier off reporting is important to ethtool even BEFORE open */
    netif_carrier_off(netdev);

    e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

    cards_found++;
    return 0;

err_register:
err_eeprom:
    e1000_phy_hw_reset(hw);

    if (hw->flash_address)
        iounmap(hw->flash_address);
    kfree(adapter->tx_ring);
    kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
    iounmap(hw->ce4100_gbe_mdio_base_virt);
    iounmap(hw->hw_addr);
err_ioremap:
    free_netdev(netdev);
err_alloc_etherdev:
    pci_release_selected_regions(pdev, bars);
err_pci_reg:
    pci_disable_device(pdev);
    return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;

    e1000_down_and_stop(adapter);
    e1000_release_manageability(adapter);

    unregister_netdev(netdev);

    e1000_phy_hw_reset(hw);

    kfree(adapter->tx_ring);
    kfree(adapter->rx_ring);

    if (hw->mac_type == e1000_ce4100)
        iounmap(hw->ce4100_gbe_mdio_base_virt);
    iounmap(hw->hw_addr);
    if (hw->flash_address)
        iounmap(hw->flash_address);
    pci_release_selected_regions(pdev, adapter->bars);

    free_netdev(netdev);

    pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
    adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

    adapter->num_tx_queues = 1;
    adapter->num_rx_queues = 1;

    if (e1000_alloc_queues(adapter)) {
        e_err(probe, "Unable to allocate memory for queues\n");
        return -ENOMEM;
    }

    /* Explicitly disable IRQ since the NIC can be in any state. */
    e1000_irq_disable(adapter);

    spin_lock_init(&adapter->stats_lock);
    mutex_init(&adapter->mutex);

    set_bit(__E1000_DOWN, &adapter->flags);

    return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
    adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                               sizeof(struct e1000_tx_ring), GFP_KERNEL);
    if (!adapter->tx_ring)
        return -ENOMEM;

    adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                               sizeof(struct e1000_rx_ring), GFP_KERNEL);
    if (!adapter->rx_ring) {
        kfree(adapter->tx_ring);
        return -ENOMEM;
    }

    return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    int err;

    /* disallow open during test */
    if (test_bit(__E1000_TESTING, &adapter->flags))
        return -EBUSY;

    netif_carrier_off(netdev);

    /* allocate transmit descriptors */
    err = e1000_setup_all_tx_resources(adapter);
    if (err)
        goto err_setup_tx;

    /* allocate receive descriptors */
    err = e1000_setup_all_rx_resources(adapter);
    if (err)
        goto err_setup_rx;

    e1000_power_up_phy(adapter);

    adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
    if ((hw->mng_cookie.status &
         E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
        e1000_update_mng_vlan(adapter);
    }

    /* before we allocate an interrupt, we must be ready to handle it.
     * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
     * as soon as we call pci_request_irq, so we have to setup our
     * clean_rx handler before we do so. */
    e1000_configure(adapter);

    err = e1000_request_irq(adapter);
    if (err)
        goto err_req_irq;

    /* From here on the code is the same as e1000_up() */
    clear_bit(__E1000_DOWN, &adapter->flags);

    napi_enable(&adapter->napi);

    e1000_irq_enable(adapter);

    netif_start_queue(netdev);

    /* fire a link status change interrupt to start the watchdog */
    ew32(ICS, E1000_ICS_LSC);

    return E1000_SUCCESS;

err_req_irq:
    e1000_power_down_phy(adapter);
    e1000_free_all_rx_resources(adapter);
err_setup_rx:
    e1000_free_all_tx_resources(adapter);
err_setup_tx:
    e1000_reset(adapter);

    return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;

    WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
    e1000_down(adapter);
    e1000_power_down_phy(adapter);
    e1000_free_irq(adapter);

    e1000_free_all_tx_resources(adapter);
    e1000_free_all_rx_resources(adapter);

    /* kill manageability vlan ID if supported, but not if a vlan with
     * the same ID is registered on the host OS (let 8021q kill it) */
    if ((hw->mng_cookie.status &
         E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
        !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
        e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
    }

    return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
                                  unsigned long len)
{
    struct e1000_hw *hw = &adapter->hw;
    unsigned long begin = (unsigned long)start;
    unsigned long end = begin + len;

    /* First rev 82545 and 82546 need to not allow any memory
     * write location to cross 64k boundary due to errata 23 */
    if (hw->mac_type == e1000_82545 ||
        hw->mac_type == e1000_ce4100 ||
        hw->mac_type == e1000_82546) {
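        /* begin and end - 1 share their upper bits exactly when the
         * buffer sits within one 64 kB region; e.g. begin 0x1fff0 with
         * len 0x20 gives (0x1fff0 ^ 0x2000f) >> 16 != 0: a crossing */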
        return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
    }

    return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr)
{
    struct pci_dev *pdev = adapter->pdev;
    int size;

    size = sizeof(struct e1000_buffer) * txdr->count;
    txdr->buffer_info = vzalloc(size);
    if (!txdr->buffer_info) {
        e_err(probe, "Unable to allocate memory for the Tx descriptor "
              "ring\n");
        return -ENOMEM;
    }

    /* round up to nearest 4K */

    txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
    txdr->size = ALIGN(txdr->size, 4096);

    txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
                                    GFP_KERNEL);
    if (!txdr->desc) {
setup_tx_desc_die:
        vfree(txdr->buffer_info);
        e_err(probe, "Unable to allocate memory for the Tx descriptor "
              "ring\n");
        return -ENOMEM;
    }

    /* Fix for errata 23, can't cross 64kB boundary */
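    /* note the first allocation is kept while retrying below; freeing
     * it first would likely yield the same boundary-crossing block */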
1da177e4
LT
1531 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1532 void *olddesc = txdr->desc;
1533 dma_addr_t olddma = txdr->dma;
feb8f478 1534 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
675ad473 1535 txdr->size, txdr->desc);
2648345f 1536 /* Try again, without freeing the previous */
b16f53be
NN
1537 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1538 &txdr->dma, GFP_KERNEL);
2648345f 1539 /* Failed allocation, critical failure */
96838a40 1540 if (!txdr->desc) {
b16f53be
NN
1541 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1542 olddma);
1da177e4
LT
1543 goto setup_tx_desc_die;
1544 }
1545
1546 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1547 /* give up */
b16f53be
NN
1548 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1549 txdr->dma);
1550 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551 olddma);
feb8f478 1552 e_err(probe, "Unable to allocate aligned memory "
675ad473 1553 "for the transmit descriptor ring\n");
1da177e4
LT
1554 vfree(txdr->buffer_info);
1555 return -ENOMEM;
1556 } else {
2648345f 1557 /* Free old allocation, new allocation was successful */
b16f53be
NN
1558 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1559 olddma);
1da177e4
LT
1560 }
1561 }
1562 memset(txdr->desc, 0, txdr->size);
1563
1564 txdr->next_to_use = 0;
1565 txdr->next_to_clean = 0;
1566
1567 return 0;
1568}
1569
581d708e
MC
1570/**
1571 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1572 * (Descriptors) for all queues
1573 * @adapter: board private structure
1574 *
581d708e
MC
1575 * Return 0 on success, negative on failure
1576 **/
1577
64798845 1578int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
581d708e
MC
1579{
1580 int i, err = 0;
1581
f56799ea 1582 for (i = 0; i < adapter->num_tx_queues; i++) {
581d708e
MC
1583 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1584 if (err) {
feb8f478 1585 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
3fbbc72e
VA
1586 for (i--; i >= 0; i--)
1587 e1000_free_tx_resources(adapter,
1588 &adapter->tx_ring[i]);
581d708e
MC
1589 break;
1590 }
1591 }
1592
1593 return err;
1594}
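/* Editor's sketch (not driver code): the loop above is the standard
 * partial-failure unwind — when queue i fails to allocate, free queues
 * i-1..0 that already succeeded, then report the error. With
 * hypothetical setup()/teardown() callbacks standing in for the ring
 * helpers:
 */
static int setup_all_queues(int nqueues, int (*setup)(int q),
			    void (*teardown)(int q))
{
	int i, err = 0;

	for (i = 0; i < nqueues; i++) {
		err = setup(i);
		if (err) {
			for (i--; i >= 0; i--)
				teardown(i);
			break;
		}
	}
	return err;
}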
1595
1da177e4
LT
1596/**
1597 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1598 * @adapter: board private structure
1599 *
1600 * Configure the Tx unit of the MAC after a reset.
1601 **/
1602
64798845 1603static void e1000_configure_tx(struct e1000_adapter *adapter)
1da177e4 1604{
406874a7 1605 u64 tdba;
581d708e 1606 struct e1000_hw *hw = &adapter->hw;
1532ecea 1607 u32 tdlen, tctl, tipg;
406874a7 1608 u32 ipgr1, ipgr2;
1da177e4
LT
1609
1610 /* Setup the HW Tx Head and Tail descriptor pointers */
1611
f56799ea 1612 switch (adapter->num_tx_queues) {
24025e4e
MC
1613 case 1:
1614 default:
581d708e
MC
1615 tdba = adapter->tx_ring[0].dma;
1616 tdlen = adapter->tx_ring[0].count *
1617 sizeof(struct e1000_tx_desc);
1dc32918
JP
1618 ew32(TDLEN, tdlen);
1619 ew32(TDBAH, (tdba >> 32));
1620 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1621 ew32(TDT, 0);
1622 ew32(TDH, 0);
6a951698
AK
1623 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
     E1000_TDH : E1000_82542_TDH);
1624 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
     E1000_TDT : E1000_82542_TDT);
24025e4e
MC
1625 break;
1626 }
1da177e4
LT
1627
1628 /* Set the default values for the Tx Inter Packet Gap timer */
1532ecea 1629 if ((hw->media_type == e1000_media_type_fiber ||
d89b6c67 1630 hw->media_type == e1000_media_type_internal_serdes))
0fadb059
JK
1631 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1632 else
1633 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1634
581d708e 1635 switch (hw->mac_type) {
1da177e4
LT
1636 case e1000_82542_rev2_0:
1637 case e1000_82542_rev2_1:
1638 tipg = DEFAULT_82542_TIPG_IPGT;
0fadb059
JK
1639 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1640 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1da177e4
LT
1641 break;
1642 default:
0fadb059
JK
1643 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1644 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1645 break;
1da177e4 1646 }
0fadb059
JK
1647 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1648 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1dc32918 1649 ew32(TIPG, tipg);
1da177e4
LT
1650
1651 /* Set the Tx Interrupt Delay register */
1652
1dc32918 1653 ew32(TIDV, adapter->tx_int_delay);
581d708e 1654 if (hw->mac_type >= e1000_82540)
1dc32918 1655 ew32(TADV, adapter->tx_abs_int_delay);
1da177e4
LT
1656
1657 /* Program the Transmit Control Register */
1658
1dc32918 1659 tctl = er32(TCTL);
1da177e4 1660 tctl &= ~E1000_TCTL_CT;
7e6c9861 1661 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1da177e4
LT
1662 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1663
581d708e 1664 e1000_config_collision_dist(hw);
1da177e4
LT
1665
1666 /* Setup Transmit Descriptor Settings for eop descriptor */
6a042dab
JB
1667 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1668
1669 /* only set IDE if we are delaying interrupts using the timers */
1670 if (adapter->tx_int_delay)
1671 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1da177e4 1672
581d708e 1673 if (hw->mac_type < e1000_82543)
1da177e4
LT
1674 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1675 else
1676 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1677
1678 /* Cache if we're 82544 running in PCI-X because we'll
1679 * need this to apply a workaround later in the send path. */
581d708e
MC
1680 if (hw->mac_type == e1000_82544 &&
1681 hw->bus_type == e1000_bus_type_pcix)
3db1cd5c 1682 adapter->pcix_82544 = true;
7e6c9861 1683
1dc32918 1684 ew32(TCTL, tctl);
7e6c9861 1685
1da177e4
LT
1686}
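/* Editor's sketch (not driver code): TIPG packs the three
 * inter-packet-gap fields into one register — IPGT in the low bits,
 * with IPGR1 and IPGR2 at the shifts used above (10 and 20 per
 * e1000_hw.h; treat the exact shift values here as assumptions).
 */
#include <stdint.h>

static uint32_t pack_tipg(uint32_t ipgt, uint32_t ipgr1, uint32_t ipgr2)
{
	return ipgt | (ipgr1 << 10) | (ipgr2 << 20);
}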
1687
1688/**
1689 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1690 * @adapter: board private structure
581d708e 1691 * @rxdr: rx descriptor ring (for a specific queue) to setup
1da177e4
LT
1692 *
1693 * Returns 0 on success, negative on failure
1694 **/
1695
64798845
JP
1696static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1697 struct e1000_rx_ring *rxdr)
1da177e4 1698{
1da177e4 1699 struct pci_dev *pdev = adapter->pdev;
2d7edb92 1700 int size, desc_len;
1da177e4
LT
1701
1702 size = sizeof(struct e1000_buffer) * rxdr->count;
89bf67f1 1703 rxdr->buffer_info = vzalloc(size);
581d708e 1704 if (!rxdr->buffer_info) {
feb8f478
ET
1705 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1706 "ring\n");
1da177e4
LT
1707 return -ENOMEM;
1708 }
1da177e4 1709
1532ecea 1710 desc_len = sizeof(struct e1000_rx_desc);
2d7edb92 1711
1da177e4
LT
1712 /* Round up to nearest 4K */
1713
2d7edb92 1714 rxdr->size = rxdr->count * desc_len;
9099cfb9 1715 rxdr->size = ALIGN(rxdr->size, 4096);
1da177e4 1716
b16f53be
NN
1717 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1718 GFP_KERNEL);
1da177e4 1719
581d708e 1720 if (!rxdr->desc) {
feb8f478
ET
1721 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1722 "ring\n");
1da177e4 1723setup_rx_desc_die:
1da177e4
LT
1724 vfree(rxdr->buffer_info);
1725 return -ENOMEM;
1726 }
1727
2648345f 1728 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
1729 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1730 void *olddesc = rxdr->desc;
1731 dma_addr_t olddma = rxdr->dma;
feb8f478 1732 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
675ad473 1733 rxdr->size, rxdr->desc);
2648345f 1734 /* Try again, without freeing the previous */
b16f53be
NN
1735 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1736 &rxdr->dma, GFP_KERNEL);
2648345f 1737 /* Failed allocation, critical failure */
581d708e 1738 if (!rxdr->desc) {
b16f53be
NN
1739 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1740 olddma);
feb8f478
ET
1741 e_err(probe, "Unable to allocate memory for the Rx "
1742 "descriptor ring\n");
1da177e4
LT
1743 goto setup_rx_desc_die;
1744 }
1745
1746 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1747 /* give up */
b16f53be
NN
1748 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1749 rxdr->dma);
1750 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1751 olddma);
feb8f478
ET
1752 e_err(probe, "Unable to allocate aligned memory for "
1753 "the Rx descriptor ring\n");
581d708e 1754 goto setup_rx_desc_die;
1da177e4 1755 } else {
2648345f 1756 /* Free old allocation, new allocation was successful */
b16f53be
NN
1757 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1758 olddma);
1da177e4
LT
1759 }
1760 }
1761 memset(rxdr->desc, 0, rxdr->size);
1762
1763 rxdr->next_to_clean = 0;
1764 rxdr->next_to_use = 0;
edbbb3ca 1765 rxdr->rx_skb_top = NULL;
1da177e4
LT
1766
1767 return 0;
1768}
1769
581d708e
MC
1770/**
1771 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1772 * (Descriptors) for all queues
1773 * @adapter: board private structure
1774 *
581d708e
MC
1775 * Return 0 on success, negative on failure
1776 **/
1777
64798845 1778int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
581d708e
MC
1779{
1780 int i, err = 0;
1781
f56799ea 1782 for (i = 0; i < adapter->num_rx_queues; i++) {
581d708e
MC
1783 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1784 if (err) {
feb8f478 1785 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
3fbbc72e
VA
1786 for (i--; i >= 0; i--)
1787 e1000_free_rx_resources(adapter,
1788 &adapter->rx_ring[i]);
581d708e
MC
1789 break;
1790 }
1791 }
1792
1793 return err;
1794}
1795
1da177e4 1796/**
2648345f 1797 * e1000_setup_rctl - configure the receive control registers
1da177e4
LT
1798 * @adapter: Board private structure
1799 **/
64798845 1800static void e1000_setup_rctl(struct e1000_adapter *adapter)
1da177e4 1801{
1dc32918 1802 struct e1000_hw *hw = &adapter->hw;
630b25cd 1803 u32 rctl;
1da177e4 1804
1dc32918 1805 rctl = er32(RCTL);
1da177e4
LT
1806
1807 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1808
d5bc77a2
DN
1809 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1810 E1000_RCTL_RDMTS_HALF |
1dc32918 1811 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1da177e4 1812
1dc32918 1813 if (hw->tbi_compatibility_on == 1)
1da177e4
LT
1814 rctl |= E1000_RCTL_SBP;
1815 else
1816 rctl &= ~E1000_RCTL_SBP;
1817
2d7edb92
MC
1818 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1819 rctl &= ~E1000_RCTL_LPE;
1820 else
1821 rctl |= E1000_RCTL_LPE;
1822
1da177e4 1823 /* Setup buffer sizes */
9e2feace
AK
1824 rctl &= ~E1000_RCTL_SZ_4096;
1825 rctl |= E1000_RCTL_BSEX;
1826 switch (adapter->rx_buffer_len) {
a1415ee6
JK
1827 case E1000_RXBUFFER_2048:
1828 default:
1829 rctl |= E1000_RCTL_SZ_2048;
1830 rctl &= ~E1000_RCTL_BSEX;
1831 break;
1832 case E1000_RXBUFFER_4096:
1833 rctl |= E1000_RCTL_SZ_4096;
1834 break;
1835 case E1000_RXBUFFER_8192:
1836 rctl |= E1000_RCTL_SZ_8192;
1837 break;
1838 case E1000_RXBUFFER_16384:
1839 rctl |= E1000_RCTL_SZ_16384;
1840 break;
2d7edb92
MC
1841 }
1842
1dc32918 1843 ew32(RCTL, rctl);
1da177e4
LT
1844}
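/* Editor's sketch (not driver code): the switch above selects the RCTL
 * receive buffer size encoding. 2048 uses the base 2-bit BSIZE code;
 * the larger sizes reuse the small-buffer codes with BSEX set, which
 * scales them by 16 (so SZ_4096 is the 256-byte code plus BSEX). The
 * register values below mirror e1000_hw.h and should be read as
 * assumptions.
 */
#include <stdint.h>

#define SK_RCTL_BSEX	0x02000000	/* buffer size extension */
#define SK_RCTL_SZ_2048	0x00000000	/* BSIZE = 00 */
#define SK_RCTL_SZ_1024	0x00010000
#define SK_RCTL_SZ_512	0x00020000
#define SK_RCTL_SZ_256	0x00030000

static uint32_t rctl_size_bits(uint32_t buf_len)
{
	switch (buf_len) {
	case 4096:
		return SK_RCTL_SZ_256 | SK_RCTL_BSEX;	/* 256 * 16 */
	case 8192:
		return SK_RCTL_SZ_512 | SK_RCTL_BSEX;	/* 512 * 16 */
	case 16384:
		return SK_RCTL_SZ_1024 | SK_RCTL_BSEX;	/* 1024 * 16 */
	case 2048:
	default:
		return SK_RCTL_SZ_2048;			/* base encoding */
	}
}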
1845
1846/**
1847 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1848 * @adapter: board private structure
1849 *
1850 * Configure the Rx unit of the MAC after a reset.
1851 **/
1852
64798845 1853static void e1000_configure_rx(struct e1000_adapter *adapter)
1da177e4 1854{
406874a7 1855 u64 rdba;
581d708e 1856 struct e1000_hw *hw = &adapter->hw;
1532ecea 1857 u32 rdlen, rctl, rxcsum;
2d7edb92 1858
edbbb3ca
JB
1859 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1860 rdlen = adapter->rx_ring[0].count *
1861 sizeof(struct e1000_rx_desc);
1862 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1863 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1864 } else {
1865 rdlen = adapter->rx_ring[0].count *
1866 sizeof(struct e1000_rx_desc);
1867 adapter->clean_rx = e1000_clean_rx_irq;
1868 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1869 }
1da177e4
LT
1870
1871 /* disable receives while setting up the descriptors */
1dc32918
JP
1872 rctl = er32(RCTL);
1873 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1da177e4
LT
1874
1875 /* set the Receive Delay Timer Register */
1dc32918 1876 ew32(RDTR, adapter->rx_int_delay);
1da177e4 1877
581d708e 1878 if (hw->mac_type >= e1000_82540) {
1dc32918 1879 ew32(RADV, adapter->rx_abs_int_delay);
835bb129 1880 if (adapter->itr_setting != 0)
1dc32918 1881 ew32(ITR, 1000000000 / (adapter->itr * 256));
1da177e4
LT
1882 }
1883
581d708e
MC
1884 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1885 * the Base and Length of the Rx Descriptor Ring */
f56799ea 1886 switch (adapter->num_rx_queues) {
24025e4e
MC
1887 case 1:
1888 default:
581d708e 1889 rdba = adapter->rx_ring[0].dma;
1dc32918
JP
1890 ew32(RDLEN, rdlen);
1891 ew32(RDBAH, (rdba >> 32));
1892 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893 ew32(RDT, 0);
1894 ew32(RDH, 0);
6a951698
AK
1895 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
     E1000_RDH : E1000_82542_RDH);
1896 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
     E1000_RDT : E1000_82542_RDT);
581d708e 1897 break;
24025e4e
MC
1898 }
1899
1da177e4 1900 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
581d708e 1901 if (hw->mac_type >= e1000_82543) {
1dc32918 1902 rxcsum = er32(RXCSUM);
630b25cd 1903 if (adapter->rx_csum)
2d7edb92 1904 rxcsum |= E1000_RXCSUM_TUOFL;
630b25cd 1905 else
2d7edb92 1906 /* don't need to clear IPPCSE as it defaults to 0 */
630b25cd 1907 rxcsum &= ~E1000_RXCSUM_TUOFL;
1dc32918 1908 ew32(RXCSUM, rxcsum);
1da177e4
LT
1909 }
1910
1911 /* Enable Receives */
d5bc77a2 1912 ew32(RCTL, rctl | E1000_RCTL_EN);
1da177e4
LT
1913}
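/* Editor's sketch (not driver code): adapter->itr is a target rate in
 * interrupts/second, while the ITR register holds the minimum gap
 * between interrupts in 256 ns units — hence the
 * 1000000000 / (rate * 256) conversion used above.
 */
#include <stdint.h>

static uint32_t itr_reg_from_rate(uint32_t ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}
/* 20000 ints/s -> 195 register units, i.e. ~50 us between interrupts */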
1914
1915/**
581d708e 1916 * e1000_free_tx_resources - Free Tx Resources per Queue
1da177e4 1917 * @adapter: board private structure
581d708e 1918 * @tx_ring: Tx descriptor ring for a specific queue
1da177e4
LT
1919 *
1920 * Free all transmit software resources
1921 **/
1922
64798845
JP
1923static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1924 struct e1000_tx_ring *tx_ring)
1da177e4
LT
1925{
1926 struct pci_dev *pdev = adapter->pdev;
1927
581d708e 1928 e1000_clean_tx_ring(adapter, tx_ring);
1da177e4 1929
581d708e
MC
1930 vfree(tx_ring->buffer_info);
1931 tx_ring->buffer_info = NULL;
1da177e4 1932
b16f53be
NN
1933 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1934 tx_ring->dma);
1da177e4 1935
581d708e
MC
1936 tx_ring->desc = NULL;
1937}
1938
1939/**
1940 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1941 * @adapter: board private structure
1942 *
1943 * Free all transmit software resources
1944 **/
1945
64798845 1946void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
581d708e
MC
1947{
1948 int i;
1949
f56799ea 1950 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 1951 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1da177e4
LT
1952}
1953
64798845
JP
1954static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1955 struct e1000_buffer *buffer_info)
1da177e4 1956{
602c0554
AD
1957 if (buffer_info->dma) {
1958 if (buffer_info->mapped_as_page)
b16f53be
NN
1959 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1960 buffer_info->length, DMA_TO_DEVICE);
602c0554 1961 else
b16f53be 1962 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
602c0554 1963 buffer_info->length,
b16f53be 1964 DMA_TO_DEVICE);
602c0554
AD
1965 buffer_info->dma = 0;
1966 }
a9ebadd6 1967 if (buffer_info->skb) {
1da177e4 1968 dev_kfree_skb_any(buffer_info->skb);
a9ebadd6
JB
1969 buffer_info->skb = NULL;
1970 }
37e73df8 1971 buffer_info->time_stamp = 0;
a9ebadd6 1972 /* buffer_info must be completely set up in the transmit path */
1da177e4
LT
1973}
1974
1975/**
1976 * e1000_clean_tx_ring - Free Tx Buffers
1977 * @adapter: board private structure
581d708e 1978 * @tx_ring: ring to be cleaned
1da177e4
LT
1979 **/
1980
64798845
JP
1981static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1982 struct e1000_tx_ring *tx_ring)
1da177e4 1983{
1dc32918 1984 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
1985 struct e1000_buffer *buffer_info;
1986 unsigned long size;
1987 unsigned int i;
1988
1989 /* Free all the Tx ring sk_buffs */
1990
96838a40 1991 for (i = 0; i < tx_ring->count; i++) {
1da177e4
LT
1992 buffer_info = &tx_ring->buffer_info[i];
1993 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1994 }
1995
1996 size = sizeof(struct e1000_buffer) * tx_ring->count;
1997 memset(tx_ring->buffer_info, 0, size);
1998
1999 /* Zero out the descriptor ring */
2000
2001 memset(tx_ring->desc, 0, tx_ring->size);
2002
2003 tx_ring->next_to_use = 0;
2004 tx_ring->next_to_clean = 0;
3db1cd5c 2005 tx_ring->last_tx_tso = false;
1da177e4 2006
1dc32918
JP
2007 writel(0, hw->hw_addr + tx_ring->tdh);
2008 writel(0, hw->hw_addr + tx_ring->tdt);
581d708e
MC
2009}
2010
2011/**
2012 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2013 * @adapter: board private structure
2014 **/
2015
64798845 2016static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
581d708e
MC
2017{
2018 int i;
2019
f56799ea 2020 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 2021 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1da177e4
LT
2022}
2023
2024/**
2025 * e1000_free_rx_resources - Free Rx Resources
2026 * @adapter: board private structure
581d708e 2027 * @rx_ring: ring to clean the resources from
1da177e4
LT
2028 *
2029 * Free all receive software resources
2030 **/
2031
64798845
JP
2032static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2033 struct e1000_rx_ring *rx_ring)
1da177e4 2034{
1da177e4
LT
2035 struct pci_dev *pdev = adapter->pdev;
2036
581d708e 2037 e1000_clean_rx_ring(adapter, rx_ring);
1da177e4
LT
2038
2039 vfree(rx_ring->buffer_info);
2040 rx_ring->buffer_info = NULL;
2041
b16f53be
NN
2042 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2043 rx_ring->dma);
1da177e4
LT
2044
2045 rx_ring->desc = NULL;
2046}
2047
2048/**
581d708e 2049 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
1da177e4 2050 * @adapter: board private structure
581d708e
MC
2051 *
2052 * Free all receive software resources
2053 **/
2054
64798845 2055void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
581d708e
MC
2056{
2057 int i;
2058
f56799ea 2059 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e
MC
2060 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2061}
2062
2063/**
2064 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2065 * @adapter: board private structure
2066 * @rx_ring: ring to free buffers from
1da177e4
LT
2067 **/
2068
64798845
JP
2069static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2070 struct e1000_rx_ring *rx_ring)
1da177e4 2071{
1dc32918 2072 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2073 struct e1000_buffer *buffer_info;
2074 struct pci_dev *pdev = adapter->pdev;
2075 unsigned long size;
630b25cd 2076 unsigned int i;
1da177e4
LT
2077
2078 /* Free all the Rx ring sk_buffs */
96838a40 2079 for (i = 0; i < rx_ring->count; i++) {
1da177e4 2080 buffer_info = &rx_ring->buffer_info[i];
edbbb3ca
JB
2081 if (buffer_info->dma &&
2082 adapter->clean_rx == e1000_clean_rx_irq) {
b16f53be 2083 dma_unmap_single(&pdev->dev, buffer_info->dma,
edbbb3ca 2084 buffer_info->length,
b16f53be 2085 DMA_FROM_DEVICE);
edbbb3ca
JB
2086 } else if (buffer_info->dma &&
2087 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
b16f53be
NN
2088 dma_unmap_page(&pdev->dev, buffer_info->dma,
2089 buffer_info->length,
2090 DMA_FROM_DEVICE);
679be3ba 2091 }
1da177e4 2092
679be3ba 2093 buffer_info->dma = 0;
edbbb3ca
JB
2094 if (buffer_info->page) {
2095 put_page(buffer_info->page);
2096 buffer_info->page = NULL;
2097 }
679be3ba 2098 if (buffer_info->skb) {
1da177e4
LT
2099 dev_kfree_skb(buffer_info->skb);
2100 buffer_info->skb = NULL;
997f5cbd 2101 }
1da177e4
LT
2102 }
2103
edbbb3ca
JB
2104 /* there may also be some cached data from a chained receive */
2105 if (rx_ring->rx_skb_top) {
2106 dev_kfree_skb(rx_ring->rx_skb_top);
2107 rx_ring->rx_skb_top = NULL;
2108 }
2109
1da177e4
LT
2110 size = sizeof(struct e1000_buffer) * rx_ring->count;
2111 memset(rx_ring->buffer_info, 0, size);
2112
2113 /* Zero out the descriptor ring */
1da177e4
LT
2114 memset(rx_ring->desc, 0, rx_ring->size);
2115
2116 rx_ring->next_to_clean = 0;
2117 rx_ring->next_to_use = 0;
2118
1dc32918
JP
2119 writel(0, hw->hw_addr + rx_ring->rdh);
2120 writel(0, hw->hw_addr + rx_ring->rdt);
581d708e
MC
2121}
2122
2123/**
2124 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2125 * @adapter: board private structure
2126 **/
2127
64798845 2128static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
581d708e
MC
2129{
2130 int i;
2131
f56799ea 2132 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e 2133 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1da177e4
LT
2134}
2135
2136/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2137 * and memory write and invalidate disabled for certain operations
2138 */
64798845 2139static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
1da177e4 2140{
1dc32918 2141 struct e1000_hw *hw = &adapter->hw;
1da177e4 2142 struct net_device *netdev = adapter->netdev;
406874a7 2143 u32 rctl;
1da177e4 2144
1dc32918 2145 e1000_pci_clear_mwi(hw);
1da177e4 2146
1dc32918 2147 rctl = er32(RCTL);
1da177e4 2148 rctl |= E1000_RCTL_RST;
1dc32918
JP
2149 ew32(RCTL, rctl);
2150 E1000_WRITE_FLUSH();
1da177e4
LT
2151 mdelay(5);
2152
96838a40 2153 if (netif_running(netdev))
581d708e 2154 e1000_clean_all_rx_rings(adapter);
1da177e4
LT
2155}
2156
64798845 2157static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
1da177e4 2158{
1dc32918 2159 struct e1000_hw *hw = &adapter->hw;
1da177e4 2160 struct net_device *netdev = adapter->netdev;
406874a7 2161 u32 rctl;
1da177e4 2162
1dc32918 2163 rctl = er32(RCTL);
1da177e4 2164 rctl &= ~E1000_RCTL_RST;
1dc32918
JP
2165 ew32(RCTL, rctl);
2166 E1000_WRITE_FLUSH();
1da177e4
LT
2167 mdelay(5);
2168
1dc32918
JP
2169 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2170 e1000_pci_set_mwi(hw);
1da177e4 2171
96838a40 2172 if (netif_running(netdev)) {
72d64a43
JK
2173 /* No need to loop, because 82542 supports only 1 queue */
2174 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
7c4d3367 2175 e1000_configure_rx(adapter);
72d64a43 2176 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
1da177e4
LT
2177 }
2178}
2179
2180/**
2181 * e1000_set_mac - Change the Ethernet Address of the NIC
2182 * @netdev: network interface device structure
2183 * @p: pointer to an address structure
2184 *
2185 * Returns 0 on success, negative on failure
2186 **/
2187
64798845 2188static int e1000_set_mac(struct net_device *netdev, void *p)
1da177e4 2189{
60490fe0 2190 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 2191 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2192 struct sockaddr *addr = p;
2193
96838a40 2194 if (!is_valid_ether_addr(addr->sa_data))
1da177e4
LT
2195 return -EADDRNOTAVAIL;
2196
2197 /* 82542 2.0 needs to be in reset to write receive address registers */
2198
1dc32918 2199 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2200 e1000_enter_82542_rst(adapter);
2201
2202 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1dc32918 2203 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
1da177e4 2204
1dc32918 2205 e1000_rar_set(hw, hw->mac_addr, 0);
1da177e4 2206
1dc32918 2207 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2208 e1000_leave_82542_rst(adapter);
2209
2210 return 0;
2211}
2212
2213/**
db0ce50d 2214 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
1da177e4
LT
2215 * @netdev: network interface device structure
2216 *
db0ce50d
PM
2217 * The set_rx_mode entry point is called whenever the unicast or multicast
2218 * address lists or the network interface flags are updated. This routine is
2219 * responsible for configuring the hardware for proper unicast, multicast,
1da177e4
LT
2220 * promiscuous mode, and all-multi behavior.
2221 **/
2222
64798845 2223static void e1000_set_rx_mode(struct net_device *netdev)
1da177e4 2224{
60490fe0 2225 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 2226 struct e1000_hw *hw = &adapter->hw;
ccffad25
JP
2227 struct netdev_hw_addr *ha;
2228 bool use_uc = false;
406874a7
JP
2229 u32 rctl;
2230 u32 hash_value;
868d5309 2231 int i, rar_entries = E1000_RAR_ENTRIES;
1532ecea 2232 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
81c52285
JB
2233 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2234
2235 if (!mcarray) {
feb8f478 2236 e_err(probe, "memory allocation failed\n");
81c52285
JB
2237 return;
2238 }
cd94dd0b 2239
2648345f
MC
2240 /* Check for Promiscuous and All Multicast modes */
2241
1dc32918 2242 rctl = er32(RCTL);
1da177e4 2243
96838a40 2244 if (netdev->flags & IFF_PROMISC) {
1da177e4 2245 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
746b9f02 2246 rctl &= ~E1000_RCTL_VFE;
1da177e4 2247 } else {
1532ecea 2248 if (netdev->flags & IFF_ALLMULTI)
746b9f02 2249 rctl |= E1000_RCTL_MPE;
1532ecea 2250 else
746b9f02 2251 rctl &= ~E1000_RCTL_MPE;
1532ecea 2252 /* Enable VLAN filter if there is a VLAN */
5622e404 2253 if (e1000_vlan_used(adapter))
1532ecea 2254 rctl |= E1000_RCTL_VFE;
db0ce50d
PM
2255 }
2256
32e7bfc4 2257 if (netdev_uc_count(netdev) > rar_entries - 1) {
db0ce50d
PM
2258 rctl |= E1000_RCTL_UPE;
2259 } else if (!(netdev->flags & IFF_PROMISC)) {
2260 rctl &= ~E1000_RCTL_UPE;
ccffad25 2261 use_uc = true;
1da177e4
LT
2262 }
2263
1dc32918 2264 ew32(RCTL, rctl);
1da177e4
LT
2265
2266 /* 82542 2.0 needs to be in reset to write receive address registers */
2267
96838a40 2268 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2269 e1000_enter_82542_rst(adapter);
2270
db0ce50d
PM
2271 /* load the first 14 addresses into the exact filters 1-14. Unicast
2272 * addresses take precedence to avoid disabling unicast filtering
2273 * when possible.
2274 *
b595076a 2275 * RAR 0 is used for the station MAC address.
1da177e4
LT
2276 * If there are fewer than 14 addresses, go ahead and clear the filters.
2277 */
ccffad25
JP
2278 i = 1;
2279 if (use_uc)
32e7bfc4 2280 netdev_for_each_uc_addr(ha, netdev) {
ccffad25
JP
2281 if (i == rar_entries)
2282 break;
2283 e1000_rar_set(hw, ha->addr, i++);
2284 }
2285
22bedad3 2286 netdev_for_each_mc_addr(ha, netdev) {
7a81e9f3
JP
2287 if (i == rar_entries) {
2288 /* load any remaining addresses into the hash table */
2289 u32 hash_reg, hash_bit, mta;
22bedad3 2290 hash_value = e1000_hash_mc_addr(hw, ha->addr);
7a81e9f3
JP
2291 hash_reg = (hash_value >> 5) & 0x7F;
2292 hash_bit = hash_value & 0x1F;
2293 mta = (1 << hash_bit);
2294 mcarray[hash_reg] |= mta;
10886af5 2295 } else {
22bedad3 2296 e1000_rar_set(hw, ha->addr, i++);
1da177e4
LT
2297 }
2298 }
2299
7a81e9f3
JP
2300 for (; i < rar_entries; i++) {
2301 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2302 E1000_WRITE_FLUSH();
2303 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2304 E1000_WRITE_FLUSH();
1da177e4
LT
2305 }
2306
81c52285
JB
2307 /* write the hash table completely, write from bottom to avoid
2308 * both stupid write combining chipsets, and flushing each write */
2309 for (i = mta_reg_count - 1; i >= 0; i--) {
2310 /*
2311 * The 82544 has an erratum where writing an odd
2312 * offset overwrites the previous even offset; writing
2313 * backwards over the range avoids the issue by always
2314 * writing the odd offset first.
2315 */
2316 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2317 }
2318 E1000_WRITE_FLUSH();
2319
96838a40 2320 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4 2321 e1000_leave_82542_rst(adapter);
81c52285
JB
2322
2323 kfree(mcarray);
1da177e4
LT
2324}
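/* Editor's sketch (not driver code): addresses that don't fit in the
 * RAR exact filters land in a 4096-bit multicast table spread over 128
 * 32-bit MTA registers. Given the hash from e1000_hash_mc_addr(), bits
 * 11:5 pick the register and bits 4:0 the bit within it, as above:
 */
#include <stdint.h>

static void mta_set(uint32_t mta[128], uint16_t hash_value)
{
	uint32_t hash_reg = (hash_value >> 5) & 0x7F;	/* which register */
	uint32_t hash_bit = hash_value & 0x1F;		/* which bit in it */

	mta[hash_reg] |= 1u << hash_bit;
}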
2325
a4010afe
JB
2326/**
2327 * e1000_update_phy_info_task - get phy info
2328 * @work: work struct contained inside adapter struct
2329 *
2330 * Need to wait a few seconds after link up to get diagnostic information from
2331 * the phy
2332 */
5cf42fcd
JB
2333static void e1000_update_phy_info_task(struct work_struct *work)
2334{
2335 struct e1000_adapter *adapter = container_of(work,
a4010afe
JB
2336 struct e1000_adapter,
2337 phy_info_task.work);
0ef4eedc
JB
2338 if (test_bit(__E1000_DOWN, &adapter->flags))
2339 return;
2340 mutex_lock(&adapter->mutex);
a4010afe 2341 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
0ef4eedc 2342 mutex_unlock(&adapter->mutex);
1da177e4
LT
2343}
2344
5cf42fcd
JB
2345/**
2346 * e1000_82547_tx_fifo_stall_task - work task for the 82547 Tx FIFO stall workaround
2347 * @work: work struct contained inside adapter struct
2348 **/
2349static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2350{
2351 struct e1000_adapter *adapter = container_of(work,
a4010afe
JB
2352 struct e1000_adapter,
2353 fifo_stall_task.work);
1dc32918 2354 struct e1000_hw *hw = &adapter->hw;
1da177e4 2355 struct net_device *netdev = adapter->netdev;
406874a7 2356 u32 tctl;
1da177e4 2357
0ef4eedc
JB
2358 if (test_bit(__E1000_DOWN, &adapter->flags))
2359 return;
2360 mutex_lock(&adapter->mutex);
96838a40 2361 if (atomic_read(&adapter->tx_fifo_stall)) {
1dc32918
JP
2362 if ((er32(TDT) == er32(TDH)) &&
2363 (er32(TDFT) == er32(TDFH)) &&
2364 (er32(TDFTS) == er32(TDFHS))) {
2365 tctl = er32(TCTL);
2366 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2367 ew32(TDFT, adapter->tx_head_addr);
2368 ew32(TDFH, adapter->tx_head_addr);
2369 ew32(TDFTS, adapter->tx_head_addr);
2370 ew32(TDFHS, adapter->tx_head_addr);
2371 ew32(TCTL, tctl);
2372 E1000_WRITE_FLUSH();
1da177e4
LT
2373
2374 adapter->tx_fifo_head = 0;
2375 atomic_set(&adapter->tx_fifo_stall, 0);
2376 netif_wake_queue(netdev);
baa34745 2377 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
a4010afe 2378 schedule_delayed_work(&adapter->fifo_stall_task, 1);
1da177e4
LT
2379 }
2380 }
0ef4eedc 2381 mutex_unlock(&adapter->mutex);
1da177e4
LT
2382}
2383
b548192a 2384bool e1000_has_link(struct e1000_adapter *adapter)
be0f0719
JB
2385{
2386 struct e1000_hw *hw = &adapter->hw;
2387 bool link_active = false;
be0f0719 2388
6d9e5130
NS
2389 /* get_link_status is set on LSC (link status) interrupt or rx
2390 * sequence error interrupt (except on intel ce4100).
2391 * get_link_status will stay false until
2392 * e1000_check_for_link establishes link; this applies to copper
2393 * adapters ONLY
be0f0719
JB
2394 */
2395 switch (hw->media_type) {
2396 case e1000_media_type_copper:
6d9e5130
NS
2397 if (hw->mac_type == e1000_ce4100)
2398 hw->get_link_status = 1;
be0f0719 2399 if (hw->get_link_status) {
120a5d0d 2400 e1000_check_for_link(hw);
be0f0719
JB
2401 link_active = !hw->get_link_status;
2402 } else {
2403 link_active = true;
2404 }
2405 break;
2406 case e1000_media_type_fiber:
120a5d0d 2407 e1000_check_for_link(hw);
be0f0719
JB
2408 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2409 break;
2410 case e1000_media_type_internal_serdes:
120a5d0d 2411 e1000_check_for_link(hw);
be0f0719
JB
2412 link_active = hw->serdes_has_link;
2413 break;
2414 default:
2415 break;
2416 }
2417
2418 return link_active;
2419}
2420
1da177e4 2421/**
a4010afe
JB
2422 * e1000_watchdog - work function
2423 * @work: work struct contained inside adapter struct
1da177e4 2424 **/
a4010afe 2425static void e1000_watchdog(struct work_struct *work)
1da177e4 2426{
a4010afe
JB
2427 struct e1000_adapter *adapter = container_of(work,
2428 struct e1000_adapter,
2429 watchdog_task.work);
1dc32918 2430 struct e1000_hw *hw = &adapter->hw;
1da177e4 2431 struct net_device *netdev = adapter->netdev;
545c67c0 2432 struct e1000_tx_ring *txdr = adapter->tx_ring;
406874a7 2433 u32 link, tctl;
90fb5135 2434
0ef4eedc
JB
2435 if (test_bit(__E1000_DOWN, &adapter->flags))
2436 return;
2437
2438 mutex_lock(&adapter->mutex);
be0f0719
JB
2439 link = e1000_has_link(adapter);
2440 if ((netif_carrier_ok(netdev)) && link)
2441 goto link_up;
1da177e4 2442
96838a40
JB
2443 if (link) {
2444 if (!netif_carrier_ok(netdev)) {
406874a7 2445 u32 ctrl;
c3033b01 2446 bool txb2b = true;
be0f0719 2447 /* update snapshot of PHY registers on LSC */
1dc32918 2448 e1000_get_speed_and_duplex(hw,
1da177e4
LT
2449 &adapter->link_speed,
2450 &adapter->link_duplex);
2451
1dc32918 2452 ctrl = er32(CTRL);
675ad473
ET
2453 pr_info("%s NIC Link is Up %d Mbps %s, "
2454 "Flow Control: %s\n",
2455 netdev->name,
2456 adapter->link_speed,
2457 adapter->link_duplex == FULL_DUPLEX ?
2458 "Full Duplex" : "Half Duplex",
2459 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2460 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2461 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2462 E1000_CTRL_TFCE) ? "TX" : "None")));
1da177e4 2463
39ca5f03 2464 /* adjust timeout factor according to speed/duplex */
66a2b0a3 2465 adapter->tx_timeout_factor = 1;
7e6c9861
JK
2466 switch (adapter->link_speed) {
2467 case SPEED_10:
c3033b01 2468 txb2b = false;
be0f0719 2469 adapter->tx_timeout_factor = 16;
7e6c9861
JK
2470 break;
2471 case SPEED_100:
c3033b01 2472 txb2b = false;
7e6c9861
JK
2473 /* maybe add some timeout factor ? */
2474 break;
2475 }
2476
1532ecea 2477 /* enable transmits in the hardware */
1dc32918 2478 tctl = er32(TCTL);
7e6c9861 2479 tctl |= E1000_TCTL_EN;
1dc32918 2480 ew32(TCTL, tctl);
66a2b0a3 2481
1da177e4 2482 netif_carrier_on(netdev);
baa34745 2483 if (!test_bit(__E1000_DOWN, &adapter->flags))
a4010afe
JB
2484 schedule_delayed_work(&adapter->phy_info_task,
2485 2 * HZ);
1da177e4
LT
2486 adapter->smartspeed = 0;
2487 }
2488 } else {
96838a40 2489 if (netif_carrier_ok(netdev)) {
1da177e4
LT
2490 adapter->link_speed = 0;
2491 adapter->link_duplex = 0;
675ad473
ET
2492 pr_info("%s NIC Link is Down\n",
2493 netdev->name);
1da177e4 2494 netif_carrier_off(netdev);
baa34745
JB
2495
2496 if (!test_bit(__E1000_DOWN, &adapter->flags))
a4010afe
JB
2497 schedule_delayed_work(&adapter->phy_info_task,
2498 2 * HZ);
1da177e4
LT
2499 }
2500
2501 e1000_smartspeed(adapter);
2502 }
2503
be0f0719 2504link_up:
1da177e4
LT
2505 e1000_update_stats(adapter);
2506
1dc32918 2507 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1da177e4 2508 adapter->tpt_old = adapter->stats.tpt;
1dc32918 2509 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
1da177e4
LT
2510 adapter->colc_old = adapter->stats.colc;
2511
2512 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2513 adapter->gorcl_old = adapter->stats.gorcl;
2514 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2515 adapter->gotcl_old = adapter->stats.gotcl;
2516
1dc32918 2517 e1000_update_adaptive(hw);
1da177e4 2518
f56799ea 2519 if (!netif_carrier_ok(netdev)) {
581d708e 2520 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1da177e4
LT
2521 /* We've lost link, so the controller stops DMA,
2522 * but we've got queued Tx work that's never going
2523 * to get done, so reset controller to flush Tx.
2524 * (Do the reset outside of interrupt context). */
87041639
JK
2525 adapter->tx_timeout_count++;
2526 schedule_work(&adapter->reset_task);
0ef4eedc
JB
2527 /* exit immediately since reset is imminent */
2528 goto unlock;
1da177e4
LT
2529 }
2530 }
2531
eab2abf5
JB
2532 /* Simple mode for Interrupt Throttle Rate (ITR) */
2533 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2534 /*
2535 * Symmetric Tx/Rx gets a reduced ITR=2000;
2536 * Total asymmetrical Tx or Rx gets ITR=8000;
2537 * everyone else is between 2000-8000.
2538 */
2539 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2540 u32 dif = (adapter->gotcl > adapter->gorcl ?
2541 adapter->gotcl - adapter->gorcl :
2542 adapter->gorcl - adapter->gotcl) / 10000;
2543 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2544
2545 ew32(ITR, 1000000000 / (itr * 256));
2546 }
2547
1da177e4 2548 /* Cause software interrupt to ensure rx ring is cleaned */
1dc32918 2549 ew32(ICS, E1000_ICS_RXDMT0);
1da177e4 2550
2648345f 2551 /* Force detection of hung controller every watchdog period */
c3033b01 2552 adapter->detect_tx_hung = true;
1da177e4 2553
a4010afe 2554 /* Reschedule the task */
baa34745 2555 if (!test_bit(__E1000_DOWN, &adapter->flags))
a4010afe 2556 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
0ef4eedc
JB
2557
2558unlock:
2559 mutex_unlock(&adapter->mutex);
1da177e4
LT
2560}
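/* Editor's sketch (not driver code): "simple mode" ITR blends between
 * 2000 and 8000 interrupts/s using the byte counts from the last
 * watchdog period — symmetric Tx/Rx traffic lands near 2000, totally
 * one-sided traffic at 8000, everything else in between.
 */
#include <stdint.h>

static uint32_t simple_mode_itr(uint32_t gotcl, uint32_t gorcl)
{
	uint32_t goc = (gotcl + gorcl) / 10000;
	uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
				      : gorcl - gotcl) / 10000;

	return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
}
/* symmetric: dif == 0 -> 2000; one-sided: dif == goc -> 8000 */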
2561
835bb129
JB
2562enum latency_range {
2563 lowest_latency = 0,
2564 low_latency = 1,
2565 bulk_latency = 2,
2566 latency_invalid = 255
2567};
2568
2569/**
2570 * e1000_update_itr - update the dynamic ITR value based on statistics
8fce4731
JB
2571 * @adapter: pointer to adapter
2572 * @itr_setting: current adapter->itr
2573 * @packets: the number of packets during this measurement interval
2574 * @bytes: the number of bytes during this measurement interval
2575 *
835bb129
JB
2576 * Stores a new ITR value based on packets and byte
2577 * counts during the last interrupt. The advantage of per interrupt
2578 * computation is faster updates and more accurate ITR for the current
2579 * traffic pattern. Constants in this function were computed
2580 * based on theoretical maximum wire speed and thresholds were set based
2581 * on testing data as well as attempting to minimize response time
2582 * while increasing bulk throughput.
2583 * this functionality is controlled by the InterruptThrottleRate module
2584 * parameter (see e1000_param.c)
835bb129
JB
2585 **/
2586static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
64798845 2587 u16 itr_setting, int packets, int bytes)
835bb129
JB
2588{
2589 unsigned int retval = itr_setting;
2590 struct e1000_hw *hw = &adapter->hw;
2591
2592 if (unlikely(hw->mac_type < e1000_82540))
2593 goto update_itr_done;
2594
2595 if (packets == 0)
2596 goto update_itr_done;
2597
835bb129
JB
2598 switch (itr_setting) {
2599 case lowest_latency:
2b65326e
JB
2600 /* jumbo frames get bulk treatment*/
2601 if (bytes/packets > 8000)
2602 retval = bulk_latency;
2603 else if ((packets < 5) && (bytes > 512))
835bb129
JB
2604 retval = low_latency;
2605 break;
2606 case low_latency: /* 50 usec aka 20000 ints/s */
2607 if (bytes > 10000) {
2b65326e
JB
2608 /* jumbo frames need bulk latency setting */
2609 if (bytes/packets > 8000)
2610 retval = bulk_latency;
2611 else if ((packets < 10) || ((bytes/packets) > 1200))
835bb129
JB
2612 retval = bulk_latency;
2613 else if ((packets > 35))
2614 retval = lowest_latency;
2b65326e
JB
2615 } else if (bytes/packets > 2000)
2616 retval = bulk_latency;
2617 else if (packets <= 2 && bytes < 512)
835bb129
JB
2618 retval = lowest_latency;
2619 break;
2620 case bulk_latency: /* 250 usec aka 4000 ints/s */
2621 if (bytes > 25000) {
2622 if (packets > 35)
2623 retval = low_latency;
2b65326e
JB
2624 } else if (bytes < 6000) {
2625 retval = low_latency;
835bb129
JB
2626 }
2627 break;
2628 }
2629
2630update_itr_done:
2631 return retval;
2632}
2633
2634static void e1000_set_itr(struct e1000_adapter *adapter)
2635{
2636 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
2637 u16 current_itr;
2638 u32 new_itr = adapter->itr;
835bb129
JB
2639
2640 if (unlikely(hw->mac_type < e1000_82540))
2641 return;
2642
2643 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2644 if (unlikely(adapter->link_speed != SPEED_1000)) {
2645 current_itr = 0;
2646 new_itr = 4000;
2647 goto set_itr_now;
2648 }
2649
2650 adapter->tx_itr = e1000_update_itr(adapter,
2651 adapter->tx_itr,
2652 adapter->total_tx_packets,
2653 adapter->total_tx_bytes);
2b65326e
JB
2654 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2655 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2656 adapter->tx_itr = low_latency;
2657
835bb129
JB
2658 adapter->rx_itr = e1000_update_itr(adapter,
2659 adapter->rx_itr,
2660 adapter->total_rx_packets,
2661 adapter->total_rx_bytes);
2b65326e
JB
2662 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2663 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2664 adapter->rx_itr = low_latency;
835bb129
JB
2665
2666 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2667
835bb129
JB
2668 switch (current_itr) {
2669 /* counts and packets in update_itr are dependent on these numbers */
2670 case lowest_latency:
2671 new_itr = 70000;
2672 break;
2673 case low_latency:
2674 new_itr = 20000; /* aka hwitr = ~200 */
2675 break;
2676 case bulk_latency:
2677 new_itr = 4000;
2678 break;
2679 default:
2680 break;
2681 }
2682
2683set_itr_now:
2684 if (new_itr != adapter->itr) {
2685 /* this attempts to bias the interrupt rate towards Bulk
2686 * by adding intermediate steps when interrupt rate is
2687 * increasing */
2688 new_itr = new_itr > adapter->itr ?
2689 min(adapter->itr + (new_itr >> 2), new_itr) :
2690 new_itr;
2691 adapter->itr = new_itr;
1dc32918 2692 ew32(ITR, 1000000000 / (new_itr * 256));
835bb129 2693 }
835bb129
JB
2694}
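/* Editor's sketch (not driver code): when the dynamic algorithm asks
 * for a higher interrupt rate, the change is damped — the rate climbs
 * by only a quarter of the requested value per step, biasing the NIC
 * toward bulk (low interrupt rate) behavior, per the min() expression
 * above.
 */
#include <stdint.h>

static uint32_t damp_itr_increase(uint32_t old_itr, uint32_t new_itr)
{
	if (new_itr > old_itr) {
		uint32_t stepped = old_itr + (new_itr >> 2);

		new_itr = stepped < new_itr ? stepped : new_itr;
	}
	return new_itr;
}
/* damp_itr_increase(4000, 20000) -> 9000: several cycles are needed
 * before the full 20000 ints/s is reached */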
2695
1da177e4
LT
2696#define E1000_TX_FLAGS_CSUM 0x00000001
2697#define E1000_TX_FLAGS_VLAN 0x00000002
2698#define E1000_TX_FLAGS_TSO 0x00000004
2d7edb92 2699#define E1000_TX_FLAGS_IPV4 0x00000008
11a78dcf 2700#define E1000_TX_FLAGS_NO_FCS 0x00000010
1da177e4
LT
2701#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2702#define E1000_TX_FLAGS_VLAN_SHIFT 16
2703
64798845
JP
2704static int e1000_tso(struct e1000_adapter *adapter,
2705 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
1da177e4 2706{
1da177e4 2707 struct e1000_context_desc *context_desc;
545c67c0 2708 struct e1000_buffer *buffer_info;
1da177e4 2709 unsigned int i;
406874a7
JP
2710 u32 cmd_length = 0;
2711 u16 ipcse = 0, tucse, mss;
2712 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1da177e4
LT
2713 int err;
2714
89114afd 2715 if (skb_is_gso(skb)) {
1da177e4
LT
2716 if (skb_header_cloned(skb)) {
2717 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2718 if (err)
2719 return err;
2720 }
2721
ab6a5bb6 2722 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
7967168c 2723 mss = skb_shinfo(skb)->gso_size;
60828236 2724 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5
ACM
2725 struct iphdr *iph = ip_hdr(skb);
2726 iph->tot_len = 0;
2727 iph->check = 0;
aa8223c7
ACM
2728 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2729 iph->daddr, 0,
2730 IPPROTO_TCP,
2731 0);
2d7edb92 2732 cmd_length = E1000_TXD_CMD_IP;
ea2ae17d 2733 ipcse = skb_transport_offset(skb) - 1;
e15fdd03 2734 } else if (skb->protocol == htons(ETH_P_IPV6)) {
0660e03f 2735 ipv6_hdr(skb)->payload_len = 0;
aa8223c7 2736 tcp_hdr(skb)->check =
0660e03f
ACM
2737 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2738 &ipv6_hdr(skb)->daddr,
2739 0, IPPROTO_TCP, 0);
2d7edb92 2740 ipcse = 0;
2d7edb92 2741 }
bbe735e4 2742 ipcss = skb_network_offset(skb);
eddc9ec5 2743 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
ea2ae17d 2744 tucss = skb_transport_offset(skb);
aa8223c7 2745 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1da177e4
LT
2746 tucse = 0;
2747
2748 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2d7edb92 2749 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1da177e4 2750
581d708e
MC
2751 i = tx_ring->next_to_use;
2752 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
545c67c0 2753 buffer_info = &tx_ring->buffer_info[i];
1da177e4
LT
2754
2755 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2756 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2757 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2758 context_desc->upper_setup.tcp_fields.tucss = tucss;
2759 context_desc->upper_setup.tcp_fields.tucso = tucso;
2760 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2761 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2762 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2763 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2764
545c67c0 2765 buffer_info->time_stamp = jiffies;
a9ebadd6 2766 buffer_info->next_to_watch = i;
545c67c0 2767
581d708e
MC
2768 if (++i == tx_ring->count)
     i = 0;
2769 tx_ring->next_to_use = i;
1da177e4 2770
c3033b01 2771 return true;
1da177e4 2772 }
c3033b01 2773 return false;
1da177e4
LT
2774}
2775
64798845
JP
2776static bool e1000_tx_csum(struct e1000_adapter *adapter,
2777 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
1da177e4
LT
2778{
2779 struct e1000_context_desc *context_desc;
545c67c0 2780 struct e1000_buffer *buffer_info;
1da177e4 2781 unsigned int i;
406874a7 2782 u8 css;
3ed30676 2783 u32 cmd_len = E1000_TXD_CMD_DEXT;
1da177e4 2784
3ed30676
DG
2785 if (skb->ip_summed != CHECKSUM_PARTIAL)
2786 return false;
1da177e4 2787
3ed30676 2788 switch (skb->protocol) {
09640e63 2789 case cpu_to_be16(ETH_P_IP):
3ed30676
DG
2790 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2791 cmd_len |= E1000_TXD_CMD_TCP;
2792 break;
09640e63 2793 case cpu_to_be16(ETH_P_IPV6):
3ed30676
DG
2794 /* XXX not handling all IPV6 headers */
2795 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2796 cmd_len |= E1000_TXD_CMD_TCP;
2797 break;
2798 default:
2799 if (unlikely(net_ratelimit()))
feb8f478
ET
2800 e_warn(drv, "checksum_partial proto=%x!\n",
2801 skb->protocol);
3ed30676
DG
2802 break;
2803 }
1da177e4 2804
0d0b1672 2805 css = skb_checksum_start_offset(skb);
1da177e4 2806
3ed30676
DG
2807 i = tx_ring->next_to_use;
2808 buffer_info = &tx_ring->buffer_info[i];
2809 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
545c67c0 2810
3ed30676
DG
2811 context_desc->lower_setup.ip_config = 0;
2812 context_desc->upper_setup.tcp_fields.tucss = css;
2813 context_desc->upper_setup.tcp_fields.tucso =
2814 css + skb->csum_offset;
2815 context_desc->upper_setup.tcp_fields.tucse = 0;
2816 context_desc->tcp_seg_setup.data = 0;
2817 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
1da177e4 2818
3ed30676
DG
2819 buffer_info->time_stamp = jiffies;
2820 buffer_info->next_to_watch = i;
1da177e4 2821
3ed30676
DG
2822 if (unlikely(++i == tx_ring->count))
     i = 0;
2823 tx_ring->next_to_use = i;
2824
2825 return true;
1da177e4
LT
2826}
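/* Editor's sketch (not driver code): for CHECKSUM_PARTIAL the context
 * descriptor only needs two byte offsets — where hardware checksumming
 * starts (tucss) and where the 16-bit result is written back
 * (tucso = start + offset), both taken straight from the skb, as above.
 */
#include <stdint.h>

struct csum_ctx {
	uint8_t tucss;	/* offset where checksum coverage begins */
	uint8_t tucso;	/* offset where the result is stored */
};

static struct csum_ctx csum_ctx_from_skb(uint8_t csum_start,
					 uint8_t csum_offset)
{
	struct csum_ctx ctx = {
		.tucss = csum_start,
		.tucso = (uint8_t)(csum_start + csum_offset),
	};

	return ctx;
}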
2827
2828#define E1000_MAX_TXD_PWR 12
2829#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2830
64798845
JP
2831static int e1000_tx_map(struct e1000_adapter *adapter,
2832 struct e1000_tx_ring *tx_ring,
2833 struct sk_buff *skb, unsigned int first,
2834 unsigned int max_per_txd, unsigned int nr_frags,
2835 unsigned int mss)
1da177e4 2836{
1dc32918 2837 struct e1000_hw *hw = &adapter->hw;
602c0554 2838 struct pci_dev *pdev = adapter->pdev;
37e73df8 2839 struct e1000_buffer *buffer_info;
d20b606c 2840 unsigned int len = skb_headlen(skb);
602c0554 2841 unsigned int offset = 0, size, count = 0, i;
31c15a2f 2842 unsigned int f, bytecount, segs;
1da177e4
LT
2843
2844 i = tx_ring->next_to_use;
2845
96838a40 2846 while (len) {
37e73df8 2847 buffer_info = &tx_ring->buffer_info[i];
1da177e4 2848 size = min(len, max_per_txd);
fd803241
JK
2849 /* Workaround for Controller erratum --
2850 * descriptor for non-tso packet in a linear SKB that follows a
2851 * tso gets written back prematurely before the data is fully
0f15a8fa 2852 * DMA'd to the controller */
fd803241 2853 if (!skb->data_len && tx_ring->last_tx_tso &&
89114afd 2854 !skb_is_gso(skb)) {
3db1cd5c 2855 tx_ring->last_tx_tso = false;
fd803241
JK
2856 size -= 4;
2857 }
2858
1da177e4
LT
2859 /* Workaround for premature desc write-backs
2860 * in TSO mode. Append 4-byte sentinel desc */
96838a40 2861 if (unlikely(mss && !nr_frags && size == len && size > 8))
1da177e4 2862 size -= 4;
97338bde
MC
2863 /* work-around for errata 10 and it applies
2864 * to all controllers in PCI-X mode
2865 * The fix is to make sure that the first descriptor of a
2866 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2867 */
1dc32918 2868 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
97338bde
MC
2869 (size > 2015) && count == 0))
2870 size = 2015;
96838a40 2871
1da177e4
LT
2872 /* Workaround for potential 82544 hang in PCI-X. Avoid
2873 * terminating buffers within evenly-aligned dwords. */
96838a40 2874 if (unlikely(adapter->pcix_82544 &&
1da177e4
LT
2875 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2876 size > 4))
2877 size -= 4;
2878
2879 buffer_info->length = size;
cdd7549e 2880 /* set time_stamp *before* dma to help avoid a possible race */
1da177e4 2881 buffer_info->time_stamp = jiffies;
602c0554 2882 buffer_info->mapped_as_page = false;
b16f53be
NN
2883 buffer_info->dma = dma_map_single(&pdev->dev,
2884 skb->data + offset,
2885 size, DMA_TO_DEVICE);
2886 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
602c0554 2887 goto dma_error;
a9ebadd6 2888 buffer_info->next_to_watch = i;
1da177e4
LT
2889
2890 len -= size;
2891 offset += size;
2892 count++;
37e73df8
AD
2893 if (len) {
2894 i++;
2895 if (unlikely(i == tx_ring->count))
2896 i = 0;
2897 }
1da177e4
LT
2898 }
2899
96838a40 2900 for (f = 0; f < nr_frags; f++) {
9e903e08 2901 const struct skb_frag_struct *frag;
1da177e4
LT
2902
2903 frag = &skb_shinfo(skb)->frags[f];
9e903e08 2904 len = skb_frag_size(frag);
877749bf 2905 offset = 0;
1da177e4 2906
96838a40 2907 while (len) {
877749bf 2908 unsigned long bufend;
37e73df8
AD
2909 i++;
2910 if (unlikely(i == tx_ring->count))
2911 i = 0;
2912
1da177e4
LT
2913 buffer_info = &tx_ring->buffer_info[i];
2914 size = min(len, max_per_txd);
1da177e4
LT
2915 /* Workaround for premature desc write-backs
2916 * in TSO mode. Append 4-byte sentinel desc */
96838a40 2917 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
1da177e4 2918 size -= 4;
1da177e4
LT
2919 /* Workaround for potential 82544 hang in PCI-X.
2920 * Avoid terminating buffers within evenly-aligned
2921 * dwords. */
877749bf
IC
2922 bufend = (unsigned long)
2923 page_to_phys(skb_frag_page(frag));
2924 bufend += offset + size - 1;
96838a40 2925 if (unlikely(adapter->pcix_82544 &&
877749bf
IC
2926 !(bufend & 4) &&
2927 size > 4))
1da177e4
LT
2928 size -= 4;
2929
2930 buffer_info->length = size;
1da177e4 2931 buffer_info->time_stamp = jiffies;
602c0554 2932 buffer_info->mapped_as_page = true;
877749bf
IC
2933 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2934 offset, size, DMA_TO_DEVICE);
b16f53be 2935 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
602c0554 2936 goto dma_error;
a9ebadd6 2937 buffer_info->next_to_watch = i;
1da177e4
LT
2938
2939 len -= size;
2940 offset += size;
2941 count++;
1da177e4
LT
2942 }
2943 }
2944
31c15a2f
DN
2945 segs = skb_shinfo(skb)->gso_segs ?: 1;
2946 /* multiply data chunks by size of headers */
2947 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2948
1da177e4 2949 tx_ring->buffer_info[i].skb = skb;
31c15a2f
DN
2950 tx_ring->buffer_info[i].segs = segs;
2951 tx_ring->buffer_info[i].bytecount = bytecount;
1da177e4
LT
2952 tx_ring->buffer_info[first].next_to_watch = i;
2953
2954 return count;
602c0554
AD
2955
2956dma_error:
2957 dev_err(&pdev->dev, "TX DMA map failed\n");
2958 buffer_info->dma = 0;
c1fa347f 2959 if (count)
602c0554 2960 count--;
c1fa347f
RK
2961
2962 while (count--) {
2963 if (i == 0)
602c0554 2964 i += tx_ring->count;
c1fa347f 2965 i--;
602c0554
AD
2966 buffer_info = &tx_ring->buffer_info[i];
2967 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2968 }
2969
2970 return 0;
1da177e4
LT
2971}
2972
64798845
JP
2973static void e1000_tx_queue(struct e1000_adapter *adapter,
2974 struct e1000_tx_ring *tx_ring, int tx_flags,
2975 int count)
1da177e4 2976{
1dc32918 2977 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2978 struct e1000_tx_desc *tx_desc = NULL;
2979 struct e1000_buffer *buffer_info;
406874a7 2980 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1da177e4
LT
2981 unsigned int i;
2982
96838a40 2983 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
1da177e4
LT
2984 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2985 E1000_TXD_CMD_TSE;
2d7edb92
MC
2986 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2987
96838a40 2988 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2d7edb92 2989 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
1da177e4
LT
2990 }
2991
96838a40 2992 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
1da177e4
LT
2993 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2994 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2995 }
2996
96838a40 2997 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
1da177e4
LT
2998 txd_lower |= E1000_TXD_CMD_VLE;
2999 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3000 }
3001
11a78dcf
BG
3002 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3003 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3004
1da177e4
LT
3005 i = tx_ring->next_to_use;
3006
96838a40 3007 while (count--) {
1da177e4
LT
3008 buffer_info = &tx_ring->buffer_info[i];
3009 tx_desc = E1000_TX_DESC(*tx_ring, i);
3010 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3011 tx_desc->lower.data =
3012 cpu_to_le32(txd_lower | buffer_info->length);
3013 tx_desc->upper.data = cpu_to_le32(txd_upper);
96838a40 3014 if (unlikely(++i == tx_ring->count))
     i = 0;
1da177e4
LT
3015 }
3016
3017 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3018
11a78dcf
BG
3019 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3020 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3021 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3022
1da177e4
LT
3023 /* Force memory writes to complete before letting h/w
3024 * know there are new descriptors to fetch. (Only
3025 * applicable for weak-ordered memory model archs,
3026 * such as IA-64). */
3027 wmb();
3028
3029 tx_ring->next_to_use = i;
1dc32918 3030 writel(i, hw->hw_addr + tx_ring->tdt);
2ce9047f
JB
3031 /* we need this if more than one processor can write to our tail
3032 * at a time, it synchronizes IO on IA64/Altix systems */
3033 mmiowb();
1da177e4
LT
3034}
3035
3036/**
3037 * 82547 workaround to avoid controller hang in half-duplex environment.
3038 * The workaround is to avoid queuing a large packet that would span
3039 * the internal Tx FIFO ring boundary by notifying the stack to resend
3040 * the packet at a later time. This gives the Tx FIFO an opportunity to
3041 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3042 * to the beginning of the Tx FIFO.
3043 **/
3044
3045#define E1000_FIFO_HDR 0x10
3046#define E1000_82547_PAD_LEN 0x3E0
3047
64798845
JP
3048static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3049 struct sk_buff *skb)
1da177e4 3050{
406874a7
JP
3051 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3052 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
1da177e4 3053
9099cfb9 3054 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
1da177e4 3055
96838a40 3056 if (adapter->link_duplex != HALF_DUPLEX)
1da177e4
LT
3057 goto no_fifo_stall_required;
3058
96838a40 3059 if (atomic_read(&adapter->tx_fifo_stall))
1da177e4
LT
3060 return 1;
3061
96838a40 3062 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1da177e4
LT
3063 atomic_set(&adapter->tx_fifo_stall, 1);
3064 return 1;
3065 }
3066
3067no_fifo_stall_required:
3068 adapter->tx_fifo_head += skb_fifo_len;
96838a40 3069 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1da177e4
LT
3070 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3071 return 0;
3072}
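/* Editor's sketch (not driver code): the workaround tracks a software
 * copy of the 82547 Tx FIFO head. Each frame consumes its length plus
 * a 16-byte FIFO header, rounded up to 16 bytes, and the head wraps
 * modulo the FIFO size; a stall is requested when the frame plus the
 * 0x3E0-byte pad would no longer fit before the wrap point, matching
 * the test above.
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_FIFO_HDR	0x10
#define SK_FIFO_PAD	0x3E0

static bool fifo_needs_stall(uint32_t fifo_head, uint32_t fifo_size,
			     uint32_t skb_len)
{
	uint32_t fifo_space = fifo_size - fifo_head;
	uint32_t skb_fifo_len =
		(skb_len + SK_FIFO_HDR + (SK_FIFO_HDR - 1)) &
		~(uint32_t)(SK_FIFO_HDR - 1);

	return skb_fifo_len >= SK_FIFO_PAD + fifo_space;
}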
3073
65c7973f
JB
3074static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3075{
3076 struct e1000_adapter *adapter = netdev_priv(netdev);
3077 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3078
3079 netif_stop_queue(netdev);
3080 /* Herbert's original patch had:
3081 * smp_mb__after_netif_stop_queue();
3082 * but since that doesn't exist yet, just open code it. */
3083 smp_mb();
3084
3085 /* We need to check again in case another CPU has just
3086 * made room available. */
3087 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3088 return -EBUSY;
3089
3090 /* A reprieve! */
3091 netif_start_queue(netdev);
fcfb1224 3092 ++adapter->restart_queue;
65c7973f
JB
3093 return 0;
3094}
3095
3096static int e1000_maybe_stop_tx(struct net_device *netdev,
3097 struct e1000_tx_ring *tx_ring, int size)
3098{
3099 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3100 return 0;
3101 return __e1000_maybe_stop_tx(netdev, size);
3102}
3103
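/* Editor's sketch (not driver code): the stop-then-recheck sequence
 * above closes a race with the Tx cleanup path — descriptors may be
 * freed between the caller's space check and netif_stop_queue(), so a
 * full barrier and a second check decide between staying stopped and
 * the "reprieve". A userspace analogue with C11 atomics (all names
 * hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool maybe_stop_queue(atomic_int *free_descs,
			     atomic_bool *stopped, int needed)
{
	atomic_store(stopped, true);		/* netif_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst); /* the smp_mb() above */

	if (atomic_load(free_descs) >= needed) {
		atomic_store(stopped, false);	/* a reprieve! */
		return false;
	}
	return true;				/* stay stopped: -EBUSY */
}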
1da177e4 3104#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
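/* Editor's note: TXD_USE_COUNT over-counts by design — a buffer of S
 * bytes needs ceil(S / 2^X) descriptors, and (S >> X) + 1 yields
 * exactly that unless S is a multiple of 2^X, in which case one spare
 * descriptor is reserved. With X = 12 (4096-byte chunks):
 *
 *   TXD_USE_COUNT(4095, 12) == 1
 *   TXD_USE_COUNT(4096, 12) == 2   (one spare)
 *   TXD_USE_COUNT(9000, 12) == 3
 */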
3b29a56d
SH
3105static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3106 struct net_device *netdev)
1da177e4 3107{
60490fe0 3108 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 3109 struct e1000_hw *hw = &adapter->hw;
581d708e 3110 struct e1000_tx_ring *tx_ring;
1da177e4
LT
3111 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3112 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3113 unsigned int tx_flags = 0;
e743d313 3114 unsigned int len = skb_headlen(skb);
6d1e3aa7
KK
3115 unsigned int nr_frags;
3116 unsigned int mss;
1da177e4 3117 int count = 0;
76c224bc 3118 int tso;
1da177e4 3119 unsigned int f;
1da177e4 3120
65c7973f
JB
3121 /* This goes back to the question of how to logically map a tx queue
3122 * to a flow. Right now, performance is impacted slightly negatively
3123 * if using multiple tx queues. If the stack breaks away from a
3124 * single qdisc implementation, we can look at this again. */
581d708e 3125 tx_ring = adapter->tx_ring;
24025e4e 3126
581d708e 3127 if (unlikely(skb->len <= 0)) {
1da177e4
LT
3128 dev_kfree_skb_any(skb);
3129 return NETDEV_TX_OK;
3130 }
3131
7967168c 3132 mss = skb_shinfo(skb)->gso_size;
76c224bc 3133 /* The controller does a simple calculation to
1da177e4
LT
3134 * make sure there is enough room in the FIFO before
3135 * initiating the DMA for each buffer. The calc is:
3136 * 4 = ceil(buffer len/mss). To make sure we don't
3137 * overrun the FIFO, adjust the max buffer len if mss
3138 * drops. */
96838a40 3139 if (mss) {
406874a7 3140 u8 hdr_len;
1da177e4
LT
3141 max_per_txd = min(mss << 2, max_per_txd);
3142 max_txd_pwr = fls(max_per_txd) - 1;
9a3056da 3143
ab6a5bb6 3144 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
6d1e3aa7 3145 if (skb->data_len && hdr_len == len) {
1dc32918 3146 switch (hw->mac_type) {
9f687888 3147 unsigned int pull_size;
683a2aa3
HX
3148 case e1000_82544:
3149 /* Make sure we have room to chop off 4 bytes,
3150 * and that the end alignment will work out to
3151 * this hardware's requirements
3152 * NOTE: this is a TSO-only workaround;
3153 * if the end byte alignment is not correct, move us
3154 * into the next dword */
27a884dc 3155 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
683a2aa3
HX
3156 break;
3157 /* fall through */
9f687888
JK
3158 pull_size = min((unsigned int)4, skb->data_len);
3159 if (!__pskb_pull_tail(skb, pull_size)) {
feb8f478
ET
3160 e_err(drv, "__pskb_pull_tail "
3161 "failed.\n");
9f687888 3162 dev_kfree_skb_any(skb);
749dfc70 3163 return NETDEV_TX_OK;
9f687888 3164 }
e743d313 3165 len = skb_headlen(skb);
9f687888
JK
3166 break;
3167 default:
3168 /* do nothing */
3169 break;
d74bbd3b 3170 }
9a3056da 3171 }
1da177e4
LT
3172 }
3173
9a3056da 3174 /* reserve a descriptor for the offload context */
84fa7933 3175 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
1da177e4 3176 count++;
2648345f 3177 count++;
fd803241 3178
fd803241 3179 /* Controller Erratum workaround */
89114afd 3180 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
fd803241 3181 count++;
fd803241 3182
1da177e4
LT
3183 count += TXD_USE_COUNT(len, max_txd_pwr);
3184
96838a40 3185 if (adapter->pcix_82544)
1da177e4
LT
3186 count++;
3187
96838a40 3188 /* work-around for errata 10 and it applies to all controllers
97338bde
MC
3189 * in PCI-X mode, so add one more descriptor to the count
3190 */
1dc32918 3191 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
97338bde
MC
3192 (len > 2015)))
3193 count++;
3194
1da177e4 3195 nr_frags = skb_shinfo(skb)->nr_frags;
96838a40 3196 for (f = 0; f < nr_frags; f++)
9e903e08 3197 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
1da177e4 3198 max_txd_pwr);
96838a40 3199 if (adapter->pcix_82544)
1da177e4
LT
3200 count += nr_frags;
3201
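 /* Illustrative note: TXD_USE_COUNT(S, X), defined earlier in this
 * file, expands to ((S) >> (X)) + 1 in this driver, a cheap upper
 * bound on the descriptors a buffer needs; e.g. a 9000-byte frag
 * with max_txd_pwr = 12 counts as (9000 >> 12) + 1 = 3 descriptors. */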
1da177e4
LT
3202 /* need: count + 2 desc gap to keep tail from touching
3203 * head, otherwise try next time */
8017943e 3204 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
1da177e4 3205 return NETDEV_TX_BUSY;
1da177e4 3206
a4010afe
JB
3207 if (unlikely((hw->mac_type == e1000_82547) &&
3208 (e1000_82547_fifo_workaround(adapter, skb)))) {
3209 netif_stop_queue(netdev);
3210 if (!test_bit(__E1000_DOWN, &adapter->flags))
3211 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3212 return NETDEV_TX_BUSY;
1da177e4
LT
3213 }
3214
5622e404 3215 if (vlan_tx_tag_present(skb)) {
1da177e4
LT
3216 tx_flags |= E1000_TX_FLAGS_VLAN;
3217 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3218 }
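 /* Illustrative example: for VID 100, vlan_tx_tag_get() returns the
 * 16-bit tag, which shifted by E1000_TX_FLAGS_VLAN_SHIFT (16 in this
 * driver) occupies the upper half of tx_flags; e1000_tx_queue()
 * later copies it into the descriptor's special field so hardware
 * inserts the tag on transmit. */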
3219
581d708e 3220 first = tx_ring->next_to_use;
96838a40 3221
581d708e 3222 tso = e1000_tso(adapter, tx_ring, skb);
1da177e4
LT
3223 if (tso < 0) {
3224 dev_kfree_skb_any(skb);
3225 return NETDEV_TX_OK;
3226 }
3227
fd803241 3228 if (likely(tso)) {
8fce4731 3229 if (likely(hw->mac_type != e1000_82544))
3db1cd5c 3230 tx_ring->last_tx_tso = true;
1da177e4 3231 tx_flags |= E1000_TX_FLAGS_TSO;
fd803241 3232 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
1da177e4
LT
3233 tx_flags |= E1000_TX_FLAGS_CSUM;
3234
60828236 3235 if (likely(skb->protocol == htons(ETH_P_IP)))
2d7edb92
MC
3236 tx_flags |= E1000_TX_FLAGS_IPV4;
3237
11a78dcf
BG
3238 if (unlikely(skb->no_fcs))
3239 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3240
37e73df8
AD
3241 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3242 nr_frags, mss);
1da177e4 3243
37e73df8
AD
3244 if (count) {
3245 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
37e73df8
AD
3246 /* Make sure there is space in the ring for the next send. */
3247 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
1da177e4 3248
37e73df8
AD
3249 } else {
3250 dev_kfree_skb_any(skb);
3251 tx_ring->buffer_info[first].time_stamp = 0;
3252 tx_ring->next_to_use = first;
3253 }
1da177e4 3254
1da177e4
LT
3255 return NETDEV_TX_OK;
3256}
3257
b04e36ba
TD
3258#define NUM_REGS 38 /* 1 based count */
3259static void e1000_regdump(struct e1000_adapter *adapter)
3260{
3261 struct e1000_hw *hw = &adapter->hw;
3262 u32 regs[NUM_REGS];
3263 u32 *regs_buff = regs;
3264 int i = 0;
3265
e29b5d8f
TD
3266 static const char * const reg_name[] = {
3267 "CTRL", "STATUS",
3268 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3269 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3270 "TIDV", "TXDCTL", "TADV", "TARC0",
3271 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3272 "TXDCTL1", "TARC1",
3273 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3274 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3275 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
b04e36ba
TD
3276 };
3277
3278 regs_buff[0] = er32(CTRL);
3279 regs_buff[1] = er32(STATUS);
3280
3281 regs_buff[2] = er32(RCTL);
3282 regs_buff[3] = er32(RDLEN);
3283 regs_buff[4] = er32(RDH);
3284 regs_buff[5] = er32(RDT);
3285 regs_buff[6] = er32(RDTR);
3286
3287 regs_buff[7] = er32(TCTL);
3288 regs_buff[8] = er32(TDBAL);
3289 regs_buff[9] = er32(TDBAH);
3290 regs_buff[10] = er32(TDLEN);
3291 regs_buff[11] = er32(TDH);
3292 regs_buff[12] = er32(TDT);
3293 regs_buff[13] = er32(TIDV);
3294 regs_buff[14] = er32(TXDCTL);
3295 regs_buff[15] = er32(TADV);
3296 regs_buff[16] = er32(TARC0);
3297
3298 regs_buff[17] = er32(TDBAL1);
3299 regs_buff[18] = er32(TDBAH1);
3300 regs_buff[19] = er32(TDLEN1);
3301 regs_buff[20] = er32(TDH1);
3302 regs_buff[21] = er32(TDT1);
3303 regs_buff[22] = er32(TXDCTL1);
3304 regs_buff[23] = er32(TARC1);
3305 regs_buff[24] = er32(CTRL_EXT);
3306 regs_buff[25] = er32(ERT);
3307 regs_buff[26] = er32(RDBAL0);
3308 regs_buff[27] = er32(RDBAH0);
3309 regs_buff[28] = er32(TDFH);
3310 regs_buff[29] = er32(TDFT);
3311 regs_buff[30] = er32(TDFHS);
3312 regs_buff[31] = er32(TDFTS);
3313 regs_buff[32] = er32(TDFPC);
3314 regs_buff[33] = er32(RDFH);
3315 regs_buff[34] = er32(RDFT);
3316 regs_buff[35] = er32(RDFHS);
3317 regs_buff[36] = er32(RDFTS);
3318 regs_buff[37] = er32(RDFPC);
3319
3320 pr_info("Register dump\n");
e29b5d8f
TD
3321 for (i = 0; i < NUM_REGS; i++)
3322 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
b04e36ba
TD
3323}
3324
3325/*
3326 * e1000_dump: Print registers, tx ring and rx ring
3327 */
3328static void e1000_dump(struct e1000_adapter *adapter)
3329{
3330 /* this code doesn't handle multiple rings */
3331 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3332 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3333 int i;
3334
3335 if (!netif_msg_hw(adapter))
3336 return;
3337
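 /* Note: netif_msg_hw() gates this dump on adapter->msg_enable, so
 * the full register/ring dump can be requested at runtime, e.g. with
 * "ethtool -s ethX msglvl 0x2000" (NETIF_MSG_HW). */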
3338 /* Print Registers */
3339 e1000_regdump(adapter);
3340
3341 /*
3342 * transmit dump
3343 */
3344 pr_info("TX Desc ring0 dump\n");
3345
3346 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3347 *
3348 * Legacy Transmit Descriptor
3349 * +--------------------------------------------------------------+
3350 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3351 * +--------------------------------------------------------------+
3352 * 8 | Special | CSS | Status | CMD | CSO | Length |
3353 * +--------------------------------------------------------------+
3354 * 63 48 47 36 35 32 31 24 23 16 15 0
3355 *
3356 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3357 * 63 48 47 40 39 32 31 16 15 8 7 0
3358 * +----------------------------------------------------------------+
3359 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3360 * +----------------------------------------------------------------+
3361 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3362 * +----------------------------------------------------------------+
3363 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3364 *
3365 * Extended Data Descriptor (DTYP=0x1)
3366 * +----------------------------------------------------------------+
3367 * 0 | Buffer Address [63:0] |
3368 * +----------------------------------------------------------------+
3369 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3370 * +----------------------------------------------------------------+
3371 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3372 */
e29b5d8f
TD
3373 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3374 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
b04e36ba
TD
3375
3376 if (!netif_msg_tx_done(adapter))
3377 goto rx_ring_summary;
3378
3379 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3380 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3381 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3382 struct my_u { u64 a; u64 b; };
3383 struct my_u *u = (struct my_u *)tx_desc;
e29b5d8f
TD
3384 const char *type;
3385
b04e36ba 3386 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
e29b5d8f 3387 type = "NTC/U";
b04e36ba 3388 else if (i == tx_ring->next_to_use)
e29b5d8f 3389 type = "NTU";
b04e36ba 3390 else if (i == tx_ring->next_to_clean)
e29b5d8f 3391 type = "NTC";
b04e36ba 3392 else
e29b5d8f 3393 type = "";
b04e36ba 3394
e29b5d8f
TD
3395 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3396 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3397 le64_to_cpu(u->a), le64_to_cpu(u->b),
3398 (u64)buffer_info->dma, buffer_info->length,
3399 buffer_info->next_to_watch,
3400 (u64)buffer_info->time_stamp, buffer_info->skb, type);
b04e36ba
TD
3401 }
3402
3403rx_ring_summary:
3404 /*
3405 * receive dump
3406 */
3407 pr_info("\nRX Desc ring dump\n");
3408
3409 /* Legacy Receive Descriptor Format
3410 *
3411 * +-----------------------------------------------------+
3412 * | Buffer Address [63:0] |
3413 * +-----------------------------------------------------+
3414 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3415 * +-----------------------------------------------------+
3416 * 63 48 47 40 39 32 31 16 15 0
3417 */
e29b5d8f 3418 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
b04e36ba
TD
3419
3420 if (!netif_msg_rx_status(adapter))
3421 goto exit;
3422
3423 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3424 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3425 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3426 struct my_u { u64 a; u64 b; };
3427 struct my_u *u = (struct my_u *)rx_desc;
e29b5d8f
TD
3428 const char *type;
3429
b04e36ba 3430 if (i == rx_ring->next_to_use)
e29b5d8f 3431 type = "NTU";
b04e36ba 3432 else if (i == rx_ring->next_to_clean)
e29b5d8f 3433 type = "NTC";
b04e36ba 3434 else
e29b5d8f 3435 type = "";
b04e36ba 3436
e29b5d8f
TD
3437 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3438 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3439 (u64)buffer_info->dma, buffer_info->skb, type);
b04e36ba
TD
3440 } /* for */
3441
3442 /* dump the descriptor caches */
3443 /* rx */
e29b5d8f 3444 pr_info("Rx descriptor cache in 64bit format\n");
b04e36ba 3445 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
e29b5d8f
TD
3446 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3447 i,
3448 readl(adapter->hw.hw_addr + i+4),
3449 readl(adapter->hw.hw_addr + i),
3450 readl(adapter->hw.hw_addr + i+12),
3451 readl(adapter->hw.hw_addr + i+8));
b04e36ba
TD
3452 }
3453 /* tx */
e29b5d8f 3454 pr_info("Tx descriptor cache in 64bit format\n");
b04e36ba 3455 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
e29b5d8f
TD
3456 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3457 i,
3458 readl(adapter->hw.hw_addr + i+4),
3459 readl(adapter->hw.hw_addr + i),
3460 readl(adapter->hw.hw_addr + i+12),
3461 readl(adapter->hw.hw_addr + i+8));
b04e36ba
TD
3462 }
3463exit:
3464 return;
3465}
3466
1da177e4
LT
3467/**
3468 * e1000_tx_timeout - Respond to a Tx Hang
3469 * @netdev: network interface device structure
3470 **/
3471
64798845 3472static void e1000_tx_timeout(struct net_device *netdev)
1da177e4 3473{
60490fe0 3474 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4
LT
3475
3476 /* Do the reset outside of interrupt context */
87041639
JK
3477 adapter->tx_timeout_count++;
3478 schedule_work(&adapter->reset_task);
1da177e4
LT
3479}
3480
64798845 3481static void e1000_reset_task(struct work_struct *work)
1da177e4 3482{
65f27f38
DH
3483 struct e1000_adapter *adapter =
3484 container_of(work, struct e1000_adapter, reset_task);
1da177e4 3485
0ef4eedc
JB
3486 if (test_bit(__E1000_DOWN, &adapter->flags))
3487 return;
b04e36ba 3488 e_err(drv, "Reset adapter\n");
338c15e4 3489 e1000_reinit_safe(adapter);
1da177e4
LT
3490}
3491
3492/**
3493 * e1000_get_stats - Get System Network Statistics
3494 * @netdev: network interface device structure
3495 *
3496 * Returns the address of the device statistics structure.
a4010afe 3497 * The statistics are actually updated from the watchdog.
1da177e4
LT
3498 **/
3499
64798845 3500static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
1da177e4 3501{
6b7660cd 3502 /* only return the current stats */
5fe31def 3503 return &netdev->stats;
1da177e4
LT
3504}
3505
3506/**
3507 * e1000_change_mtu - Change the Maximum Transfer Unit
3508 * @netdev: network interface device structure
3509 * @new_mtu: new value for maximum frame size
3510 *
3511 * Returns 0 on success, negative on failure
3512 **/
3513
64798845 3514static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
1da177e4 3515{
60490fe0 3516 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 3517 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3518 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3519
96838a40
JB
3520 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3521 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
feb8f478 3522 e_err(probe, "Invalid MTU setting\n");
1da177e4 3523 return -EINVAL;
2d7edb92 3524 }
1da177e4 3525
997f5cbd 3526 /* Adapter-specific max frame size limits. */
1dc32918 3527 switch (hw->mac_type) {
9e2feace 3528 case e1000_undefined ... e1000_82542_rev2_1:
b7cb8c2c 3529 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
feb8f478 3530 e_err(probe, "Jumbo Frames not supported.\n");
2d7edb92 3531 return -EINVAL;
2d7edb92 3532 }
997f5cbd 3533 break;
997f5cbd
JK
3534 default:
3535 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3536 break;
1da177e4
LT
3537 }
3538
3d6114e7
JB
3539 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540 msleep(1);
3541 /* e1000_down has a dependency on max_frame_size */
3542 hw->max_frame_size = max_frame;
3543 if (netif_running(netdev))
3544 e1000_down(adapter);
3545
87f5032e 3546 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
9e2feace 3547 * means we reserve 2 more; this pushes us to allocate from the next
edbbb3ca
JB
3548 * larger slab size.
3549 * i.e. RXBUFFER_2048 --> size-4096 slab
 3550 * However, with the new *_jumbo_rx* routines, jumbo receives will use
3551 * fragmented skbs */
9e2feace 3552
9926146b 3553 if (max_frame <= E1000_RXBUFFER_2048)
9e2feace 3554 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
edbbb3ca
JB
3555 else
3556#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
9e2feace 3557 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
edbbb3ca
JB
3558#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3559 adapter->rx_buffer_len = PAGE_SIZE;
3560#endif
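 /* Worked example (illustrative): new_mtu = 9000 gives max_frame =
 * 9000 + ENET_HEADER_SIZE (14) + ETHERNET_FCS_SIZE (4) = 9018; on
 * 4 KiB-page systems rx_buffer_len then becomes PAGE_SIZE and the
 * *_jumbo_rx* routines chain the frame across page fragments. */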
9e2feace
AK
3561
3562 /* adjust allocation if LPE protects us, and we aren't using SBP */
1dc32918 3563 if (!hw->tbi_compatibility_on &&
b7cb8c2c 3564 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
9e2feace
AK
3565 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3566 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
997f5cbd 3567
675ad473
ET
3568 pr_info("%s changing MTU from %d to %d\n",
3569 netdev->name, netdev->mtu, new_mtu);
2d7edb92
MC
3570 netdev->mtu = new_mtu;
3571
2db10a08 3572 if (netif_running(netdev))
3d6114e7
JB
3573 e1000_up(adapter);
3574 else
3575 e1000_reset(adapter);
3576
3577 clear_bit(__E1000_RESETTING, &adapter->flags);
1da177e4 3578
1da177e4
LT
3579 return 0;
3580}
3581
3582/**
3583 * e1000_update_stats - Update the board statistics counters
3584 * @adapter: board private structure
3585 **/
3586
64798845 3587void e1000_update_stats(struct e1000_adapter *adapter)
1da177e4 3588{
5fe31def 3589 struct net_device *netdev = adapter->netdev;
1da177e4 3590 struct e1000_hw *hw = &adapter->hw;
282f33c9 3591 struct pci_dev *pdev = adapter->pdev;
1da177e4 3592 unsigned long flags;
406874a7 3593 u16 phy_tmp;
1da177e4
LT
3594
3595#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3596
282f33c9
LV
3597 /*
3598 * Prevent stats update while adapter is being reset, or if the pci
3599 * connection is down.
3600 */
9026729b 3601 if (adapter->link_speed == 0)
282f33c9 3602 return;
81b1955e 3603 if (pci_channel_offline(pdev))
9026729b
AK
3604 return;
3605
1da177e4
LT
3606 spin_lock_irqsave(&adapter->stats_lock, flags);
3607
828d055f 3608 /* these counters are modified from e1000_tbi_adjust_stats,
1da177e4
LT
3609 * called from the interrupt context, so they must only
3610 * be written while holding adapter->stats_lock
3611 */
3612
1dc32918
JP
3613 adapter->stats.crcerrs += er32(CRCERRS);
3614 adapter->stats.gprc += er32(GPRC);
3615 adapter->stats.gorcl += er32(GORCL);
3616 adapter->stats.gorch += er32(GORCH);
3617 adapter->stats.bprc += er32(BPRC);
3618 adapter->stats.mprc += er32(MPRC);
3619 adapter->stats.roc += er32(ROC);
3620
1532ecea
JB
3621 adapter->stats.prc64 += er32(PRC64);
3622 adapter->stats.prc127 += er32(PRC127);
3623 adapter->stats.prc255 += er32(PRC255);
3624 adapter->stats.prc511 += er32(PRC511);
3625 adapter->stats.prc1023 += er32(PRC1023);
3626 adapter->stats.prc1522 += er32(PRC1522);
1dc32918
JP
3627
3628 adapter->stats.symerrs += er32(SYMERRS);
3629 adapter->stats.mpc += er32(MPC);
3630 adapter->stats.scc += er32(SCC);
3631 adapter->stats.ecol += er32(ECOL);
3632 adapter->stats.mcc += er32(MCC);
3633 adapter->stats.latecol += er32(LATECOL);
3634 adapter->stats.dc += er32(DC);
3635 adapter->stats.sec += er32(SEC);
3636 adapter->stats.rlec += er32(RLEC);
3637 adapter->stats.xonrxc += er32(XONRXC);
3638 adapter->stats.xontxc += er32(XONTXC);
3639 adapter->stats.xoffrxc += er32(XOFFRXC);
3640 adapter->stats.xofftxc += er32(XOFFTXC);
3641 adapter->stats.fcruc += er32(FCRUC);
3642 adapter->stats.gptc += er32(GPTC);
3643 adapter->stats.gotcl += er32(GOTCL);
3644 adapter->stats.gotch += er32(GOTCH);
3645 adapter->stats.rnbc += er32(RNBC);
3646 adapter->stats.ruc += er32(RUC);
3647 adapter->stats.rfc += er32(RFC);
3648 adapter->stats.rjc += er32(RJC);
3649 adapter->stats.torl += er32(TORL);
3650 adapter->stats.torh += er32(TORH);
3651 adapter->stats.totl += er32(TOTL);
3652 adapter->stats.toth += er32(TOTH);
3653 adapter->stats.tpr += er32(TPR);
3654
1532ecea
JB
3655 adapter->stats.ptc64 += er32(PTC64);
3656 adapter->stats.ptc127 += er32(PTC127);
3657 adapter->stats.ptc255 += er32(PTC255);
3658 adapter->stats.ptc511 += er32(PTC511);
3659 adapter->stats.ptc1023 += er32(PTC1023);
3660 adapter->stats.ptc1522 += er32(PTC1522);
1dc32918
JP
3661
3662 adapter->stats.mptc += er32(MPTC);
3663 adapter->stats.bptc += er32(BPTC);
1da177e4
LT
3664
3665 /* used for adaptive IFS */
3666
1dc32918 3667 hw->tx_packet_delta = er32(TPT);
1da177e4 3668 adapter->stats.tpt += hw->tx_packet_delta;
1dc32918 3669 hw->collision_delta = er32(COLC);
1da177e4
LT
3670 adapter->stats.colc += hw->collision_delta;
3671
96838a40 3672 if (hw->mac_type >= e1000_82543) {
1dc32918
JP
3673 adapter->stats.algnerrc += er32(ALGNERRC);
3674 adapter->stats.rxerrc += er32(RXERRC);
3675 adapter->stats.tncrs += er32(TNCRS);
3676 adapter->stats.cexterr += er32(CEXTERR);
3677 adapter->stats.tsctc += er32(TSCTC);
3678 adapter->stats.tsctfc += er32(TSCTFC);
1da177e4
LT
3679 }
3680
3681 /* Fill out the OS statistics structure */
5fe31def
AK
3682 netdev->stats.multicast = adapter->stats.mprc;
3683 netdev->stats.collisions = adapter->stats.colc;
1da177e4
LT
3684
3685 /* Rx Errors */
3686
87041639
JK
 3687 /* RLEC on some newer hardware can be incorrect, so build
3688 * our own version based on RUC and ROC */
5fe31def 3689 netdev->stats.rx_errors = adapter->stats.rxerrc +
1da177e4 3690 adapter->stats.crcerrs + adapter->stats.algnerrc +
87041639
JK
3691 adapter->stats.ruc + adapter->stats.roc +
3692 adapter->stats.cexterr;
49559854 3693 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
5fe31def
AK
3694 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3695 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3696 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3697 netdev->stats.rx_missed_errors = adapter->stats.mpc;
1da177e4
LT
3698
3699 /* Tx Errors */
49559854 3700 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
5fe31def
AK
3701 netdev->stats.tx_errors = adapter->stats.txerrc;
3702 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3703 netdev->stats.tx_window_errors = adapter->stats.latecol;
3704 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
1dc32918 3705 if (hw->bad_tx_carr_stats_fd &&
167fb284 3706 adapter->link_duplex == FULL_DUPLEX) {
5fe31def 3707 netdev->stats.tx_carrier_errors = 0;
167fb284
JG
3708 adapter->stats.tncrs = 0;
3709 }
1da177e4
LT
3710
3711 /* Tx Dropped needs to be maintained elsewhere */
3712
3713 /* Phy Stats */
96838a40
JB
3714 if (hw->media_type == e1000_media_type_copper) {
3715 if ((adapter->link_speed == SPEED_1000) &&
1da177e4
LT
3716 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3717 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3718 adapter->phy_stats.idle_errors += phy_tmp;
3719 }
3720
96838a40 3721 if ((hw->mac_type <= e1000_82546) &&
1da177e4
LT
3722 (hw->phy_type == e1000_phy_m88) &&
3723 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3724 adapter->phy_stats.receive_errors += phy_tmp;
3725 }
3726
15e376b4 3727 /* Management Stats */
1dc32918
JP
3728 if (hw->has_smbus) {
3729 adapter->stats.mgptc += er32(MGTPTC);
3730 adapter->stats.mgprc += er32(MGTPRC);
3731 adapter->stats.mgpdc += er32(MGTPDC);
15e376b4
JG
3732 }
3733
1da177e4
LT
3734 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3735}
9ac98284 3736
1da177e4
LT
3737/**
3738 * e1000_intr - Interrupt Handler
3739 * @irq: interrupt number
3740 * @data: pointer to a network interface device structure
1da177e4
LT
3741 **/
3742
64798845 3743static irqreturn_t e1000_intr(int irq, void *data)
1da177e4
LT
3744{
3745 struct net_device *netdev = data;
60490fe0 3746 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 3747 struct e1000_hw *hw = &adapter->hw;
1532ecea 3748 u32 icr = er32(ICR);
c3570acb 3749
4c11b8ad 3750 if (unlikely(!icr))
835bb129
JB
3751 return IRQ_NONE; /* Not our interrupt */
3752
4c11b8ad
JB
3753 /*
3754 * we might have caused the interrupt, but the above
 3755 * read cleared it; and just in case the driver is
 3756 * down, there is nothing to do, so return handled
3757 */
3758 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3759 return IRQ_HANDLED;
3760
96838a40 3761 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
1da177e4 3762 hw->get_link_status = 1;
1314bbf3
AK
3763 /* guard against interrupt when we're going down */
3764 if (!test_bit(__E1000_DOWN, &adapter->flags))
a4010afe 3765 schedule_delayed_work(&adapter->watchdog_task, 1);
1da177e4
LT
3766 }
3767
1532ecea
JB
3768 /* disable interrupts, without the synchronize_irq bit */
3769 ew32(IMC, ~0);
3770 E1000_WRITE_FLUSH();
3771
288379f0 3772 if (likely(napi_schedule_prep(&adapter->napi))) {
835bb129
JB
3773 adapter->total_tx_bytes = 0;
3774 adapter->total_tx_packets = 0;
3775 adapter->total_rx_bytes = 0;
3776 adapter->total_rx_packets = 0;
288379f0 3777 __napi_schedule(&adapter->napi);
a6c42322 3778 } else {
90fb5135
AK
 3779 /* this really should not happen! If it does, it is basically a
 3780 * bug, but not a hard error, so enable ints and continue */
a6c42322
JB
3781 if (!test_bit(__E1000_DOWN, &adapter->flags))
3782 e1000_irq_enable(adapter);
3783 }
1da177e4 3784
1da177e4
LT
3785 return IRQ_HANDLED;
3786}
3787
1da177e4
LT
3788/**
3789 * e1000_clean - NAPI Rx polling callback
3790 * @adapter: board private structure
3791 **/
64798845 3792static int e1000_clean(struct napi_struct *napi, int budget)
1da177e4 3793{
bea3348e 3794 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
650b5a5c 3795 int tx_clean_complete = 0, work_done = 0;
581d708e 3796
650b5a5c 3797 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
581d708e 3798
650b5a5c 3799 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
581d708e 3800
650b5a5c 3801 if (!tx_clean_complete)
d2c7ddd6
DM
3802 work_done = budget;
3803
53e52c72
DM
3804 /* If budget not fully consumed, exit the polling mode */
3805 if (work_done < budget) {
835bb129
JB
3806 if (likely(adapter->itr_setting & 3))
3807 e1000_set_itr(adapter);
288379f0 3808 napi_complete(napi);
a6c42322
JB
3809 if (!test_bit(__E1000_DOWN, &adapter->flags))
3810 e1000_irq_enable(adapter);
1da177e4
LT
3811 }
3812
bea3348e 3813 return work_done;
1da177e4
LT
3814}
3815
1da177e4
LT
3816/**
3817 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3818 * @adapter: board private structure
3819 **/
64798845
JP
3820static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3821 struct e1000_tx_ring *tx_ring)
1da177e4 3822{
1dc32918 3823 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3824 struct net_device *netdev = adapter->netdev;
3825 struct e1000_tx_desc *tx_desc, *eop_desc;
3826 struct e1000_buffer *buffer_info;
3827 unsigned int i, eop;
2a1af5d7 3828 unsigned int count = 0;
835bb129 3829 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1da177e4
LT
3830
3831 i = tx_ring->next_to_clean;
3832 eop = tx_ring->buffer_info[i].next_to_watch;
3833 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3834
ccfb342c
AD
3835 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3836 (count < tx_ring->count)) {
843f4267 3837 bool cleaned = false;
2d0bb1c1 3838 rmb(); /* read buffer_info after eop_desc */
843f4267 3839 for ( ; !cleaned; count++) {
1da177e4
LT
3840 tx_desc = E1000_TX_DESC(*tx_ring, i);
3841 buffer_info = &tx_ring->buffer_info[i];
3842 cleaned = (i == eop);
3843
835bb129 3844 if (cleaned) {
31c15a2f
DN
3845 total_tx_packets += buffer_info->segs;
3846 total_tx_bytes += buffer_info->bytecount;
835bb129 3847 }
fd803241 3848 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
a9ebadd6 3849 tx_desc->upper.data = 0;
1da177e4 3850
96838a40 3851 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4 3852 }
581d708e 3853
1da177e4
LT
3854 eop = tx_ring->buffer_info[i].next_to_watch;
3855 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3856 }
3857
3858 tx_ring->next_to_clean = i;
3859
77b2aad5 3860#define TX_WAKE_THRESHOLD 32
843f4267 3861 if (unlikely(count && netif_carrier_ok(netdev) &&
65c7973f
JB
3862 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3863 /* Make sure that anybody stopping the queue after this
3864 * sees the new next_to_clean.
3865 */
3866 smp_mb();
cdd7549e
JB
3867
3868 if (netif_queue_stopped(netdev) &&
3869 !(test_bit(__E1000_DOWN, &adapter->flags))) {
77b2aad5 3870 netif_wake_queue(netdev);
fcfb1224
JB
3871 ++adapter->restart_queue;
3872 }
77b2aad5 3873 }
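 /* A note on the stop/wake protocol (not new behavior): the smp_mb()
 * above pairs with the barrier in e1000_maybe_stop_tx(), so a
 * concurrent xmit either sees the queue still awake or sees the
 * updated next_to_clean before deciding to stop it. */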
2648345f 3874
581d708e 3875 if (adapter->detect_tx_hung) {
2648345f 3876 /* Detect a transmit hang in hardware; this serializes the
1da177e4 3877 * check with the clearing of time_stamp and movement of i */
c3033b01 3878 adapter->detect_tx_hung = false;
cdd7549e
JB
3879 if (tx_ring->buffer_info[eop].time_stamp &&
3880 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
8e95a202
JP
3881 (adapter->tx_timeout_factor * HZ)) &&
3882 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
70b8f1e1
MC
3883
3884 /* detected Tx unit hang */
feb8f478 3885 e_err(drv, "Detected Tx Unit Hang\n"
675ad473
ET
3886 " Tx Queue <%lu>\n"
3887 " TDH <%x>\n"
3888 " TDT <%x>\n"
3889 " next_to_use <%x>\n"
3890 " next_to_clean <%x>\n"
3891 "buffer_info[next_to_clean]\n"
3892 " time_stamp <%lx>\n"
3893 " next_to_watch <%x>\n"
3894 " jiffies <%lx>\n"
3895 " next_to_watch.status <%x>\n",
7bfa4816
JK
3896 (unsigned long)((tx_ring - adapter->tx_ring) /
3897 sizeof(struct e1000_tx_ring)),
1dc32918
JP
3898 readl(hw->hw_addr + tx_ring->tdh),
3899 readl(hw->hw_addr + tx_ring->tdt),
70b8f1e1 3900 tx_ring->next_to_use,
392137fa 3901 tx_ring->next_to_clean,
cdd7549e 3902 tx_ring->buffer_info[eop].time_stamp,
70b8f1e1
MC
3903 eop,
3904 jiffies,
3905 eop_desc->upper.fields.status);
b04e36ba 3906 e1000_dump(adapter);
1da177e4 3907 netif_stop_queue(netdev);
70b8f1e1 3908 }
1da177e4 3909 }
835bb129
JB
3910 adapter->total_tx_bytes += total_tx_bytes;
3911 adapter->total_tx_packets += total_tx_packets;
5fe31def
AK
3912 netdev->stats.tx_bytes += total_tx_bytes;
3913 netdev->stats.tx_packets += total_tx_packets;
807540ba 3914 return count < tx_ring->count;
1da177e4
LT
3915}
3916
3917/**
3918 * e1000_rx_checksum - Receive Checksum Offload for 82543
2d7edb92
MC
3919 * @adapter: board private structure
3920 * @status_err: receive descriptor status and error fields
3921 * @csum: receive descriptor csum field
3922 * @sk_buff: socket buffer with received data
1da177e4
LT
3923 **/
3924
64798845
JP
3925static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3926 u32 csum, struct sk_buff *skb)
1da177e4 3927{
1dc32918 3928 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
3929 u16 status = (u16)status_err;
3930 u8 errors = (u8)(status_err >> 24);
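 /* status_err is packed by the callers as (u32)status |
 * ((u32)rx_desc->errors << 24), so the low byte here is the
 * descriptor status and bits 31:24 are the error flags. */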
bc8acf2c
ED
3931
3932 skb_checksum_none_assert(skb);
2d7edb92 3933
1da177e4 3934 /* 82543 or newer only */
1dc32918 3935 if (unlikely(hw->mac_type < e1000_82543)) return;
1da177e4 3936 /* Ignore Checksum bit is set */
96838a40 3937 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
2d7edb92 3938 /* TCP/UDP checksum error bit is set */
96838a40 3939 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
1da177e4 3940 /* let the stack verify checksum errors */
1da177e4 3941 adapter->hw_csum_err++;
2d7edb92
MC
3942 return;
3943 }
3944 /* TCP/UDP Checksum has not been calculated */
1532ecea
JB
3945 if (!(status & E1000_RXD_STAT_TCPCS))
3946 return;
3947
2d7edb92
MC
3948 /* It must be a TCP or UDP packet with a valid checksum */
3949 if (likely(status & E1000_RXD_STAT_TCPCS)) {
1da177e4
LT
3950 /* TCP checksum is good */
3951 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 3952 }
2d7edb92 3953 adapter->hw_csum_good++;
1da177e4
LT
3954}
3955
edbbb3ca
JB
3956/**
3957 * e1000_consume_page - helper function
3958 **/
3959static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3960 u16 length)
3961{
3962 bi->page = NULL;
3963 skb->len += length;
3964 skb->data_len += length;
ed64b3cc 3965 skb->truesize += PAGE_SIZE;
edbbb3ca
JB
3966}
3967
3968/**
3969 * e1000_receive_skb - helper function to handle rx indications
3970 * @adapter: board private structure
3971 * @status: descriptor status field as written by hardware
3972 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3973 * @skb: pointer to sk_buff to be indicated to stack
3974 */
3975static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3976 __le16 vlan, struct sk_buff *skb)
3977{
6a08d194
JB
3978 skb->protocol = eth_type_trans(skb, adapter->netdev);
3979
5622e404
JP
3980 if (status & E1000_RXD_STAT_VP) {
3981 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3982
3983 __vlan_hwaccel_put_tag(skb, vid);
3984 }
3985 napi_gro_receive(&adapter->napi, skb);
edbbb3ca
JB
3986}
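
/* A minimal sketch (illustrative only, not called by the driver) of the
 * VID extraction above: E1000_RXD_SPC_VLAN_MASK keeps the low 12 bits
 * of the 802.1Q TCI, dropping the PCP and CFI bits. */
static inline u16 e1000_rxd_special_to_vid(__le16 special)
{
	return le16_to_cpu(special) & E1000_RXD_SPC_VLAN_MASK;
}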
3987
3988/**
3989 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3990 * @adapter: board private structure
3991 * @rx_ring: ring to clean
3992 * @work_done: amount of napi work completed this call
3993 * @work_to_do: max amount of work allowed for this call to do
3994 *
 3995 * the return value indicates whether actual cleaning was done; there
 3996 * is no guarantee that everything was cleaned
3997 */
3998static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3999 struct e1000_rx_ring *rx_ring,
4000 int *work_done, int work_to_do)
4001{
4002 struct e1000_hw *hw = &adapter->hw;
4003 struct net_device *netdev = adapter->netdev;
4004 struct pci_dev *pdev = adapter->pdev;
4005 struct e1000_rx_desc *rx_desc, *next_rxd;
4006 struct e1000_buffer *buffer_info, *next_buffer;
4007 unsigned long irq_flags;
4008 u32 length;
4009 unsigned int i;
4010 int cleaned_count = 0;
4011 bool cleaned = false;
 4012 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4013
4014 i = rx_ring->next_to_clean;
4015 rx_desc = E1000_RX_DESC(*rx_ring, i);
4016 buffer_info = &rx_ring->buffer_info[i];
4017
4018 while (rx_desc->status & E1000_RXD_STAT_DD) {
4019 struct sk_buff *skb;
4020 u8 status;
4021
4022 if (*work_done >= work_to_do)
4023 break;
4024 (*work_done)++;
2d0bb1c1 4025 rmb(); /* read descriptor and rx_buffer_info after status DD */
edbbb3ca
JB
4026
4027 status = rx_desc->status;
4028 skb = buffer_info->skb;
4029 buffer_info->skb = NULL;
4030
4031 if (++i == rx_ring->count) i = 0;
4032 next_rxd = E1000_RX_DESC(*rx_ring, i);
4033 prefetch(next_rxd);
4034
4035 next_buffer = &rx_ring->buffer_info[i];
4036
4037 cleaned = true;
4038 cleaned_count++;
b16f53be
NN
4039 dma_unmap_page(&pdev->dev, buffer_info->dma,
4040 buffer_info->length, DMA_FROM_DEVICE);
edbbb3ca
JB
4041 buffer_info->dma = 0;
4042
4043 length = le16_to_cpu(rx_desc->length);
4044
4045 /* errors is only valid for DD + EOP descriptors */
4046 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4047 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4048 u8 last_byte = *(skb->data + length - 1);
4049 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4050 last_byte)) {
4051 spin_lock_irqsave(&adapter->stats_lock,
4052 irq_flags);
4053 e1000_tbi_adjust_stats(hw, &adapter->stats,
4054 length, skb->data);
4055 spin_unlock_irqrestore(&adapter->stats_lock,
4056 irq_flags);
4057 length--;
4058 } else {
4059 /* recycle both page and skb */
4060 buffer_info->skb = skb;
4061 /* an error means any chain goes out the window
4062 * too */
4063 if (rx_ring->rx_skb_top)
4064 dev_kfree_skb(rx_ring->rx_skb_top);
4065 rx_ring->rx_skb_top = NULL;
4066 goto next_desc;
4067 }
4068 }
4069
4070#define rxtop rx_ring->rx_skb_top
4071 if (!(status & E1000_RXD_STAT_EOP)) {
4072 /* this descriptor is only the beginning (or middle) */
4073 if (!rxtop) {
4074 /* this is the beginning of a chain */
4075 rxtop = skb;
4076 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4077 0, length);
4078 } else {
4079 /* this is the middle of a chain */
4080 skb_fill_page_desc(rxtop,
4081 skb_shinfo(rxtop)->nr_frags,
4082 buffer_info->page, 0, length);
4083 /* re-use the skb, only consumed the page */
4084 buffer_info->skb = skb;
4085 }
4086 e1000_consume_page(buffer_info, rxtop, length);
4087 goto next_desc;
4088 } else {
4089 if (rxtop) {
4090 /* end of the chain */
4091 skb_fill_page_desc(rxtop,
4092 skb_shinfo(rxtop)->nr_frags,
4093 buffer_info->page, 0, length);
4094 /* re-use the current skb, we only consumed the
4095 * page */
4096 buffer_info->skb = skb;
4097 skb = rxtop;
4098 rxtop = NULL;
4099 e1000_consume_page(buffer_info, skb, length);
4100 } else {
 4101 /* no chain and got EOP: this buf is the whole packet;
 4102 * copybreak to save the put_page/alloc_page round trip */
4103 if (length <= copybreak &&
4104 skb_tailroom(skb) >= length) {
4105 u8 *vaddr;
4679026d 4106 vaddr = kmap_atomic(buffer_info->page);
edbbb3ca 4107 memcpy(skb_tail_pointer(skb), vaddr, length);
4679026d 4108 kunmap_atomic(vaddr);
edbbb3ca
JB
4109 /* re-use the page, so don't erase
4110 * buffer_info->page */
4111 skb_put(skb, length);
4112 } else {
4113 skb_fill_page_desc(skb, 0,
4114 buffer_info->page, 0,
4115 length);
4116 e1000_consume_page(buffer_info, skb,
4117 length);
4118 }
4119 }
4120 }
4121
4122 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4123 e1000_rx_checksum(adapter,
4124 (u32)(status) |
4125 ((u32)(rx_desc->errors) << 24),
4126 le16_to_cpu(rx_desc->csum), skb);
4127
b0d1562c
BG
4128 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4129 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4130 pskb_trim(skb, skb->len - 4);
edbbb3ca
JB
4131 total_rx_packets++;
4132
4133 /* eth type trans needs skb->data to point to something */
4134 if (!pskb_may_pull(skb, ETH_HLEN)) {
feb8f478 4135 e_err(drv, "pskb_may_pull failed.\n");
edbbb3ca
JB
4136 dev_kfree_skb(skb);
4137 goto next_desc;
4138 }
4139
edbbb3ca
JB
4140 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4141
4142next_desc:
4143 rx_desc->status = 0;
4144
4145 /* return some buffers to hardware, one at a time is too slow */
4146 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4147 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4148 cleaned_count = 0;
4149 }
4150
4151 /* use prefetched values */
4152 rx_desc = next_rxd;
4153 buffer_info = next_buffer;
4154 }
4155 rx_ring->next_to_clean = i;
4156
4157 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4158 if (cleaned_count)
4159 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4160
4161 adapter->total_rx_packets += total_rx_packets;
4162 adapter->total_rx_bytes += total_rx_bytes;
5fe31def
AK
4163 netdev->stats.rx_bytes += total_rx_bytes;
4164 netdev->stats.rx_packets += total_rx_packets;
edbbb3ca
JB
4165 return cleaned;
4166}
4167
57bf6eef
JP
4168/*
4169 * this should improve performance for small packets with large amounts
4170 * of reassembly being done in the stack
4171 */
4172static void e1000_check_copybreak(struct net_device *netdev,
4173 struct e1000_buffer *buffer_info,
4174 u32 length, struct sk_buff **skb)
4175{
4176 struct sk_buff *new_skb;
4177
4178 if (length > copybreak)
4179 return;
4180
4181 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4182 if (!new_skb)
4183 return;
4184
4185 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4186 (*skb)->data - NET_IP_ALIGN,
4187 length + NET_IP_ALIGN);
4188 /* save the skb in buffer_info as good */
4189 buffer_info->skb = *skb;
4190 *skb = new_skb;
4191}
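
/* copybreak is a module parameter (256 bytes by default in this
 * driver); receives at or below it are copied into a freshly
 * allocated small skb so the original full-size buffer can be
 * recycled in place, which helps small-packet workloads. */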
4192
1da177e4 4193/**
2d7edb92 4194 * e1000_clean_rx_irq - Send received data up the network stack; legacy
1da177e4 4195 * @adapter: board private structure
edbbb3ca
JB
4196 * @rx_ring: ring to clean
4197 * @work_done: amount of napi work completed this call
4198 * @work_to_do: max amount of work allowed for this call to do
4199 */
64798845
JP
4200static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4201 struct e1000_rx_ring *rx_ring,
4202 int *work_done, int work_to_do)
1da177e4 4203{
1dc32918 4204 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
4205 struct net_device *netdev = adapter->netdev;
4206 struct pci_dev *pdev = adapter->pdev;
86c3d59f
JB
4207 struct e1000_rx_desc *rx_desc, *next_rxd;
4208 struct e1000_buffer *buffer_info, *next_buffer;
1da177e4 4209 unsigned long flags;
406874a7 4210 u32 length;
1da177e4 4211 unsigned int i;
72d64a43 4212 int cleaned_count = 0;
c3033b01 4213 bool cleaned = false;
835bb129 4214 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1da177e4
LT
4215
4216 i = rx_ring->next_to_clean;
4217 rx_desc = E1000_RX_DESC(*rx_ring, i);
b92ff8ee 4218 buffer_info = &rx_ring->buffer_info[i];
1da177e4 4219
b92ff8ee 4220 while (rx_desc->status & E1000_RXD_STAT_DD) {
24f476ee 4221 struct sk_buff *skb;
a292ca6e 4222 u8 status;
90fb5135 4223
96838a40 4224 if (*work_done >= work_to_do)
1da177e4
LT
4225 break;
4226 (*work_done)++;
2d0bb1c1 4227 rmb(); /* read descriptor and rx_buffer_info after status DD */
c3570acb 4228
a292ca6e 4229 status = rx_desc->status;
b92ff8ee 4230 skb = buffer_info->skb;
86c3d59f
JB
4231 buffer_info->skb = NULL;
4232
30320be8
JK
4233 prefetch(skb->data - NET_IP_ALIGN);
4234
86c3d59f
JB
4235 if (++i == rx_ring->count) i = 0;
4236 next_rxd = E1000_RX_DESC(*rx_ring, i);
30320be8
JK
4237 prefetch(next_rxd);
4238
86c3d59f 4239 next_buffer = &rx_ring->buffer_info[i];
86c3d59f 4240
c3033b01 4241 cleaned = true;
72d64a43 4242 cleaned_count++;
b16f53be
NN
4243 dma_unmap_single(&pdev->dev, buffer_info->dma,
4244 buffer_info->length, DMA_FROM_DEVICE);
679be3ba 4245 buffer_info->dma = 0;
1da177e4 4246
1da177e4 4247 length = le16_to_cpu(rx_desc->length);
ea30e119 4248 /* !EOP means multiple descriptors were used to store a single
40a14dea
JB
 4249 * packet; if that's the case we need to toss it. In fact, we need
 4250 * to toss every packet with the EOP bit clear and the next
4251 * frame that _does_ have the EOP bit set, as it is by
4252 * definition only a frame fragment
4253 */
4254 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4255 adapter->discarding = true;
4256
4257 if (adapter->discarding) {
a1415ee6 4258 /* All receives must fit into a single buffer */
feb8f478 4259 e_dbg("Receive packet consumed multiple buffers\n");
864c4e45 4260 /* recycle */
8fc897b0 4261 buffer_info->skb = skb;
40a14dea
JB
4262 if (status & E1000_RXD_STAT_EOP)
4263 adapter->discarding = false;
1da177e4
LT
4264 goto next_desc;
4265 }
4266
96838a40 4267 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
edbbb3ca 4268 u8 last_byte = *(skb->data + length - 1);
1dc32918
JP
4269 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4270 last_byte)) {
1da177e4 4271 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4272 e1000_tbi_adjust_stats(hw, &adapter->stats,
1da177e4
LT
4273 length, skb->data);
4274 spin_unlock_irqrestore(&adapter->stats_lock,
4275 flags);
4276 length--;
4277 } else {
9e2feace
AK
4278 /* recycle */
4279 buffer_info->skb = skb;
1da177e4
LT
4280 goto next_desc;
4281 }
1cb5821f 4282 }
1da177e4 4283
b0d1562c 4284 total_rx_bytes += (length - 4); /* don't count FCS */
835bb129
JB
4285 total_rx_packets++;
4286
b0d1562c
BG
4287 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4288 /* adjust length to remove Ethernet CRC, this must be
4289 * done after the TBI_ACCEPT workaround above
4290 */
4291 length -= 4;
4292
57bf6eef
JP
4293 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4294
996695de 4295 skb_put(skb, length);
1da177e4
LT
4296
4297 /* Receive Checksum Offload */
a292ca6e 4298 e1000_rx_checksum(adapter,
406874a7
JP
4299 (u32)(status) |
4300 ((u32)(rx_desc->errors) << 24),
c3d7a3a4 4301 le16_to_cpu(rx_desc->csum), skb);
96838a40 4302
edbbb3ca 4303 e1000_receive_skb(adapter, status, rx_desc->special, skb);
c3570acb 4304
1da177e4
LT
4305next_desc:
4306 rx_desc->status = 0;
1da177e4 4307
72d64a43
JK
4308 /* return some buffers to hardware, one at a time is too slow */
4309 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4310 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4311 cleaned_count = 0;
4312 }
4313
30320be8 4314 /* use prefetched values */
86c3d59f
JB
4315 rx_desc = next_rxd;
4316 buffer_info = next_buffer;
1da177e4 4317 }
1da177e4 4318 rx_ring->next_to_clean = i;
72d64a43
JK
4319
4320 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4321 if (cleaned_count)
4322 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
2d7edb92 4323
835bb129
JB
4324 adapter->total_rx_packets += total_rx_packets;
4325 adapter->total_rx_bytes += total_rx_bytes;
5fe31def
AK
4326 netdev->stats.rx_bytes += total_rx_bytes;
4327 netdev->stats.rx_packets += total_rx_packets;
2d7edb92
MC
4328 return cleaned;
4329}
4330
edbbb3ca
JB
4331/**
4332 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4333 * @adapter: address of board private structure
4334 * @rx_ring: pointer to receive ring structure
4335 * @cleaned_count: number of buffers to allocate this pass
4336 **/
4337
4338static void
4339e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4340 struct e1000_rx_ring *rx_ring, int cleaned_count)
4341{
4342 struct net_device *netdev = adapter->netdev;
4343 struct pci_dev *pdev = adapter->pdev;
4344 struct e1000_rx_desc *rx_desc;
4345 struct e1000_buffer *buffer_info;
4346 struct sk_buff *skb;
4347 unsigned int i;
89d71a66 4348 unsigned int bufsz = 256 - 16; /* for skb_reserve */
edbbb3ca
JB
4349
4350 i = rx_ring->next_to_use;
4351 buffer_info = &rx_ring->buffer_info[i];
4352
4353 while (cleaned_count--) {
4354 skb = buffer_info->skb;
4355 if (skb) {
4356 skb_trim(skb, 0);
4357 goto check_page;
4358 }
4359
89d71a66 4360 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
edbbb3ca
JB
4361 if (unlikely(!skb)) {
4362 /* Better luck next round */
4363 adapter->alloc_rx_buff_failed++;
4364 break;
4365 }
4366
4367 /* Fix for errata 23, can't cross 64kB boundary */
4368 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4369 struct sk_buff *oldskb = skb;
feb8f478
ET
 4370 e_err(rx_err, "skb align check failed: %u bytes at %p\n",
 4371 bufsz, skb->data);
edbbb3ca 4372 /* Try again, without freeing the previous */
89d71a66 4373 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
edbbb3ca
JB
4374 /* Failed allocation, critical failure */
4375 if (!skb) {
4376 dev_kfree_skb(oldskb);
4377 adapter->alloc_rx_buff_failed++;
4378 break;
4379 }
4380
4381 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4382 /* give up */
4383 dev_kfree_skb(skb);
4384 dev_kfree_skb(oldskb);
4385 break; /* while (cleaned_count--) */
4386 }
4387
4388 /* Use new allocation */
4389 dev_kfree_skb(oldskb);
4390 }
edbbb3ca
JB
4391 buffer_info->skb = skb;
4392 buffer_info->length = adapter->rx_buffer_len;
4393check_page:
4394 /* allocate a new page if necessary */
4395 if (!buffer_info->page) {
4396 buffer_info->page = alloc_page(GFP_ATOMIC);
4397 if (unlikely(!buffer_info->page)) {
4398 adapter->alloc_rx_buff_failed++;
4399 break;
4400 }
4401 }
4402
b5abb028 4403 if (!buffer_info->dma) {
b16f53be 4404 buffer_info->dma = dma_map_page(&pdev->dev,
edbbb3ca 4405 buffer_info->page, 0,
b16f53be
NN
4406 buffer_info->length,
4407 DMA_FROM_DEVICE);
4408 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
b5abb028
AB
4409 put_page(buffer_info->page);
4410 dev_kfree_skb(skb);
4411 buffer_info->page = NULL;
4412 buffer_info->skb = NULL;
4413 buffer_info->dma = 0;
4414 adapter->alloc_rx_buff_failed++;
4415 break; /* while !buffer_info->skb */
4416 }
4417 }
edbbb3ca
JB
4418
4419 rx_desc = E1000_RX_DESC(*rx_ring, i);
4420 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4421
4422 if (unlikely(++i == rx_ring->count))
4423 i = 0;
4424 buffer_info = &rx_ring->buffer_info[i];
4425 }
4426
4427 if (likely(rx_ring->next_to_use != i)) {
4428 rx_ring->next_to_use = i;
4429 if (unlikely(i-- == 0))
4430 i = (rx_ring->count - 1);
4431
4432 /* Force memory writes to complete before letting h/w
4433 * know there are new descriptors to fetch. (Only
4434 * applicable for weak-ordered memory model archs,
4435 * such as IA-64). */
4436 wmb();
4437 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4438 }
4439}
4440
1da177e4 4441/**
2d7edb92 4442 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1da177e4
LT
4443 * @adapter: address of board private structure
4444 **/
4445
64798845
JP
4446static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4447 struct e1000_rx_ring *rx_ring,
4448 int cleaned_count)
1da177e4 4449{
1dc32918 4450 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
4451 struct net_device *netdev = adapter->netdev;
4452 struct pci_dev *pdev = adapter->pdev;
4453 struct e1000_rx_desc *rx_desc;
4454 struct e1000_buffer *buffer_info;
4455 struct sk_buff *skb;
2648345f 4456 unsigned int i;
89d71a66 4457 unsigned int bufsz = adapter->rx_buffer_len;
1da177e4
LT
4458
4459 i = rx_ring->next_to_use;
4460 buffer_info = &rx_ring->buffer_info[i];
4461
a292ca6e 4462 while (cleaned_count--) {
ca6f7224
CH
4463 skb = buffer_info->skb;
4464 if (skb) {
a292ca6e
JK
4465 skb_trim(skb, 0);
4466 goto map_skb;
4467 }
4468
89d71a66 4469 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
96838a40 4470 if (unlikely(!skb)) {
1da177e4 4471 /* Better luck next round */
72d64a43 4472 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4473 break;
4474 }
4475
2648345f 4476 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
4477 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4478 struct sk_buff *oldskb = skb;
feb8f478
ET
 4479 e_err(rx_err, "skb align check failed: %u bytes at %p\n",
 4480 bufsz, skb->data);
2648345f 4481 /* Try again, without freeing the previous */
89d71a66 4482 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
2648345f 4483 /* Failed allocation, critical failure */
1da177e4
LT
4484 if (!skb) {
4485 dev_kfree_skb(oldskb);
edbbb3ca 4486 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4487 break;
4488 }
2648345f 4489
1da177e4
LT
4490 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4491 /* give up */
4492 dev_kfree_skb(skb);
4493 dev_kfree_skb(oldskb);
edbbb3ca 4494 adapter->alloc_rx_buff_failed++;
1da177e4 4495 break; /* while !buffer_info->skb */
1da177e4 4496 }
ca6f7224
CH
4497
4498 /* Use new allocation */
4499 dev_kfree_skb(oldskb);
1da177e4 4500 }
1da177e4
LT
4501 buffer_info->skb = skb;
4502 buffer_info->length = adapter->rx_buffer_len;
a292ca6e 4503map_skb:
b16f53be 4504 buffer_info->dma = dma_map_single(&pdev->dev,
1da177e4 4505 skb->data,
edbbb3ca 4506 buffer_info->length,
b16f53be
NN
4507 DMA_FROM_DEVICE);
4508 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
b5abb028
AB
4509 dev_kfree_skb(skb);
4510 buffer_info->skb = NULL;
4511 buffer_info->dma = 0;
4512 adapter->alloc_rx_buff_failed++;
4513 break; /* while !buffer_info->skb */
4514 }
1da177e4 4515
edbbb3ca
JB
4516 /*
4517 * XXX if it was allocated cleanly it will never map to a
4518 * boundary crossing
4519 */
4520
2648345f
MC
4521 /* Fix for errata 23, can't cross 64kB boundary */
4522 if (!e1000_check_64k_bound(adapter,
4523 (void *)(unsigned long)buffer_info->dma,
4524 adapter->rx_buffer_len)) {
feb8f478
ET
 4525 e_err(rx_err, "dma align check failed: %u bytes at %p\n",
 4526 adapter->rx_buffer_len,
675ad473 4527 (void *)(unsigned long)buffer_info->dma);
1da177e4
LT
4528 dev_kfree_skb(skb);
4529 buffer_info->skb = NULL;
4530
b16f53be 4531 dma_unmap_single(&pdev->dev, buffer_info->dma,
1da177e4 4532 adapter->rx_buffer_len,
b16f53be 4533 DMA_FROM_DEVICE);
679be3ba 4534 buffer_info->dma = 0;
1da177e4 4535
edbbb3ca 4536 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4537 break; /* while !buffer_info->skb */
4538 }
1da177e4
LT
4539 rx_desc = E1000_RX_DESC(*rx_ring, i);
4540 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4541
96838a40
JB
4542 if (unlikely(++i == rx_ring->count))
4543 i = 0;
1da177e4
LT
4544 buffer_info = &rx_ring->buffer_info[i];
4545 }
4546
b92ff8ee
JB
4547 if (likely(rx_ring->next_to_use != i)) {
4548 rx_ring->next_to_use = i;
4549 if (unlikely(i-- == 0))
4550 i = (rx_ring->count - 1);
4551
4552 /* Force memory writes to complete before letting h/w
4553 * know there are new descriptors to fetch. (Only
4554 * applicable for weak-ordered memory model archs,
4555 * such as IA-64). */
4556 wmb();
1dc32918 4557 writel(i, hw->hw_addr + rx_ring->rdt);
b92ff8ee 4558 }
1da177e4
LT
4559}
4560
4561/**
4562 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 4563 * @adapter: board private structure
4564 **/
4565
64798845 4566static void e1000_smartspeed(struct e1000_adapter *adapter)
1da177e4 4567{
1dc32918 4568 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
4569 u16 phy_status;
4570 u16 phy_ctrl;
1da177e4 4571
1dc32918
JP
4572 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4573 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
1da177e4
LT
4574 return;
4575
96838a40 4576 if (adapter->smartspeed == 0) {
1da177e4
LT
4577 /* If Master/Slave config fault is asserted twice,
4578 * we assume back-to-back */
1dc32918 4579 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
96838a40 4580 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
1dc32918 4581 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
96838a40 4582 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
1dc32918 4583 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
96838a40 4584 if (phy_ctrl & CR_1000T_MS_ENABLE) {
1da177e4 4585 phy_ctrl &= ~CR_1000T_MS_ENABLE;
1dc32918 4586 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
1da177e4
LT
4587 phy_ctrl);
4588 adapter->smartspeed++;
1dc32918
JP
4589 if (!e1000_phy_setup_autoneg(hw) &&
4590 !e1000_read_phy_reg(hw, PHY_CTRL,
1da177e4
LT
4591 &phy_ctrl)) {
4592 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4593 MII_CR_RESTART_AUTO_NEG);
1dc32918 4594 e1000_write_phy_reg(hw, PHY_CTRL,
1da177e4
LT
4595 phy_ctrl);
4596 }
4597 }
4598 return;
96838a40 4599 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
1da177e4 4600 /* If still no link, perhaps using 2/3 pair cable */
1dc32918 4601 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
1da177e4 4602 phy_ctrl |= CR_1000T_MS_ENABLE;
1dc32918
JP
4603 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4604 if (!e1000_phy_setup_autoneg(hw) &&
4605 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
1da177e4
LT
4606 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4607 MII_CR_RESTART_AUTO_NEG);
1dc32918 4608 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
1da177e4
LT
4609 }
4610 }
4611 /* Restart process after E1000_SMARTSPEED_MAX iterations */
96838a40 4612 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
1da177e4
LT
4613 adapter->smartspeed = 0;
4614}
4615
4616/**
 4617 * e1000_ioctl - handle ioctl calls
 4618 * @netdev: network interface device structure
 4619 * @ifr: interface request structure
 4620 * @cmd: ioctl command
4621 **/
4622
64798845 4623static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1da177e4
LT
4624{
4625 switch (cmd) {
4626 case SIOCGMIIPHY:
4627 case SIOCGMIIREG:
4628 case SIOCSMIIREG:
4629 return e1000_mii_ioctl(netdev, ifr, cmd);
4630 default:
4631 return -EOPNOTSUPP;
4632 }
4633}
4634
4635/**
 4636 * e1000_mii_ioctl - handle MII ioctl calls
 4637 * @netdev: network interface device structure
 4638 * @ifr: interface request structure
 4639 * @cmd: ioctl command
4640 **/
4641
64798845
JP
4642static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4643 int cmd)
1da177e4 4644{
60490fe0 4645 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4646 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
4647 struct mii_ioctl_data *data = if_mii(ifr);
4648 int retval;
406874a7 4649 u16 mii_reg;
97876fc6 4650 unsigned long flags;
1da177e4 4651
1dc32918 4652 if (hw->media_type != e1000_media_type_copper)
1da177e4
LT
4653 return -EOPNOTSUPP;
4654
4655 switch (cmd) {
4656 case SIOCGMIIPHY:
1dc32918 4657 data->phy_id = hw->phy_addr;
1da177e4
LT
4658 break;
4659 case SIOCGMIIREG:
97876fc6 4660 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4661 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
97876fc6
MC
4662 &data->val_out)) {
4663 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4 4664 return -EIO;
97876fc6
MC
4665 }
4666 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4
LT
4667 break;
4668 case SIOCSMIIREG:
96838a40 4669 if (data->reg_num & ~(0x1F))
1da177e4
LT
4670 return -EFAULT;
4671 mii_reg = data->val_in;
97876fc6 4672 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4673 if (e1000_write_phy_reg(hw, data->reg_num,
97876fc6
MC
4674 mii_reg)) {
4675 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4 4676 return -EIO;
97876fc6 4677 }
f0163ac4 4678 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1dc32918 4679 if (hw->media_type == e1000_media_type_copper) {
1da177e4
LT
4680 switch (data->reg_num) {
4681 case PHY_CTRL:
96838a40 4682 if (mii_reg & MII_CR_POWER_DOWN)
1da177e4 4683 break;
96838a40 4684 if (mii_reg & MII_CR_AUTO_NEG_EN) {
1dc32918
JP
4685 hw->autoneg = 1;
4686 hw->autoneg_advertised = 0x2F;
1da177e4 4687 } else {
14ad2513 4688 u32 speed;
1da177e4 4689 if (mii_reg & 0x40)
14ad2513 4690 speed = SPEED_1000;
1da177e4 4691 else if (mii_reg & 0x2000)
14ad2513 4692 speed = SPEED_100;
1da177e4 4693 else
14ad2513
DD
4694 speed = SPEED_10;
4695 retval = e1000_set_spd_dplx(
4696 adapter, speed,
4697 ((mii_reg & 0x100)
4698 ? DUPLEX_FULL :
4699 DUPLEX_HALF));
f0163ac4 4700 if (retval)
1da177e4
LT
4701 return retval;
4702 }
2db10a08
AK
4703 if (netif_running(adapter->netdev))
4704 e1000_reinit_locked(adapter);
4705 else
1da177e4
LT
4706 e1000_reset(adapter);
4707 break;
4708 case M88E1000_PHY_SPEC_CTRL:
4709 case M88E1000_EXT_PHY_SPEC_CTRL:
1dc32918 4710 if (e1000_phy_reset(hw))
1da177e4
LT
4711 return -EIO;
4712 break;
4713 }
4714 } else {
4715 switch (data->reg_num) {
4716 case PHY_CTRL:
96838a40 4717 if (mii_reg & MII_CR_POWER_DOWN)
1da177e4 4718 break;
2db10a08
AK
4719 if (netif_running(adapter->netdev))
4720 e1000_reinit_locked(adapter);
4721 else
1da177e4
LT
4722 e1000_reset(adapter);
4723 break;
4724 }
4725 }
4726 break;
4727 default:
4728 return -EOPNOTSUPP;
4729 }
4730 return E1000_SUCCESS;
4731}
4732
64798845 4733void e1000_pci_set_mwi(struct e1000_hw *hw)
1da177e4
LT
4734{
4735 struct e1000_adapter *adapter = hw->back;
2648345f 4736 int ret_val = pci_set_mwi(adapter->pdev);
1da177e4 4737
96838a40 4738 if (ret_val)
feb8f478 4739 e_err(probe, "Error in setting MWI\n");
1da177e4
LT
4740}
4741
64798845 4742void e1000_pci_clear_mwi(struct e1000_hw *hw)
1da177e4
LT
4743{
4744 struct e1000_adapter *adapter = hw->back;
4745
4746 pci_clear_mwi(adapter->pdev);
4747}
4748
64798845 4749int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
007755eb
PO
4750{
4751 struct e1000_adapter *adapter = hw->back;
4752 return pcix_get_mmrbc(adapter->pdev);
4753}
4754
64798845 4755void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
007755eb
PO
4756{
4757 struct e1000_adapter *adapter = hw->back;
4758 pcix_set_mmrbc(adapter->pdev, mmrbc);
4759}
4760
64798845 4761void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
1da177e4
LT
4762{
4763 outl(value, port);
4764}
4765
5622e404
JP
4766static bool e1000_vlan_used(struct e1000_adapter *adapter)
4767{
4768 u16 vid;
4769
4770 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4771 return true;
4772 return false;
4773}
4774
52f5509f
JP
4775static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4776 netdev_features_t features)
4777{
4778 struct e1000_hw *hw = &adapter->hw;
4779 u32 ctrl;
4780
4781 ctrl = er32(CTRL);
4782 if (features & NETIF_F_HW_VLAN_RX) {
4783 /* enable VLAN tag insert/strip */
4784 ctrl |= E1000_CTRL_VME;
4785 } else {
4786 /* disable VLAN tag insert/strip */
4787 ctrl &= ~E1000_CTRL_VME;
4788 }
4789 ew32(CTRL, ctrl);
4790}
5622e404
JP
4791static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4792 bool filter_on)
1da177e4 4793{
1dc32918 4794 struct e1000_hw *hw = &adapter->hw;
5622e404 4795 u32 rctl;
1da177e4 4796
9150b76a
JB
4797 if (!test_bit(__E1000_DOWN, &adapter->flags))
4798 e1000_irq_disable(adapter);
1da177e4 4799
52f5509f 4800 __e1000_vlan_mode(adapter, adapter->netdev->features);
5622e404 4801 if (filter_on) {
1532ecea
JB
4802 /* enable VLAN receive filtering */
4803 rctl = er32(RCTL);
4804 rctl &= ~E1000_RCTL_CFIEN;
5622e404 4805 if (!(adapter->netdev->flags & IFF_PROMISC))
1532ecea
JB
4806 rctl |= E1000_RCTL_VFE;
4807 ew32(RCTL, rctl);
4808 e1000_update_mng_vlan(adapter);
1da177e4 4809 } else {
1532ecea
JB
4810 /* disable VLAN receive filtering */
4811 rctl = er32(RCTL);
4812 rctl &= ~E1000_RCTL_VFE;
4813 ew32(RCTL, rctl);
5622e404 4814 }
fd38d7a0 4815
5622e404
JP
4816 if (!test_bit(__E1000_DOWN, &adapter->flags))
4817 e1000_irq_enable(adapter);
4818}
4819
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

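/*
 * The VLAN Filter Table Array (VFTA) is a 128-entry array of 32-bit
 * registers forming one bit per possible VLAN ID (4096 bits total):
 * bits 11:5 of the VID select the register, bits 4:0 the bit within
 * it. For example, VID 100 maps to bit 4 of VFTA[3].
 */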
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

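/*
 * Re-populate the hardware filter table from the software active_vlans
 * bitmap, e.g. after a reset has cleared the VFTA registers.
 */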
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

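/*
 * The switch() below depends on spd + dplx being unambiguous: the
 * ethtool speeds used here (10/100/1000) all have a clear lsb and
 * duplex is 0 (half) or 1 (full), so e.g. SPEED_100 + DUPLEX_FULL ==
 * 101 can only mean one pairing. The guard at the top of the function
 * rejects inputs that would break this invariant.
 */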
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

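/*
 * Common teardown for suspend and shutdown: bring the interface down,
 * program the wake-up filter control (WUFC) register with the
 * configured WoL triggers (dropping link-change wake while link is
 * up), and report through *enable_wake whether the caller should arm
 * PME for the chosen sleep state.
 */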
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

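/*
 * PCI power-management entry points, built only with CONFIG_PM;
 * e1000_suspend() defers the real work to __e1000_shutdown() and then
 * picks the D-state according to the wake result.
 */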
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */