e1000: clear ip csum info from context descriptor
[deliverable/linux.git] drivers/net/e1000/e1000_main.c
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 13 more details.
 14
 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
 30#include <net/ip6_checksum.h>
 31
 32char e1000_driver_name[] = "e1000";
 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#ifndef CONFIG_E1000_NAPI
35#define DRIVERNAPI
36#else
37#define DRIVERNAPI "-NAPI"
38#endif
 39#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
 40char e1000_driver_version[] = DRV_VERSION;
 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42
43/* e1000_pci_tbl - PCI Device ID Table
44 *
45 * Last entry must be all 0s
46 *
47 * Macro expands to...
48 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
49 */
50static struct pci_device_id e1000_pci_tbl[] = {
51 INTEL_E1000_ETHERNET_DEVICE(0x1000),
52 INTEL_E1000_ETHERNET_DEVICE(0x1001),
53 INTEL_E1000_ETHERNET_DEVICE(0x1004),
54 INTEL_E1000_ETHERNET_DEVICE(0x1008),
55 INTEL_E1000_ETHERNET_DEVICE(0x1009),
56 INTEL_E1000_ETHERNET_DEVICE(0x100C),
57 INTEL_E1000_ETHERNET_DEVICE(0x100D),
58 INTEL_E1000_ETHERNET_DEVICE(0x100E),
59 INTEL_E1000_ETHERNET_DEVICE(0x100F),
60 INTEL_E1000_ETHERNET_DEVICE(0x1010),
61 INTEL_E1000_ETHERNET_DEVICE(0x1011),
62 INTEL_E1000_ETHERNET_DEVICE(0x1012),
63 INTEL_E1000_ETHERNET_DEVICE(0x1013),
64 INTEL_E1000_ETHERNET_DEVICE(0x1014),
65 INTEL_E1000_ETHERNET_DEVICE(0x1015),
66 INTEL_E1000_ETHERNET_DEVICE(0x1016),
67 INTEL_E1000_ETHERNET_DEVICE(0x1017),
68 INTEL_E1000_ETHERNET_DEVICE(0x1018),
69 INTEL_E1000_ETHERNET_DEVICE(0x1019),
 70 INTEL_E1000_ETHERNET_DEVICE(0x101A),
71 INTEL_E1000_ETHERNET_DEVICE(0x101D),
72 INTEL_E1000_ETHERNET_DEVICE(0x101E),
73 INTEL_E1000_ETHERNET_DEVICE(0x1026),
74 INTEL_E1000_ETHERNET_DEVICE(0x1027),
75 INTEL_E1000_ETHERNET_DEVICE(0x1028),
76 INTEL_E1000_ETHERNET_DEVICE(0x1049),
77 INTEL_E1000_ETHERNET_DEVICE(0x104A),
78 INTEL_E1000_ETHERNET_DEVICE(0x104B),
79 INTEL_E1000_ETHERNET_DEVICE(0x104C),
80 INTEL_E1000_ETHERNET_DEVICE(0x104D),
81 INTEL_E1000_ETHERNET_DEVICE(0x105E),
82 INTEL_E1000_ETHERNET_DEVICE(0x105F),
83 INTEL_E1000_ETHERNET_DEVICE(0x1060),
84 INTEL_E1000_ETHERNET_DEVICE(0x1075),
85 INTEL_E1000_ETHERNET_DEVICE(0x1076),
86 INTEL_E1000_ETHERNET_DEVICE(0x1077),
87 INTEL_E1000_ETHERNET_DEVICE(0x1078),
88 INTEL_E1000_ETHERNET_DEVICE(0x1079),
89 INTEL_E1000_ETHERNET_DEVICE(0x107A),
90 INTEL_E1000_ETHERNET_DEVICE(0x107B),
91 INTEL_E1000_ETHERNET_DEVICE(0x107C),
92 INTEL_E1000_ETHERNET_DEVICE(0x107D),
93 INTEL_E1000_ETHERNET_DEVICE(0x107E),
94 INTEL_E1000_ETHERNET_DEVICE(0x107F),
 95 INTEL_E1000_ETHERNET_DEVICE(0x108A),
96 INTEL_E1000_ETHERNET_DEVICE(0x108B),
97 INTEL_E1000_ETHERNET_DEVICE(0x108C),
98 INTEL_E1000_ETHERNET_DEVICE(0x1096),
99 INTEL_E1000_ETHERNET_DEVICE(0x1098),
 100 INTEL_E1000_ETHERNET_DEVICE(0x1099),
 101 INTEL_E1000_ETHERNET_DEVICE(0x109A),
 102 INTEL_E1000_ETHERNET_DEVICE(0x10A4),
 103 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
 104 INTEL_E1000_ETHERNET_DEVICE(0x10B9),
105 INTEL_E1000_ETHERNET_DEVICE(0x10BA),
106 INTEL_E1000_ETHERNET_DEVICE(0x10BB),
107 INTEL_E1000_ETHERNET_DEVICE(0x10BC),
108 INTEL_E1000_ETHERNET_DEVICE(0x10C4),
109 INTEL_E1000_ETHERNET_DEVICE(0x10C5),
110 /* required last entry */
111 {0,}
112};
113
114MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
115
116int e1000_up(struct e1000_adapter *adapter);
117void e1000_down(struct e1000_adapter *adapter);
118void e1000_reinit_locked(struct e1000_adapter *adapter);
119void e1000_reset(struct e1000_adapter *adapter);
120int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
121int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
122int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
123void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
124void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 125static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 126 struct e1000_tx_ring *txdr);
 127static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 128 struct e1000_rx_ring *rxdr);
 129static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 130 struct e1000_tx_ring *tx_ring);
 131static void e1000_free_rx_resources(struct e1000_adapter *adapter,
132 struct e1000_rx_ring *rx_ring);
133void e1000_update_stats(struct e1000_adapter *adapter);
134
135static int e1000_init_module(void);
136static void e1000_exit_module(void);
137static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
138static void __devexit e1000_remove(struct pci_dev *pdev);
 139static int e1000_alloc_queues(struct e1000_adapter *adapter);
140static int e1000_sw_init(struct e1000_adapter *adapter);
141static int e1000_open(struct net_device *netdev);
142static int e1000_close(struct net_device *netdev);
143static void e1000_configure_tx(struct e1000_adapter *adapter);
144static void e1000_configure_rx(struct e1000_adapter *adapter);
145static void e1000_setup_rctl(struct e1000_adapter *adapter);
146static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
147static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
148static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
149 struct e1000_tx_ring *tx_ring);
150static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
151 struct e1000_rx_ring *rx_ring);
152static void e1000_set_multi(struct net_device *netdev);
153static void e1000_update_phy_info(unsigned long data);
154static void e1000_watchdog(unsigned long data);
155static void e1000_82547_tx_fifo_stall(unsigned long data);
156static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
157static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
158static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
159static int e1000_set_mac(struct net_device *netdev, void *p);
 160static irqreturn_t e1000_intr(int irq, void *data);
161#ifdef CONFIG_PCI_MSI
162static irqreturn_t e1000_intr_msi(int irq, void *data);
163#endif
164static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
165 struct e1000_tx_ring *tx_ring);
 166#ifdef CONFIG_E1000_NAPI
 167static int e1000_clean(struct net_device *poll_dev, int *budget);
 168static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
 169 struct e1000_rx_ring *rx_ring,
 170 int *work_done, int work_to_do);
 171static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 172 struct e1000_rx_ring *rx_ring,
 173 int *work_done, int work_to_do);
 174#else
175static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
176 struct e1000_rx_ring *rx_ring);
177static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
178 struct e1000_rx_ring *rx_ring);
 179#endif
 180static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
181 struct e1000_rx_ring *rx_ring,
182 int cleaned_count);
 183static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
184 struct e1000_rx_ring *rx_ring,
185 int cleaned_count);
186static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
187static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
188 int cmd);
 189void e1000_set_ethtool_ops(struct net_device *netdev);
190static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
191static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
192static void e1000_tx_timeout(struct net_device *dev);
 193static void e1000_reset_task(struct work_struct *work);
 194static void e1000_smartspeed(struct e1000_adapter *adapter);
195static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
196 struct sk_buff *skb);
197
198static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
199static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
200static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
201static void e1000_restore_vlan(struct e1000_adapter *adapter);
202
 203static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 204#ifdef CONFIG_PM
205static int e1000_resume(struct pci_dev *pdev);
206#endif
 207static void e1000_shutdown(struct pci_dev *pdev);
208
209#ifdef CONFIG_NET_POLL_CONTROLLER
210/* for netdump / net console */
211static void e1000_netpoll (struct net_device *netdev);
212#endif
213
214extern void e1000_check_options(struct e1000_adapter *adapter);
215
216#define COPYBREAK_DEFAULT 256
217static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
218module_param(copybreak, uint, 0644);
219MODULE_PARM_DESC(copybreak,
220 "Maximum size of packet that is copied to a new buffer on receive");
221
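/* Illustrative note (not part of the original source): copybreak is a
 * run-time module parameter, so one way it could be set when loading the
 * driver is, for example:
 *   modprobe e1000 copybreak=128   -> copy received frames <= 128 bytes
 *   modprobe e1000 copybreak=0     -> disable the copy-on-receive path
 * Because the parameter is registered with mode 0644 it can also be changed
 * after load through /sys/module/e1000/parameters/copybreak. */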
222static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
223 pci_channel_state_t state);
224static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
225static void e1000_io_resume(struct pci_dev *pdev);
226
227static struct pci_error_handlers e1000_err_handler = {
228 .error_detected = e1000_io_error_detected,
229 .slot_reset = e1000_io_slot_reset,
230 .resume = e1000_io_resume,
231};
 232
233static struct pci_driver e1000_driver = {
234 .name = e1000_driver_name,
235 .id_table = e1000_pci_tbl,
236 .probe = e1000_probe,
237 .remove = __devexit_p(e1000_remove),
 238#ifdef CONFIG_PM
 239 /* Power Management Hooks */
 240 .suspend = e1000_suspend,
 241 .resume = e1000_resume,
 242#endif
243 .shutdown = e1000_shutdown,
244 .err_handler = &e1000_err_handler
245};
246
247MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
248MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
249MODULE_LICENSE("GPL");
250MODULE_VERSION(DRV_VERSION);
251
252static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
253module_param(debug, int, 0);
254MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
255
256/**
257 * e1000_init_module - Driver Registration Routine
258 *
259 * e1000_init_module is the first routine called when the driver is
260 * loaded. All it does is register with the PCI subsystem.
261 **/
262
263static int __init
264e1000_init_module(void)
265{
266 int ret;
267 printk(KERN_INFO "%s - version %s\n",
268 e1000_driver_string, e1000_driver_version);
269
270 printk(KERN_INFO "%s\n", e1000_copyright);
271
 272 ret = pci_register_driver(&e1000_driver);
273 if (copybreak != COPYBREAK_DEFAULT) {
274 if (copybreak == 0)
275 printk(KERN_INFO "e1000: copybreak disabled\n");
276 else
277 printk(KERN_INFO "e1000: copybreak enabled for "
278 "packets <= %u bytes\n", copybreak);
279 }
280 return ret;
281}
282
283module_init(e1000_init_module);
284
285/**
286 * e1000_exit_module - Driver Exit Cleanup Routine
287 *
288 * e1000_exit_module is called just before the driver is removed
289 * from memory.
290 **/
291
292static void __exit
293e1000_exit_module(void)
294{
295 pci_unregister_driver(&e1000_driver);
296}
297
298module_exit(e1000_exit_module);
299
300static int e1000_request_irq(struct e1000_adapter *adapter)
301{
302 struct net_device *netdev = adapter->netdev;
303 int flags, err = 0;
304
 305 flags = IRQF_SHARED;
 306#ifdef CONFIG_PCI_MSI
 307 if (adapter->hw.mac_type >= e1000_82571) {
308 adapter->have_msi = TRUE;
309 if ((err = pci_enable_msi(adapter->pdev))) {
310 DPRINTK(PROBE, ERR,
311 "Unable to allocate MSI interrupt Error: %d\n", err);
312 adapter->have_msi = FALSE;
313 }
314 }
 315 if (adapter->have_msi) {
 316 flags &= ~IRQF_SHARED;
317 err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
318 netdev->name, netdev);
319 if (err)
320 DPRINTK(PROBE, ERR,
321 "Unable to allocate interrupt Error: %d\n", err);
322 } else
323#endif
324 if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
325 netdev->name, netdev)))
326 DPRINTK(PROBE, ERR,
327 "Unable to allocate interrupt Error: %d\n", err);
328
329 return err;
330}
331
332static void e1000_free_irq(struct e1000_adapter *adapter)
333{
334 struct net_device *netdev = adapter->netdev;
335
336 free_irq(adapter->pdev->irq, netdev);
337
338#ifdef CONFIG_PCI_MSI
339 if (adapter->have_msi)
340 pci_disable_msi(adapter->pdev);
341#endif
342}
343
344/**
345 * e1000_irq_disable - Mask off interrupt generation on the NIC
346 * @adapter: board private structure
347 **/
348
 349static void
350e1000_irq_disable(struct e1000_adapter *adapter)
351{
352 atomic_inc(&adapter->irq_sem);
353 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
354 E1000_WRITE_FLUSH(&adapter->hw);
355 synchronize_irq(adapter->pdev->irq);
356}
357
358/**
359 * e1000_irq_enable - Enable default interrupt generation settings
360 * @adapter: board private structure
361 **/
362
 363static void
364e1000_irq_enable(struct e1000_adapter *adapter)
365{
 366 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
367 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
368 E1000_WRITE_FLUSH(&adapter->hw);
369 }
370}
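/* Note (added commentary, not in the original source): irq_sem behaves as a
 * disable count where 0 means "interrupts enabled". e1000_irq_disable()
 * increments it and masks all interrupts via IMC unconditionally, while
 * e1000_irq_enable() decrements it and only rewrites IMS once the count
 * reaches zero. So with the count initialized to 1 in e1000_sw_init(),
 * the first e1000_irq_enable() (from e1000_up()) unmasks the device, and
 * nested disable/enable pairs afterwards require a matching enable for
 * every disable before interrupts are unmasked again. */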
371
372static void
373e1000_update_mng_vlan(struct e1000_adapter *adapter)
374{
375 struct net_device *netdev = adapter->netdev;
376 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
377 uint16_t old_vid = adapter->mng_vlan_id;
378 if (adapter->vlgrp) {
379 if (!adapter->vlgrp->vlan_devices[vid]) {
380 if (adapter->hw.mng_cookie.status &
381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
382 e1000_vlan_rx_add_vid(netdev, vid);
383 adapter->mng_vlan_id = vid;
384 } else
385 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
386
387 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
388 (vid != old_vid) &&
389 !adapter->vlgrp->vlan_devices[old_vid])
390 e1000_vlan_rx_kill_vid(netdev, old_vid);
391 } else
392 adapter->mng_vlan_id = vid;
393 }
394}
395
396/**
397 * e1000_release_hw_control - release control of the h/w to f/w
398 * @adapter: address of board private structure
399 *
400 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
401 * For ASF and Pass Through versions of f/w this means that the
 402 * driver is no longer loaded. For AMT version (only with 82573)
 403 * of the f/w this means that the network i/f is closed.
 404 *
405 **/
406
 407static void
408e1000_release_hw_control(struct e1000_adapter *adapter)
409{
410 uint32_t ctrl_ext;
411 uint32_t swsm;
 412 uint32_t extcnf;
413
 414 /* Let firmware take over control of h/w */
415 switch (adapter->hw.mac_type) {
416 case e1000_82571:
417 case e1000_82572:
 418 case e1000_80003es2lan:
419 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
420 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
421 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
422 break;
423 case e1000_82573:
424 swsm = E1000_READ_REG(&adapter->hw, SWSM);
425 E1000_WRITE_REG(&adapter->hw, SWSM,
426 swsm & ~E1000_SWSM_DRV_LOAD);
427 case e1000_ich8lan:
428 extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
429 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
430 extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
431 break;
432 default:
433 break;
434 }
435}
436
437/**
438 * e1000_get_hw_control - get control of the h/w from f/w
439 * @adapter: address of board private structure
440 *
441 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
442 * For ASF and Pass Through versions of f/w this means that
443 * the driver is loaded. For AMT version (only with 82573)
 444 * of the f/w this means that the network i/f is open.
 445 *
446 **/
447
 448static void
449e1000_get_hw_control(struct e1000_adapter *adapter)
450{
451 uint32_t ctrl_ext;
452 uint32_t swsm;
 453 uint32_t extcnf;
 454
455 /* Let firmware know the driver has taken over */
456 switch (adapter->hw.mac_type) {
457 case e1000_82571:
458 case e1000_82572:
 459 case e1000_80003es2lan:
460 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
461 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
462 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
463 break;
464 case e1000_82573:
465 swsm = E1000_READ_REG(&adapter->hw, SWSM);
466 E1000_WRITE_REG(&adapter->hw, SWSM,
467 swsm | E1000_SWSM_DRV_LOAD);
468 break;
469 case e1000_ich8lan:
470 extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
471 E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
472 extcnf | E1000_EXTCNF_CTRL_SWFLAG);
473 break;
474 default:
475 break;
476 }
477}
478
479static void
480e1000_init_manageability(struct e1000_adapter *adapter)
481{
482 if (adapter->en_mng_pt) {
483 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
484
485 /* disable hardware interception of ARP */
486 manc &= ~(E1000_MANC_ARP_EN);
487
488 /* enable receiving management packets to the host */
489 /* this will probably generate destination unreachable messages
490 * from the host OS, but the packets will be handled on SMBUS */
491 if (adapter->hw.has_manc2h) {
492 uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
493
494 manc |= E1000_MANC_EN_MNG2HOST;
495#define E1000_MNG2HOST_PORT_623 (1 << 5)
496#define E1000_MNG2HOST_PORT_664 (1 << 6)
497 manc2h |= E1000_MNG2HOST_PORT_623;
498 manc2h |= E1000_MNG2HOST_PORT_664;
499 E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
500 }
501
502 E1000_WRITE_REG(&adapter->hw, MANC, manc);
503 }
504}
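/* Background note (added commentary, not from the original source): bits 5
 * and 6 of MANC2H correspond to the two management ports named in the
 * defines above; UDP port 623 is the standard ASF/RMCP management port and
 * 664 is commonly used for secure RMCP. Setting them, together with
 * E1000_MANC_EN_MNG2HOST, lets those management packets be delivered to the
 * host as described in the comment above. */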
505
506static void
507e1000_release_manageability(struct e1000_adapter *adapter)
508{
509 if (adapter->en_mng_pt) {
510 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
511
512 /* re-enable hardware interception of ARP */
513 manc |= E1000_MANC_ARP_EN;
514
515 if (adapter->hw.has_manc2h)
516 manc &= ~E1000_MANC_EN_MNG2HOST;
517
518 /* don't explicitly have to mess with MANC2H since
519 * MANC has an enable disable that gates MANC2H */
520
521 E1000_WRITE_REG(&adapter->hw, MANC, manc);
522 }
523}
524
525int
526e1000_up(struct e1000_adapter *adapter)
527{
528 struct net_device *netdev = adapter->netdev;
 529 int i;
530
531 /* hardware has been reset, we need to reload some things */
532
533 e1000_set_multi(netdev);
534
535 e1000_restore_vlan(adapter);
 536 e1000_init_manageability(adapter);
537
538 e1000_configure_tx(adapter);
539 e1000_setup_rctl(adapter);
540 e1000_configure_rx(adapter);
541 /* call E1000_DESC_UNUSED which always leaves
542 * at least 1 descriptor unused to make sure
543 * next_to_use != next_to_clean */
 544 for (i = 0; i < adapter->num_rx_queues; i++) {
 545 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
546 adapter->alloc_rx_buf(adapter, ring,
547 E1000_DESC_UNUSED(ring));
 548 }
 549
550 adapter->tx_queue_len = netdev->tx_queue_len;
551
552#ifdef CONFIG_E1000_NAPI
553 netif_poll_enable(netdev);
554#endif
555 e1000_irq_enable(adapter);
556
557 clear_bit(__E1000_DOWN, &adapter->flags);
558
559 /* fire a link change interrupt to start the watchdog */
560 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
561 return 0;
562}
563
564/**
565 * e1000_power_up_phy - restore link in case the phy was powered down
566 * @adapter: address of board private structure
567 *
568 * The phy may be powered down to save power and turn off link when the
569 * driver is unloaded and wake on lan is not enabled (among others)
570 * *** this routine MUST be followed by a call to e1000_reset ***
571 *
572 **/
573
 574void e1000_power_up_phy(struct e1000_adapter *adapter)
575{
576 uint16_t mii_reg = 0;
577
578 /* Just clear the power down bit to wake the phy back up */
579 if (adapter->hw.media_type == e1000_media_type_copper) {
580 /* according to the manual, the phy will retain its
581 * settings across a power-down/up cycle */
582 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
583 mii_reg &= ~MII_CR_POWER_DOWN;
584 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
585 }
586}
587
588static void e1000_power_down_phy(struct e1000_adapter *adapter)
589{
590 /* Power down the PHY so no link is implied when interface is down *
591 * The PHY cannot be powered down if any of the following is TRUE *
592 * (a) WoL is enabled
593 * (b) AMT is active
594 * (c) SoL/IDER session is active */
595 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
 596 adapter->hw.media_type == e1000_media_type_copper) {
 597 uint16_t mii_reg = 0;
598
599 switch (adapter->hw.mac_type) {
600 case e1000_82540:
601 case e1000_82545:
602 case e1000_82545_rev_3:
603 case e1000_82546:
604 case e1000_82546_rev_3:
605 case e1000_82541:
606 case e1000_82541_rev_2:
607 case e1000_82547:
608 case e1000_82547_rev_2:
609 if (E1000_READ_REG(&adapter->hw, MANC) &
610 E1000_MANC_SMBUS_EN)
611 goto out;
612 break;
613 case e1000_82571:
614 case e1000_82572:
615 case e1000_82573:
616 case e1000_80003es2lan:
617 case e1000_ich8lan:
618 if (e1000_check_mng_mode(&adapter->hw) ||
619 e1000_check_phy_reset_block(&adapter->hw))
620 goto out;
621 break;
622 default:
623 goto out;
624 }
625 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
626 mii_reg |= MII_CR_POWER_DOWN;
627 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
628 mdelay(1);
629 }
630out:
631 return;
632}
633
634void
635e1000_down(struct e1000_adapter *adapter)
636{
637 struct net_device *netdev = adapter->netdev;
638
639 /* signal that we're down so the interrupt handler does not
640 * reschedule our watchdog timer */
641 set_bit(__E1000_DOWN, &adapter->flags);
642
 643 e1000_irq_disable(adapter);
 644
645 del_timer_sync(&adapter->tx_fifo_stall_timer);
646 del_timer_sync(&adapter->watchdog_timer);
647 del_timer_sync(&adapter->phy_info_timer);
648
649#ifdef CONFIG_E1000_NAPI
650 netif_poll_disable(netdev);
651#endif
 652 netdev->tx_queue_len = adapter->tx_queue_len;
653 adapter->link_speed = 0;
654 adapter->link_duplex = 0;
655 netif_carrier_off(netdev);
656 netif_stop_queue(netdev);
657
658 e1000_reset(adapter);
659 e1000_clean_all_tx_rings(adapter);
660 e1000_clean_all_rx_rings(adapter);
 661}
 662
663void
664e1000_reinit_locked(struct e1000_adapter *adapter)
665{
666 WARN_ON(in_interrupt());
667 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
668 msleep(1);
669 e1000_down(adapter);
670 e1000_up(adapter);
671 clear_bit(__E1000_RESETTING, &adapter->flags);
672}
673
674void
675e1000_reset(struct e1000_adapter *adapter)
676{
018ea44e 677 uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
1125ecbc 678 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
018ea44e 679 boolean_t legacy_pba_adjust = FALSE;
1da177e4
LT
680
681 /* Repartition Pba for greater than 9k mtu
682 * To take effect CTRL.RST is required.
683 */
684
 685 switch (adapter->hw.mac_type) {
686 case e1000_82542_rev2_0:
687 case e1000_82542_rev2_1:
688 case e1000_82543:
689 case e1000_82544:
690 case e1000_82540:
691 case e1000_82541:
692 case e1000_82541_rev_2:
693 legacy_pba_adjust = TRUE;
694 pba = E1000_PBA_48K;
695 break;
696 case e1000_82545:
697 case e1000_82545_rev_3:
698 case e1000_82546:
699 case e1000_82546_rev_3:
700 pba = E1000_PBA_48K;
701 break;
 702 case e1000_82547:
 703 case e1000_82547_rev_2:
 704 legacy_pba_adjust = TRUE;
705 pba = E1000_PBA_30K;
706 break;
707 case e1000_82571:
708 case e1000_82572:
 709 case e1000_80003es2lan:
710 pba = E1000_PBA_38K;
711 break;
 712 case e1000_82573:
 713 pba = E1000_PBA_20K;
 714 break;
715 case e1000_ich8lan:
716 pba = E1000_PBA_8K;
717 case e1000_undefined:
718 case e1000_num_macs:
719 break;
720 }
721
722 if (legacy_pba_adjust == TRUE) {
723 if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
724 pba -= 8; /* allocate more FIFO for Tx */
 725
726 if (adapter->hw.mac_type == e1000_82547) {
727 adapter->tx_fifo_head = 0;
728 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
729 adapter->tx_fifo_size =
730 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
731 atomic_set(&adapter->tx_fifo_stall, 0);
732 }
733 } else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
734 /* adjust PBA for jumbo frames */
735 E1000_WRITE_REG(&adapter->hw, PBA, pba);
736
737 /* To maintain wire speed transmits, the Tx FIFO should be
 738 * large enough to accommodate two full transmit packets,
 739 * rounded up to the next 1KB and expressed in KB. Likewise,
 740 * the Rx FIFO should be large enough to accommodate at least
741 * one full receive packet and is similarly rounded up and
742 * expressed in KB. */
743 pba = E1000_READ_REG(&adapter->hw, PBA);
744 /* upper 16 bits has Tx packet buffer allocation size in KB */
745 tx_space = pba >> 16;
746 /* lower 16 bits has Rx packet buffer allocation size in KB */
747 pba &= 0xffff;
748 /* don't include ethernet FCS because hardware appends/strips */
749 min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
750 VLAN_TAG_SIZE;
751 min_tx_space = min_rx_space;
752 min_tx_space *= 2;
753 E1000_ROUNDUP(min_tx_space, 1024);
754 min_tx_space >>= 10;
755 E1000_ROUNDUP(min_rx_space, 1024);
756 min_rx_space >>= 10;
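/* Worked example (illustrative only, assuming a 1500-byte MTU, a 14-byte
 * Ethernet header for ENET_HEADER_SIZE and a 4-byte VLAN_TAG_SIZE):
 *   min_rx_space = 1500 + 14 + 4 = 1518 bytes -> rounded to 2048 -> 2 KB
 *   min_tx_space = 2 * 1518 = 3036 bytes      -> rounded to 4096 -> 4 KB
 * i.e. the Tx FIFO must hold at least two full frames and the Rx FIFO one,
 * each expressed in whole kilobytes. */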
757
758 /* If current Tx allocation is less than the min Tx FIFO size,
759 * and the min Tx FIFO size is less than the current Rx FIFO
760 * allocation, take space away from current Rx allocation */
761 if (tx_space < min_tx_space &&
762 ((min_tx_space - tx_space) < pba)) {
763 pba = pba - (min_tx_space - tx_space);
764
765 /* PCI/PCIx hardware has PBA alignment constraints */
766 switch (adapter->hw.mac_type) {
767 case e1000_82545 ... e1000_82546_rev_3:
768 pba &= ~(E1000_PBA_8K - 1);
769 break;
770 default:
771 break;
772 }
773
774 /* if short on rx space, rx wins and must trump tx
775 * adjustment or use Early Receive if available */
776 if (pba < min_rx_space) {
777 switch (adapter->hw.mac_type) {
778 case e1000_82573:
779 /* ERT enabled in e1000_configure_rx */
780 break;
781 default:
782 pba = min_rx_space;
783 break;
784 }
785 }
786 }
 787 }
 788
789 E1000_WRITE_REG(&adapter->hw, PBA, pba);
790
791 /* flow control settings */
792 /* Set the FC high water mark to 90% of the FIFO size.
793 * Required to clear last 3 LSB */
794 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
795 /* We can't use 90% on small FIFOs because the remainder
796 * would be less than 1 full frame. In this case, we size
797 * it to allow at least a full frame above the high water
798 * mark. */
799 if (pba < E1000_PBA_16K)
800 fc_high_water_mark = (pba * 1024) - 1600;
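/* Worked example (illustrative only): with a 48 KB packet buffer,
 *   fc_high_water_mark = ((48 * 9216) / 10) & 0xFFF8 = 44236 & 0xFFF8 = 44232
 * i.e. roughly 90% of the FIFO rounded down to an 8-byte boundary, with the
 * low-water mark set 8 bytes below it (44224). For FIFOs smaller than 16 KB
 * the alternative formula above instead leaves 1600 bytes of headroom (one
 * full frame) above the high-water mark. */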
801
802 adapter->hw.fc_high_water = fc_high_water_mark;
803 adapter->hw.fc_low_water = fc_high_water_mark - 8;
804 if (adapter->hw.mac_type == e1000_80003es2lan)
805 adapter->hw.fc_pause_time = 0xFFFF;
806 else
807 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
808 adapter->hw.fc_send_xon = 1;
809 adapter->hw.fc = adapter->hw.original_fc;
810
 811 /* Allow time for pending master requests to run */
 812 e1000_reset_hw(&adapter->hw);
 813 if (adapter->hw.mac_type >= e1000_82544)
 814 E1000_WRITE_REG(&adapter->hw, WUC, 0);
 815
 816 if (e1000_init_hw(&adapter->hw))
 817 DPRINTK(PROBE, ERR, "Hardware Error\n");
 818 e1000_update_mng_vlan(adapter);
819
820 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
821 if (adapter->hw.mac_type >= e1000_82544 &&
822 adapter->hw.mac_type <= e1000_82547_rev_2 &&
823 adapter->hw.autoneg == 1 &&
824 adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
825 uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
826 /* clear phy power management bit if we are in gig only mode,
827 * which if enabled will attempt negotiation to 100Mb, which
828 * can cause a loss of link at power off or driver unload */
829 ctrl &= ~E1000_CTRL_SWDPIN3;
830 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
831 }
832
833 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
834 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
835
836 e1000_reset_adaptive(&adapter->hw);
837 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
838
839 if (!adapter->smart_power_down &&
840 (adapter->hw.mac_type == e1000_82571 ||
841 adapter->hw.mac_type == e1000_82572)) {
842 uint16_t phy_data = 0;
843 /* speed up time to link by disabling smart power down, ignore
844 * the return value of this function because there is nothing
845 * different we would do if it failed */
846 e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
847 &phy_data);
848 phy_data &= ~IGP02E1000_PM_SPD;
849 e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
850 phy_data);
851 }
852
 853 e1000_release_manageability(adapter);
854}
855
856/**
857 * e1000_probe - Device Initialization Routine
858 * @pdev: PCI device information struct
859 * @ent: entry in e1000_pci_tbl
860 *
861 * Returns 0 on success, negative on failure
862 *
863 * e1000_probe initializes an adapter identified by a pci_dev structure.
864 * The OS initialization, configuring of the adapter private structure,
865 * and a hardware reset occur.
866 **/
867
868static int __devinit
869e1000_probe(struct pci_dev *pdev,
870 const struct pci_device_id *ent)
871{
872 struct net_device *netdev;
873 struct e1000_adapter *adapter;
 874 unsigned long mmio_start, mmio_len;
 875 unsigned long flash_start, flash_len;
 876
 877 static int cards_found = 0;
 878 static int global_quad_port_a = 0; /* global ksp3 port a indication */
 879 int i, err, pci_using_dac;
 880 uint16_t eeprom_data = 0;
 881 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
 882 if ((err = pci_enable_device(pdev)))
883 return err;
884
885 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
886 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
887 pci_using_dac = 1;
888 } else {
889 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
890 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
 891 E1000_ERR("No usable DMA configuration, aborting\n");
 892 goto err_dma;
893 }
894 pci_using_dac = 0;
895 }
896
 897 if ((err = pci_request_regions(pdev, e1000_driver_name)))
 898 goto err_pci_reg;
899
900 pci_set_master(pdev);
901
 902 err = -ENOMEM;
 903 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 904 if (!netdev)
 905 goto err_alloc_etherdev;
906
907 SET_MODULE_OWNER(netdev);
908 SET_NETDEV_DEV(netdev, &pdev->dev);
909
910 pci_set_drvdata(pdev, netdev);
 911 adapter = netdev_priv(netdev);
912 adapter->netdev = netdev;
913 adapter->pdev = pdev;
914 adapter->hw.back = adapter;
915 adapter->msg_enable = (1 << debug) - 1;
916
917 mmio_start = pci_resource_start(pdev, BAR_0);
918 mmio_len = pci_resource_len(pdev, BAR_0);
919
 920 err = -EIO;
 921 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
 922 if (!adapter->hw.hw_addr)
 923 goto err_ioremap;
 924
925 for (i = BAR_1; i <= BAR_5; i++) {
926 if (pci_resource_len(pdev, i) == 0)
1da177e4 927 continue;
 928 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
929 adapter->hw.io_base = pci_resource_start(pdev, i);
930 break;
931 }
932 }
933
934 netdev->open = &e1000_open;
935 netdev->stop = &e1000_close;
936 netdev->hard_start_xmit = &e1000_xmit_frame;
937 netdev->get_stats = &e1000_get_stats;
938 netdev->set_multicast_list = &e1000_set_multi;
939 netdev->set_mac_address = &e1000_set_mac;
940 netdev->change_mtu = &e1000_change_mtu;
941 netdev->do_ioctl = &e1000_ioctl;
942 e1000_set_ethtool_ops(netdev);
943 netdev->tx_timeout = &e1000_tx_timeout;
944 netdev->watchdog_timeo = 5 * HZ;
945#ifdef CONFIG_E1000_NAPI
946 netdev->poll = &e1000_clean;
947 netdev->weight = 64;
948#endif
949 netdev->vlan_rx_register = e1000_vlan_rx_register;
950 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
951 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
952#ifdef CONFIG_NET_POLL_CONTROLLER
953 netdev->poll_controller = e1000_netpoll;
954#endif
 955 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
956
957 netdev->mem_start = mmio_start;
958 netdev->mem_end = mmio_start + mmio_len;
959 netdev->base_addr = adapter->hw.io_base;
960
961 adapter->bd_number = cards_found;
962
963 /* setup the private structure */
964
 965 if ((err = e1000_sw_init(adapter)))
966 goto err_sw_init;
967
 968 err = -EIO;
969 /* Flash BAR mapping must happen after e1000_sw_init
970 * because it depends on mac_type */
971 if ((adapter->hw.mac_type == e1000_ich8lan) &&
972 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
973 flash_start = pci_resource_start(pdev, 1);
974 flash_len = pci_resource_len(pdev, 1);
975 adapter->hw.flash_address = ioremap(flash_start, flash_len);
 976 if (!adapter->hw.flash_address)
 977 goto err_flashmap;
978 }
979
 980 if (e1000_check_phy_reset_block(&adapter->hw))
981 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
982
 983 if (adapter->hw.mac_type >= e1000_82543) {
984 netdev->features = NETIF_F_SG |
985 NETIF_F_HW_CSUM |
986 NETIF_F_HW_VLAN_TX |
987 NETIF_F_HW_VLAN_RX |
988 NETIF_F_HW_VLAN_FILTER;
989 if (adapter->hw.mac_type == e1000_ich8lan)
990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
991 }
992
993#ifdef NETIF_F_TSO
 994 if ((adapter->hw.mac_type >= e1000_82544) &&
995 (adapter->hw.mac_type != e1000_82547))
996 netdev->features |= NETIF_F_TSO;
 997
 998#ifdef NETIF_F_TSO6
 999 if (adapter->hw.mac_type > e1000_82547_rev_2)
 1000 netdev->features |= NETIF_F_TSO6;
 1001#endif
 1002#endif
 1003 if (pci_using_dac)
1004 netdev->features |= NETIF_F_HIGHDMA;
1005
1006 netdev->features |= NETIF_F_LLTX;
1007
1008 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
1009
1010 /* initialize eeprom parameters */
1011
1012 if (e1000_init_eeprom_params(&adapter->hw)) {
1013 E1000_ERR("EEPROM initialization failed\n");
 1014 goto err_eeprom;
1015 }
1016
 1017 /* before reading the EEPROM, reset the controller to
 1018 * put the device in a known good starting state */
 1019
1020 e1000_reset_hw(&adapter->hw);
1021
1022 /* make sure the EEPROM is good */
1023
 1024 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
 1025 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
1026 goto err_eeprom;
1027 }
1028
1029 /* copy the MAC address out of the EEPROM */
1030
 1031 if (e1000_read_mac_addr(&adapter->hw))
1032 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
1033 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
 1034 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
 1035
 1036 if (!is_valid_ether_addr(netdev->perm_addr)) {
 1037 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
1038 goto err_eeprom;
1039 }
1040
1041 e1000_get_bus_info(&adapter->hw);
1042
1043 init_timer(&adapter->tx_fifo_stall_timer);
1044 adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
1045 adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
1046
1047 init_timer(&adapter->watchdog_timer);
1048 adapter->watchdog_timer.function = &e1000_watchdog;
1049 adapter->watchdog_timer.data = (unsigned long) adapter;
1050
1051 init_timer(&adapter->phy_info_timer);
1052 adapter->phy_info_timer.function = &e1000_update_phy_info;
1053 adapter->phy_info_timer.data = (unsigned long) adapter;
1054
 1055 INIT_WORK(&adapter->reset_task, e1000_reset_task);
 1056
1057 e1000_check_options(adapter);
1058
1059 /* Initial Wake on LAN setting
1060 * If APM wake is enabled in the EEPROM,
1061 * enable the ACPI Magic Packet filter
1062 */
1063
 1064 switch (adapter->hw.mac_type) {
1065 case e1000_82542_rev2_0:
1066 case e1000_82542_rev2_1:
1067 case e1000_82543:
1068 break;
1069 case e1000_82544:
1070 e1000_read_eeprom(&adapter->hw,
1071 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1072 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1073 break;
1074 case e1000_ich8lan:
1075 e1000_read_eeprom(&adapter->hw,
1076 EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
1077 eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
1078 break;
1079 case e1000_82546:
1080 case e1000_82546_rev_3:
 1081 case e1000_82571:
 1082 case e1000_80003es2lan:
 1083 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
1084 e1000_read_eeprom(&adapter->hw,
1085 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1086 break;
1087 }
1088 /* Fall Through */
1089 default:
1090 e1000_read_eeprom(&adapter->hw,
1091 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1092 break;
1093 }
 1094 if (eeprom_data & eeprom_apme_mask)
1095 adapter->eeprom_wol |= E1000_WUFC_MAG;
1096
1097 /* now that we have the eeprom settings, apply the special cases
1098 * where the eeprom may be wrong or the board simply won't support
1099 * wake on lan on a particular port */
1100 switch (pdev->device) {
1101 case E1000_DEV_ID_82546GB_PCIE:
1102 adapter->eeprom_wol = 0;
1103 break;
1104 case E1000_DEV_ID_82546EB_FIBER:
1105 case E1000_DEV_ID_82546GB_FIBER:
1106 case E1000_DEV_ID_82571EB_FIBER:
1107 /* Wake events only supported on port A for dual fiber
1108 * regardless of eeprom setting */
1109 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
1110 adapter->eeprom_wol = 0;
1111 break;
1112 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 1113 case E1000_DEV_ID_82571EB_QUAD_COPPER:
 1114 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1115 /* if quad port adapter, disable WoL on all but port A */
1116 if (global_quad_port_a != 0)
1117 adapter->eeprom_wol = 0;
1118 else
1119 adapter->quad_port_a = 1;
1120 /* Reset for multiple quad port adapters */
1121 if (++global_quad_port_a == 4)
1122 global_quad_port_a = 0;
1123 break;
1124 }
1125
1126 /* initialize the wol settings based on the eeprom settings */
1127 adapter->wol = adapter->eeprom_wol;
 1128
1129 /* print bus type/speed/width info */
1130 {
1131 struct e1000_hw *hw = &adapter->hw;
1132 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
1133 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
1134 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
1135 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1136 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1137 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
1138 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
1139 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1140 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
1141 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
1142 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
1143 "32-bit"));
1144 }
1145
1146 for (i = 0; i < 6; i++)
1147 printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
1148
1149 /* reset the hardware with the new settings */
1150 e1000_reset(adapter);
1151
1152 /* If the controller is 82573 and f/w is AMT, do not set
1153 * DRV_LOAD until the interface is up. For all other cases,
1154 * let the f/w know that the h/w is now under the control
1155 * of the driver. */
1156 if (adapter->hw.mac_type != e1000_82573 ||
1157 !e1000_check_mng_mode(&adapter->hw))
1158 e1000_get_hw_control(adapter);
 1159
 1160 strcpy(netdev->name, "eth%d");
 1161 if ((err = register_netdev(netdev)))
1162 goto err_register;
1163
1164 /* tell the stack to leave us alone until e1000_open() is called */
1165 netif_carrier_off(netdev);
1166 netif_stop_queue(netdev);
1167
1168 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
1169
1170 cards_found++;
1171 return 0;
1172
1173err_register:
1174 e1000_release_hw_control(adapter);
1175err_eeprom:
1176 if (!e1000_check_phy_reset_block(&adapter->hw))
1177 e1000_phy_hw_reset(&adapter->hw);
1178
1179 if (adapter->hw.flash_address)
1180 iounmap(adapter->hw.flash_address);
1181err_flashmap:
1182#ifdef CONFIG_E1000_NAPI
1183 for (i = 0; i < adapter->num_rx_queues; i++)
1184 dev_put(&adapter->polling_netdev[i]);
1185#endif
1186
1187 kfree(adapter->tx_ring);
1188 kfree(adapter->rx_ring);
1189#ifdef CONFIG_E1000_NAPI
1190 kfree(adapter->polling_netdev);
1191#endif
 1192err_sw_init:
1193 iounmap(adapter->hw.hw_addr);
1194err_ioremap:
1195 free_netdev(netdev);
1196err_alloc_etherdev:
1197 pci_release_regions(pdev);
1198err_pci_reg:
1199err_dma:
1200 pci_disable_device(pdev);
1201 return err;
1202}
1203
1204/**
1205 * e1000_remove - Device Removal Routine
1206 * @pdev: PCI device information struct
1207 *
1208 * e1000_remove is called by the PCI subsystem to alert the driver
1209 * that it should release a PCI device. The could be caused by a
1210 * Hot-Plug event, or because the driver is going to be removed from
1211 * memory.
1212 **/
1213
1214static void __devexit
1215e1000_remove(struct pci_dev *pdev)
1216{
1217 struct net_device *netdev = pci_get_drvdata(pdev);
 1218 struct e1000_adapter *adapter = netdev_priv(netdev);
1219#ifdef CONFIG_E1000_NAPI
1220 int i;
1221#endif
 1222
1223 flush_scheduled_work();
1224
 1225 e1000_release_manageability(adapter);
 1226
1227 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1228 * would have already happened in close and is redundant. */
1229 e1000_release_hw_control(adapter);
 1230
 1231 unregister_netdev(netdev);
 1232#ifdef CONFIG_E1000_NAPI
 1233 for (i = 0; i < adapter->num_rx_queues; i++)
 1234 dev_put(&adapter->polling_netdev[i]);
 1235#endif
 1236
 1237 if (!e1000_check_phy_reset_block(&adapter->hw))
 1238 e1000_phy_hw_reset(&adapter->hw);
 1239
1240 kfree(adapter->tx_ring);
1241 kfree(adapter->rx_ring);
1242#ifdef CONFIG_E1000_NAPI
1243 kfree(adapter->polling_netdev);
1244#endif
1245
 1246 iounmap(adapter->hw.hw_addr);
1247 if (adapter->hw.flash_address)
1248 iounmap(adapter->hw.flash_address);
1249 pci_release_regions(pdev);
1250
1251 free_netdev(netdev);
1252
1253 pci_disable_device(pdev);
1254}
1255
1256/**
1257 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1258 * @adapter: board private structure to initialize
1259 *
1260 * e1000_sw_init initializes the Adapter private data structure.
1261 * Fields are initialized based on PCI device information and
1262 * OS network device settings (MTU size).
1263 **/
1264
1265static int __devinit
1266e1000_sw_init(struct e1000_adapter *adapter)
1267{
1268 struct e1000_hw *hw = &adapter->hw;
1269 struct net_device *netdev = adapter->netdev;
1270 struct pci_dev *pdev = adapter->pdev;
1271#ifdef CONFIG_E1000_NAPI
1272 int i;
1273#endif
1274
1275 /* PCI config space info */
1276
1277 hw->vendor_id = pdev->vendor;
1278 hw->device_id = pdev->device;
1279 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1280 hw->subsystem_id = pdev->subsystem_device;
1281
1282 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
1283
1284 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1285
 1286 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 1287 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
1288 hw->max_frame_size = netdev->mtu +
1289 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1290 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
1291
1292 /* identify the MAC */
1293
 1294 if (e1000_set_mac_type(hw)) {
1295 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
1296 return -EIO;
1297 }
1298
 1299 switch (hw->mac_type) {
1300 default:
1301 break;
1302 case e1000_82541:
1303 case e1000_82547:
1304 case e1000_82541_rev_2:
1305 case e1000_82547_rev_2:
1306 hw->phy_init_script = 1;
1307 break;
1308 }
1309
1310 e1000_set_media_type(hw);
1311
1312 hw->wait_autoneg_complete = FALSE;
1313 hw->tbi_compatibility_en = TRUE;
1314 hw->adaptive_ifs = TRUE;
1315
1316 /* Copper options */
1317
 1318 if (hw->media_type == e1000_media_type_copper) {
1319 hw->mdix = AUTO_ALL_MODES;
1320 hw->disable_polarity_correction = FALSE;
1321 hw->master_slave = E1000_MASTER_SLAVE;
1322 }
1323
1324 adapter->num_tx_queues = 1;
1325 adapter->num_rx_queues = 1;
1326
1327 if (e1000_alloc_queues(adapter)) {
1328 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
1329 return -ENOMEM;
1330 }
1331
1332#ifdef CONFIG_E1000_NAPI
 1333 for (i = 0; i < adapter->num_rx_queues; i++) {
1334 adapter->polling_netdev[i].priv = adapter;
1335 adapter->polling_netdev[i].poll = &e1000_clean;
1336 adapter->polling_netdev[i].weight = 64;
1337 dev_hold(&adapter->polling_netdev[i]);
1338 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
1339 }
 1340 spin_lock_init(&adapter->tx_queue_lock);
1341#endif
1342
1343 atomic_set(&adapter->irq_sem, 1);
1344 spin_lock_init(&adapter->stats_lock);
 1345
1346 set_bit(__E1000_DOWN, &adapter->flags);
1347
1348 return 0;
1349}
1350
1351/**
1352 * e1000_alloc_queues - Allocate memory for all rings
1353 * @adapter: board private structure to initialize
1354 *
1355 * We allocate one ring per queue at run-time since we don't know the
1356 * number of queues at compile-time. The polling_netdev array is
1357 * intended for Multiqueue, but should work fine with a single queue.
1358 **/
1359
1360static int __devinit
1361e1000_alloc_queues(struct e1000_adapter *adapter)
1362{
1363 int size;
1364
 1365 size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
1366 adapter->tx_ring = kmalloc(size, GFP_KERNEL);
1367 if (!adapter->tx_ring)
1368 return -ENOMEM;
1369 memset(adapter->tx_ring, 0, size);
1370
 1371 size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
1372 adapter->rx_ring = kmalloc(size, GFP_KERNEL);
1373 if (!adapter->rx_ring) {
1374 kfree(adapter->tx_ring);
1375 return -ENOMEM;
1376 }
1377 memset(adapter->rx_ring, 0, size);
1378
1379#ifdef CONFIG_E1000_NAPI
 1380 size = sizeof(struct net_device) * adapter->num_rx_queues;
1381 adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
1382 if (!adapter->polling_netdev) {
1383 kfree(adapter->tx_ring);
1384 kfree(adapter->rx_ring);
1385 return -ENOMEM;
1386 }
1387 memset(adapter->polling_netdev, 0, size);
1388#endif
1389
1390 return E1000_SUCCESS;
1391}
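/* Design note (added commentary, not in the original source): each
 * kmalloc()+memset() pair above zero-fills a fresh allocation, which is
 * functionally equivalent to a single kzalloc(size, GFP_KERNEL) call; the
 * two-step form simply keeps the allocation and the clearing explicit. */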
1392
1393/**
1394 * e1000_open - Called when a network interface is made active
1395 * @netdev: network interface device structure
1396 *
1397 * Returns 0 on success, negative value on failure
1398 *
1399 * The open entry point is called when a network interface is made
1400 * active by the system (IFF_UP). At this point all resources needed
1401 * for transmit and receive operations are allocated, the interrupt
1402 * handler is registered with the OS, the watchdog timer is started,
1403 * and the stack is notified that the interface is ready.
1404 **/
1405
1406static int
1407e1000_open(struct net_device *netdev)
1408{
 1409 struct e1000_adapter *adapter = netdev_priv(netdev);
1410 int err;
1411
 1412 /* disallow open during test */
 1413 if (test_bit(__E1000_TESTING, &adapter->flags))
1414 return -EBUSY;
1415
 1416 /* allocate transmit descriptors */
 1417 if ((err = e1000_setup_all_tx_resources(adapter)))
1418 goto err_setup_tx;
1419
1420 /* allocate receive descriptors */
 1421 if ((err = e1000_setup_all_rx_resources(adapter)))
1422 goto err_setup_rx;
1423
1424 err = e1000_request_irq(adapter);
1425 if (err)
 1426 goto err_req_irq;
 1427
1428 e1000_power_up_phy(adapter);
1429
 1430 if ((err = e1000_up(adapter)))
 1431 goto err_up;
 1432 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 1433 if ((adapter->hw.mng_cookie.status &
1434 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1435 e1000_update_mng_vlan(adapter);
1436 }
 1437
1438 /* If AMT is enabled, let the firmware know that the network
1439 * interface is now open */
1440 if (adapter->hw.mac_type == e1000_82573 &&
1441 e1000_check_mng_mode(&adapter->hw))
1442 e1000_get_hw_control(adapter);
1443
1444 return E1000_SUCCESS;
1445
1446err_up:
1447 e1000_power_down_phy(adapter);
1448 e1000_free_irq(adapter);
1449err_req_irq:
 1450 e1000_free_all_rx_resources(adapter);
 1451err_setup_rx:
 1452 e1000_free_all_tx_resources(adapter);
1453err_setup_tx:
1454 e1000_reset(adapter);
1455
1456 return err;
1457}
1458
1459/**
1460 * e1000_close - Disables a network interface
1461 * @netdev: network interface device structure
1462 *
1463 * Returns 0, this is not allowed to fail
1464 *
1465 * The close entry point is called when an interface is de-activated
1466 * by the OS. The hardware is still under the drivers control, but
1467 * needs to be disabled. A global MAC reset is issued to stop the
1468 * hardware, and all transmit and receive resources are freed.
1469 **/
1470
1471static int
1472e1000_close(struct net_device *netdev)
1473{
 1474 struct e1000_adapter *adapter = netdev_priv(netdev);
 1475
 1476 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 1477 e1000_down(adapter);
 1478 e1000_power_down_phy(adapter);
 1479 e1000_free_irq(adapter);
 1480
1481 e1000_free_all_tx_resources(adapter);
1482 e1000_free_all_rx_resources(adapter);
 1483
1484 /* kill manageability vlan ID if supported, but not if a vlan with
1485 * the same ID is registered on the host OS (let 8021q kill it) */
 1486 if ((adapter->hw.mng_cookie.status &
1487 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1488 !(adapter->vlgrp &&
1489 adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
1490 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1491 }
1492
1493 /* If AMT is enabled, let the firmware know that the network
1494 * interface is now closed */
1495 if (adapter->hw.mac_type == e1000_82573 &&
1496 e1000_check_mng_mode(&adapter->hw))
1497 e1000_release_hw_control(adapter);
1498
1499 return 0;
1500}
1501
1502/**
1503 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1504 * @adapter: address of board private structure
1505 * @start: address of beginning of memory
1506 * @len: length of memory
 1507 **/
 1508static boolean_t
1509e1000_check_64k_bound(struct e1000_adapter *adapter,
1510 void *start, unsigned long len)
1511{
1512 unsigned long begin = (unsigned long) start;
1513 unsigned long end = begin + len;
1514
1515 /* First rev 82545 and 82546 need to not allow any memory
1516 * write location to cross 64k boundary due to errata 23 */
 1517 if (adapter->hw.mac_type == e1000_82545 ||
 1518 adapter->hw.mac_type == e1000_82546) {
1519 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
1520 }
1521
1522 return TRUE;
1523}
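/* Illustrative example (not part of the original source): the XOR test above
 * compares the upper 16 bits of the first and last byte addresses.
 *   begin = 0x0800FFF0, len = 0x20   -> end - 1 = 0x0801000F
 *     (0x0800FFF0 ^ 0x0801000F) >> 16 = 1 -> crosses a 64 KB boundary (FALSE)
 *   begin = 0x08000000, len = 0x1000 -> end - 1 = 0x08000FFF
 *     (0x08000000 ^ 0x08000FFF) >> 16 = 0 -> same 64 KB page (TRUE) */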
1524
1525/**
1526 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1527 * @adapter: board private structure
 1528 * @txdr: tx descriptor ring (for a specific queue) to setup
1529 *
1530 * Return 0 on success, negative on failure
1531 **/
1532
 1533static int
1534e1000_setup_tx_resources(struct e1000_adapter *adapter,
1535 struct e1000_tx_ring *txdr)
 1536{
1537 struct pci_dev *pdev = adapter->pdev;
1538 int size;
1539
1540 size = sizeof(struct e1000_buffer) * txdr->count;
 1541 txdr->buffer_info = vmalloc(size);
 1542 if (!txdr->buffer_info) {
1543 DPRINTK(PROBE, ERR,
1544 "Unable to allocate memory for the transmit descriptor ring\n");
1545 return -ENOMEM;
1546 }
1547 memset(txdr->buffer_info, 0, size);
1548
1549 /* round up to nearest 4K */
1550
1551 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1552 E1000_ROUNDUP(txdr->size, 4096);
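/* Sizing example (illustrative, assuming the 16-byte legacy e1000_tx_desc
 * layout): a ring of 256 descriptors needs 256 * 16 = 4096 bytes, already a
 * whole 4 KB page, while 80 descriptors (80 * 16 = 1280 bytes) would be
 * rounded up to 4096 by the E1000_ROUNDUP above. */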
1553
1554 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
 1555 if (!txdr->desc) {
 1556setup_tx_desc_die:
 1557 vfree(txdr->buffer_info);
1558 DPRINTK(PROBE, ERR,
1559 "Unable to allocate memory for the transmit descriptor ring\n");
1560 return -ENOMEM;
1561 }
1562
 1563 /* Fix for errata 23, can't cross 64kB boundary */
1564 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1565 void *olddesc = txdr->desc;
1566 dma_addr_t olddma = txdr->dma;
1567 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
1568 "at %p\n", txdr->size, txdr->desc);
1569 /* Try again, without freeing the previous */
 1570 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
 1571 /* Failed allocation, critical failure */
 1572 if (!txdr->desc) {
1573 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1574 goto setup_tx_desc_die;
1575 }
1576
1577 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1578 /* give up */
1579 pci_free_consistent(pdev, txdr->size, txdr->desc,
1580 txdr->dma);
1581 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1582 DPRINTK(PROBE, ERR,
1583 "Unable to allocate aligned memory "
1584 "for the transmit descriptor ring\n");
1585 vfree(txdr->buffer_info);
1586 return -ENOMEM;
1587 } else {
 1588 /* Free old allocation, new allocation was successful */
1589 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1590 }
1591 }
1592 memset(txdr->desc, 0, txdr->size);
1593
1594 txdr->next_to_use = 0;
1595 txdr->next_to_clean = 0;
2ae76d98 1596 spin_lock_init(&txdr->tx_lock);
1597
1598 return 0;
1599}
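/* Editor's note: illustrative sketch only, not part of the driver.
 * E1000_ROUNDUP() above rounds the descriptor ring size up to a 4 kB
 * multiple; for a power-of-two boundary an equivalent open-coded form
 * (hypothetical helper) is: */
static unsigned long ring_size_4k(unsigned long size)
{
	return (size + 4095UL) & ~4095UL;
}
/* Example: 80 legacy Tx descriptors of 16 bytes each give size = 1280, which
 * rounds up to 4096; 256 descriptors give exactly 4096 and are unchanged. */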
1600
1601/**
1602 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1603 * (Descriptors) for all queues
1604 * @adapter: board private structure
1605 *
1606 * Return 0 on success, negative on failure
1607 **/
1608
1609int
1610e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1611{
1612 int i, err = 0;
1613
f56799ea 1614 for (i = 0; i < adapter->num_tx_queues; i++) {
1615 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1616 if (err) {
1617 DPRINTK(PROBE, ERR,
1618 "Allocation for Tx Queue %u failed\n", i);
1619 for (i-- ; i >= 0; i--)
1620 e1000_free_tx_resources(adapter,
1621 &adapter->tx_ring[i]);
1622 break;
1623 }
1624 }
1625
1626 return err;
1627}
1628
1629/**
1630 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1631 * @adapter: board private structure
1632 *
1633 * Configure the Tx unit of the MAC after a reset.
1634 **/
1635
1636static void
1637e1000_configure_tx(struct e1000_adapter *adapter)
1638{
1639 uint64_t tdba;
1640 struct e1000_hw *hw = &adapter->hw;
1641 uint32_t tdlen, tctl, tipg, tarc;
0fadb059 1642 uint32_t ipgr1, ipgr2;
1643
1644 /* Setup the HW Tx Head and Tail descriptor pointers */
1645
f56799ea 1646 switch (adapter->num_tx_queues) {
1647 case 1:
1648 default:
1649 tdba = adapter->tx_ring[0].dma;
1650 tdlen = adapter->tx_ring[0].count *
1651 sizeof(struct e1000_tx_desc);
581d708e 1652 E1000_WRITE_REG(hw, TDLEN, tdlen);
1653 E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
1654 E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
581d708e 1655 E1000_WRITE_REG(hw, TDT, 0);
4ca213a6 1656 E1000_WRITE_REG(hw, TDH, 0);
1657 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1658 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1659 break;
1660 }
1661
1662 /* Set the default values for the Tx Inter Packet Gap timer */
1663 if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
1664 (hw->media_type == e1000_media_type_fiber ||
1665 hw->media_type == e1000_media_type_internal_serdes))
1666 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1667 else
1668 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1669
581d708e 1670 switch (hw->mac_type) {
1671 case e1000_82542_rev2_0:
1672 case e1000_82542_rev2_1:
1673 tipg = DEFAULT_82542_TIPG_IPGT;
1674 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1675 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1da177e4 1676 break;
1677 case e1000_80003es2lan:
1678 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1679 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1680 break;
1da177e4 1681 default:
1682 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1683 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1684 break;
1da177e4 1685 }
1686 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1687 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
581d708e 1688 E1000_WRITE_REG(hw, TIPG, tipg);
1689
1690 /* Set the Tx Interrupt Delay register */
1691
1692 E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
1693 if (hw->mac_type >= e1000_82540)
1694 E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
1695
1696 /* Program the Transmit Control Register */
1697
581d708e 1698 tctl = E1000_READ_REG(hw, TCTL);
1da177e4 1699 tctl &= ~E1000_TCTL_CT;
7e6c9861 1700 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1701 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1702
1703 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1704 tarc = E1000_READ_REG(hw, TARC0);
1705 /* set the speed mode bit, we'll clear it if we're not at
1706 * gigabit link later */
09ae3e88 1707 tarc |= (1 << 21);
2ae76d98 1708 E1000_WRITE_REG(hw, TARC0, tarc);
1709 } else if (hw->mac_type == e1000_80003es2lan) {
1710 tarc = E1000_READ_REG(hw, TARC0);
1711 tarc |= 1;
1712 E1000_WRITE_REG(hw, TARC0, tarc);
1713 tarc = E1000_READ_REG(hw, TARC1);
1714 tarc |= 1;
1715 E1000_WRITE_REG(hw, TARC1, tarc);
1716 }
1717
581d708e 1718 e1000_config_collision_dist(hw);
1719
1720 /* Setup Transmit Descriptor Settings for eop descriptor */
1721 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1722
1723 /* only set IDE if we are delaying interrupts using the timers */
1724 if (adapter->tx_int_delay)
1725 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1da177e4 1726
581d708e 1727 if (hw->mac_type < e1000_82543)
1728 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1729 else
1730 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1731
1732 /* Cache if we're 82544 running in PCI-X because we'll
1733 * need this to apply a workaround later in the send path. */
1734 if (hw->mac_type == e1000_82544 &&
1735 hw->bus_type == e1000_bus_type_pcix)
1da177e4 1736 adapter->pcix_82544 = 1;
1737
1738 E1000_WRITE_REG(hw, TCTL, tctl);
1739
1740}
1741
1742/**
1743 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1744 * @adapter: board private structure
581d708e 1745 * @rxdr: rx descriptor ring (for a specific queue) to setup
1da177e4
LT
1746 *
1747 * Returns 0 on success, negative on failure
1748 **/
1749
3ad2cc67 1750static int
1751e1000_setup_rx_resources(struct e1000_adapter *adapter,
1752 struct e1000_rx_ring *rxdr)
1da177e4 1753{
1da177e4 1754 struct pci_dev *pdev = adapter->pdev;
2d7edb92 1755 int size, desc_len;
1da177e4
LT
1756
1757 size = sizeof(struct e1000_buffer) * rxdr->count;
cd94dd0b 1758 rxdr->buffer_info = vmalloc(size);
581d708e 1759 if (!rxdr->buffer_info) {
2648345f
MC
1760 DPRINTK(PROBE, ERR,
1761 "Unable to allocate memory for the receive descriptor ring\n");
1da177e4
LT
1762 return -ENOMEM;
1763 }
1764 memset(rxdr->buffer_info, 0, size);
1765
2d7edb92
MC
1766 size = sizeof(struct e1000_ps_page) * rxdr->count;
1767 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
96838a40 1768 if (!rxdr->ps_page) {
2d7edb92
MC
1769 vfree(rxdr->buffer_info);
1770 DPRINTK(PROBE, ERR,
1771 "Unable to allocate memory for the receive descriptor ring\n");
1772 return -ENOMEM;
1773 }
1774 memset(rxdr->ps_page, 0, size);
1775
1776 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1777 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
96838a40 1778 if (!rxdr->ps_page_dma) {
2d7edb92
MC
1779 vfree(rxdr->buffer_info);
1780 kfree(rxdr->ps_page);
1781 DPRINTK(PROBE, ERR,
1782 "Unable to allocate memory for the receive descriptor ring\n");
1783 return -ENOMEM;
1784 }
1785 memset(rxdr->ps_page_dma, 0, size);
1786
96838a40 1787 if (adapter->hw.mac_type <= e1000_82547_rev_2)
2d7edb92
MC
1788 desc_len = sizeof(struct e1000_rx_desc);
1789 else
1790 desc_len = sizeof(union e1000_rx_desc_packet_split);
1791
1da177e4
LT
1792 /* Round up to nearest 4K */
1793
2d7edb92 1794 rxdr->size = rxdr->count * desc_len;
1da177e4
LT
1795 E1000_ROUNDUP(rxdr->size, 4096);
1796
1797 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1798
581d708e
MC
1799 if (!rxdr->desc) {
1800 DPRINTK(PROBE, ERR,
1801 "Unable to allocate memory for the receive descriptor ring\n");
1da177e4 1802setup_rx_desc_die:
1da177e4 1803 vfree(rxdr->buffer_info);
2d7edb92
MC
1804 kfree(rxdr->ps_page);
1805 kfree(rxdr->ps_page_dma);
1da177e4
LT
1806 return -ENOMEM;
1807 }
1808
2648345f 1809 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
1810 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1811 void *olddesc = rxdr->desc;
1812 dma_addr_t olddma = rxdr->dma;
2648345f
MC
1813 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1814 "at %p\n", rxdr->size, rxdr->desc);
1815 /* Try again, without freeing the previous */
1da177e4 1816 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
2648345f 1817 /* Failed allocation, critical failure */
581d708e 1818 if (!rxdr->desc) {
1da177e4 1819 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
581d708e
MC
1820 DPRINTK(PROBE, ERR,
1821 "Unable to allocate memory "
1822 "for the receive descriptor ring\n");
1da177e4
LT
1823 goto setup_rx_desc_die;
1824 }
1825
1826 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1827 /* give up */
2648345f
MC
1828 pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1829 rxdr->dma);
1da177e4 1830 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
2648345f
MC
1831 DPRINTK(PROBE, ERR,
1832 "Unable to allocate aligned memory "
1833 "for the receive descriptor ring\n");
581d708e 1834 goto setup_rx_desc_die;
1da177e4 1835 } else {
2648345f 1836 /* Free old allocation, new allocation was successful */
1da177e4
LT
1837 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1838 }
1839 }
1840 memset(rxdr->desc, 0, rxdr->size);
1841
1842 rxdr->next_to_clean = 0;
1843 rxdr->next_to_use = 0;
1844
1845 return 0;
1846}
1847
1848/**
1849 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1850 * (Descriptors) for all queues
1851 * @adapter: board private structure
1852 *
1853 * Return 0 on success, negative on failure
1854 **/
1855
1856int
1857e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1858{
1859 int i, err = 0;
1860
f56799ea 1861 for (i = 0; i < adapter->num_rx_queues; i++) {
1862 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1863 if (err) {
1864 DPRINTK(PROBE, ERR,
1865 "Allocation for Rx Queue %u failed\n", i);
1866 for (i-- ; i >= 0; i--)
1867 e1000_free_rx_resources(adapter,
1868 &adapter->rx_ring[i]);
1869 break;
1870 }
1871 }
1872
1873 return err;
1874}
1875
1da177e4 1876/**
2648345f 1877 * e1000_setup_rctl - configure the receive control registers
1da177e4
LT
1878 * @adapter: Board private structure
1879 **/
e4c811c9
MC
1880#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1881 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1882static void
1883e1000_setup_rctl(struct e1000_adapter *adapter)
1884{
2d7edb92
MC
1885 uint32_t rctl, rfctl;
1886 uint32_t psrctl = 0;
35ec56bb 1887#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
e4c811c9
MC
1888 uint32_t pages = 0;
1889#endif
1da177e4
LT
1890
1891 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1892
1893 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1894
1895 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1896 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1897 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1898
0fadb059 1899 if (adapter->hw.tbi_compatibility_on == 1)
1da177e4
LT
1900 rctl |= E1000_RCTL_SBP;
1901 else
1902 rctl &= ~E1000_RCTL_SBP;
1903
2d7edb92
MC
1904 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1905 rctl &= ~E1000_RCTL_LPE;
1906 else
1907 rctl |= E1000_RCTL_LPE;
1908
1da177e4 1909 /* Setup buffer sizes */
9e2feace
AK
1910 rctl &= ~E1000_RCTL_SZ_4096;
1911 rctl |= E1000_RCTL_BSEX;
1912 switch (adapter->rx_buffer_len) {
1913 case E1000_RXBUFFER_256:
1914 rctl |= E1000_RCTL_SZ_256;
1915 rctl &= ~E1000_RCTL_BSEX;
1916 break;
1917 case E1000_RXBUFFER_512:
1918 rctl |= E1000_RCTL_SZ_512;
1919 rctl &= ~E1000_RCTL_BSEX;
1920 break;
1921 case E1000_RXBUFFER_1024:
1922 rctl |= E1000_RCTL_SZ_1024;
1923 rctl &= ~E1000_RCTL_BSEX;
1924 break;
a1415ee6
JK
1925 case E1000_RXBUFFER_2048:
1926 default:
1927 rctl |= E1000_RCTL_SZ_2048;
1928 rctl &= ~E1000_RCTL_BSEX;
1929 break;
1930 case E1000_RXBUFFER_4096:
1931 rctl |= E1000_RCTL_SZ_4096;
1932 break;
1933 case E1000_RXBUFFER_8192:
1934 rctl |= E1000_RCTL_SZ_8192;
1935 break;
1936 case E1000_RXBUFFER_16384:
1937 rctl |= E1000_RCTL_SZ_16384;
1938 break;
2d7edb92
MC
1939 }
1940
35ec56bb 1941#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
2d7edb92
MC
1942 /* 82571 and greater support packet-split where the protocol
1943 * header is placed in skb->data and the packet data is
1944 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1945 * In the case of a non-split, skb->data is linearly filled,
1946 * followed by the page buffers. Therefore, skb->data is
1947 * sized to hold the largest protocol header.
1948 */
e64d7d02
JB
1949 /* allocations using alloc_page take too long for regular MTU
1950 * so only enable packet split for jumbo frames */
e4c811c9 1951 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
e64d7d02
JB
1952 if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) &&
1953 PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
e4c811c9
MC
1954 adapter->rx_ps_pages = pages;
1955 else
1956 adapter->rx_ps_pages = 0;
2d7edb92 1957#endif
e4c811c9 1958 if (adapter->rx_ps_pages) {
2d7edb92
MC
1959 /* Configure extra packet-split registers */
1960 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
1961 rfctl |= E1000_RFCTL_EXTEN;
87ca4e5b
AK
1962 /* disable packet split support for IPv6 extension headers,
1963 * because some malformed IPv6 headers can hang the RX */
1964 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1965 E1000_RFCTL_NEW_IPV6_EXT_DIS);
1966
2d7edb92
MC
1967 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1968
7dfee0cb 1969 rctl |= E1000_RCTL_DTYP_PS;
96838a40 1970
2d7edb92
MC
1971 psrctl |= adapter->rx_ps_bsize0 >>
1972 E1000_PSRCTL_BSIZE0_SHIFT;
e4c811c9
MC
1973
1974 switch (adapter->rx_ps_pages) {
1975 case 3:
1976 psrctl |= PAGE_SIZE <<
1977 E1000_PSRCTL_BSIZE3_SHIFT;
1978 case 2:
1979 psrctl |= PAGE_SIZE <<
1980 E1000_PSRCTL_BSIZE2_SHIFT;
1981 case 1:
1982 psrctl |= PAGE_SIZE >>
1983 E1000_PSRCTL_BSIZE1_SHIFT;
1984 break;
1985 }
2d7edb92
MC
1986
1987 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
1da177e4
LT
1988 }
1989
1990 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1991}
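/* Editor's note: worked example, not driver code. Assuming 4 kB pages
 * (PAGE_SHIFT == 12), the PAGE_USE_COUNT() macro above evaluates to:
 *   PAGE_USE_COUNT(1500) == 0 + 1 == 1   (standard MTU)
 *   PAGE_USE_COUNT(9000) == 2 + 1 == 3   (jumbo MTU)
 * so the "pages <= 3 && ... && (rctl & E1000_RCTL_LPE)" test enables packet
 * split only for jumbo MTUs on 82571 and newer parts; at 1500 MTU the LPE
 * bit is cleared earlier in this function and rx_ps_pages stays 0. */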
1992
1993/**
1994 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1995 * @adapter: board private structure
1996 *
1997 * Configure the Rx unit of the MAC after a reset.
1998 **/
1999
2000static void
2001e1000_configure_rx(struct e1000_adapter *adapter)
2002{
2003 uint64_t rdba;
2004 struct e1000_hw *hw = &adapter->hw;
2005 uint32_t rdlen, rctl, rxcsum, ctrl_ext;
2d7edb92 2006
e4c811c9 2007 if (adapter->rx_ps_pages) {
0f15a8fa 2008 /* this is a 32 byte descriptor */
581d708e 2009 rdlen = adapter->rx_ring[0].count *
2d7edb92
MC
2010 sizeof(union e1000_rx_desc_packet_split);
2011 adapter->clean_rx = e1000_clean_rx_irq_ps;
2012 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2013 } else {
581d708e
MC
2014 rdlen = adapter->rx_ring[0].count *
2015 sizeof(struct e1000_rx_desc);
2d7edb92
MC
2016 adapter->clean_rx = e1000_clean_rx_irq;
2017 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2018 }
1da177e4
LT
2019
2020 /* disable receives while setting up the descriptors */
581d708e
MC
2021 rctl = E1000_READ_REG(hw, RCTL);
2022 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
1da177e4
LT
2023
2024 /* set the Receive Delay Timer Register */
581d708e 2025 E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
1da177e4 2026
581d708e
MC
2027 if (hw->mac_type >= e1000_82540) {
2028 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
835bb129 2029 if (adapter->itr_setting != 0)
581d708e 2030 E1000_WRITE_REG(hw, ITR,
1da177e4
LT
2031 1000000000 / (adapter->itr * 256));
2032 }
2033
2ae76d98 2034 if (hw->mac_type >= e1000_82571) {
2ae76d98 2035 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1e613fd9 2036 /* Reset delay timers after every interrupt */
6fc7a7ec 2037 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
1e613fd9 2038#ifdef CONFIG_E1000_NAPI
835bb129 2039 /* Auto-Mask interrupts upon ICR access */
1e613fd9 2040 ctrl_ext |= E1000_CTRL_EXT_IAME;
835bb129 2041 E1000_WRITE_REG(hw, IAM, 0xffffffff);
1e613fd9 2042#endif
2ae76d98
MC
2043 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
2044 E1000_WRITE_FLUSH(hw);
2045 }
2046
581d708e
MC
2047 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2048 * the Base and Length of the Rx Descriptor Ring */
f56799ea 2049 switch (adapter->num_rx_queues) {
24025e4e
MC
2050 case 1:
2051 default:
581d708e 2052 rdba = adapter->rx_ring[0].dma;
581d708e 2053 E1000_WRITE_REG(hw, RDLEN, rdlen);
4ca213a6
AK
2054 E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
2055 E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
581d708e 2056 E1000_WRITE_REG(hw, RDT, 0);
4ca213a6 2057 E1000_WRITE_REG(hw, RDH, 0);
6a951698
AK
2058 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
2059 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
581d708e 2060 break;
24025e4e
MC
2061 }
2062
1da177e4 2063 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
581d708e
MC
2064 if (hw->mac_type >= e1000_82543) {
2065 rxcsum = E1000_READ_REG(hw, RXCSUM);
96838a40 2066 if (adapter->rx_csum == TRUE) {
2d7edb92
MC
2067 rxcsum |= E1000_RXCSUM_TUOFL;
2068
868d5309 2069 /* Enable 82571 IPv4 payload checksum for UDP fragments
2d7edb92 2070 * Must be used in conjunction with packet-split. */
96838a40
JB
2071 if ((hw->mac_type >= e1000_82571) &&
2072 (adapter->rx_ps_pages)) {
2d7edb92
MC
2073 rxcsum |= E1000_RXCSUM_IPPCSE;
2074 }
2075 } else {
2076 rxcsum &= ~E1000_RXCSUM_TUOFL;
2077 /* don't need to clear IPPCSE as it defaults to 0 */
2078 }
581d708e 2079 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1da177e4
LT
2080 }
2081
2082 /* enable early receives on 82573; only takes effect if using > 2048
2083 * byte total frame size, i.e. only for jumbo frames */
2084#define E1000_ERT_2048 0x100
2085 if (hw->mac_type == e1000_82573)
2086 E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
2087
1da177e4 2088 /* Enable Receives */
581d708e 2089 E1000_WRITE_REG(hw, RCTL, rctl);
1da177e4
LT
2090}
2091
2092/**
581d708e 2093 * e1000_free_tx_resources - Free Tx Resources per Queue
1da177e4 2094 * @adapter: board private structure
581d708e 2095 * @tx_ring: Tx descriptor ring for a specific queue
1da177e4
LT
2096 *
2097 * Free all transmit software resources
2098 **/
2099
3ad2cc67 2100static void
2101e1000_free_tx_resources(struct e1000_adapter *adapter,
2102 struct e1000_tx_ring *tx_ring)
1da177e4
LT
2103{
2104 struct pci_dev *pdev = adapter->pdev;
2105
581d708e 2106 e1000_clean_tx_ring(adapter, tx_ring);
1da177e4 2107
581d708e
MC
2108 vfree(tx_ring->buffer_info);
2109 tx_ring->buffer_info = NULL;
1da177e4 2110
581d708e 2111 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1da177e4 2112
581d708e
MC
2113 tx_ring->desc = NULL;
2114}
2115
2116/**
2117 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2118 * @adapter: board private structure
2119 *
2120 * Free all transmit software resources
2121 **/
2122
2123void
2124e1000_free_all_tx_resources(struct e1000_adapter *adapter)
2125{
2126 int i;
2127
f56799ea 2128 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 2129 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1da177e4
LT
2130}
2131
e619d523 2132static void
2133e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2134 struct e1000_buffer *buffer_info)
2135{
96838a40 2136 if (buffer_info->dma) {
2648345f
MC
2137 pci_unmap_page(adapter->pdev,
2138 buffer_info->dma,
2139 buffer_info->length,
2140 PCI_DMA_TODEVICE);
a9ebadd6 2141 buffer_info->dma = 0;
1da177e4 2142 }
a9ebadd6 2143 if (buffer_info->skb) {
1da177e4 2144 dev_kfree_skb_any(buffer_info->skb);
a9ebadd6
JB
2145 buffer_info->skb = NULL;
2146 }
2147 /* buffer_info must be completely set up in the transmit path */
1da177e4
LT
2148}
2149
2150/**
2151 * e1000_clean_tx_ring - Free Tx Buffers
2152 * @adapter: board private structure
581d708e 2153 * @tx_ring: ring to be cleaned
1da177e4
LT
2154 **/
2155
2156static void
2157e1000_clean_tx_ring(struct e1000_adapter *adapter,
2158 struct e1000_tx_ring *tx_ring)
1da177e4 2159{
1da177e4
LT
2160 struct e1000_buffer *buffer_info;
2161 unsigned long size;
2162 unsigned int i;
2163
2164 /* Free all the Tx ring sk_buffs */
2165
96838a40 2166 for (i = 0; i < tx_ring->count; i++) {
1da177e4
LT
2167 buffer_info = &tx_ring->buffer_info[i];
2168 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2169 }
2170
2171 size = sizeof(struct e1000_buffer) * tx_ring->count;
2172 memset(tx_ring->buffer_info, 0, size);
2173
2174 /* Zero out the descriptor ring */
2175
2176 memset(tx_ring->desc, 0, tx_ring->size);
2177
2178 tx_ring->next_to_use = 0;
2179 tx_ring->next_to_clean = 0;
fd803241 2180 tx_ring->last_tx_tso = 0;
1da177e4 2181
581d708e
MC
2182 writel(0, adapter->hw.hw_addr + tx_ring->tdh);
2183 writel(0, adapter->hw.hw_addr + tx_ring->tdt);
2184}
2185
2186/**
2187 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2188 * @adapter: board private structure
2189 **/
2190
2191static void
2192e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2193{
2194 int i;
2195
f56799ea 2196 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 2197 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1da177e4
LT
2198}
2199
2200/**
2201 * e1000_free_rx_resources - Free Rx Resources
2202 * @adapter: board private structure
581d708e 2203 * @rx_ring: ring to clean the resources from
1da177e4
LT
2204 *
2205 * Free all receive software resources
2206 **/
2207
3ad2cc67 2208static void
2209e1000_free_rx_resources(struct e1000_adapter *adapter,
2210 struct e1000_rx_ring *rx_ring)
1da177e4 2211{
1da177e4
LT
2212 struct pci_dev *pdev = adapter->pdev;
2213
581d708e 2214 e1000_clean_rx_ring(adapter, rx_ring);
1da177e4
LT
2215
2216 vfree(rx_ring->buffer_info);
2217 rx_ring->buffer_info = NULL;
2d7edb92
MC
2218 kfree(rx_ring->ps_page);
2219 rx_ring->ps_page = NULL;
2220 kfree(rx_ring->ps_page_dma);
2221 rx_ring->ps_page_dma = NULL;
1da177e4
LT
2222
2223 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2224
2225 rx_ring->desc = NULL;
2226}
2227
2228/**
581d708e 2229 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
1da177e4 2230 * @adapter: board private structure
581d708e
MC
2231 *
2232 * Free all receive software resources
2233 **/
2234
2235void
2236e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2237{
2238 int i;
2239
f56799ea 2240 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e
MC
2241 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2242}
2243
2244/**
2245 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2246 * @adapter: board private structure
2247 * @rx_ring: ring to free buffers from
1da177e4
LT
2248 **/
2249
2250static void
2251e1000_clean_rx_ring(struct e1000_adapter *adapter,
2252 struct e1000_rx_ring *rx_ring)
1da177e4 2253{
1da177e4 2254 struct e1000_buffer *buffer_info;
2d7edb92
MC
2255 struct e1000_ps_page *ps_page;
2256 struct e1000_ps_page_dma *ps_page_dma;
1da177e4
LT
2257 struct pci_dev *pdev = adapter->pdev;
2258 unsigned long size;
2d7edb92 2259 unsigned int i, j;
1da177e4
LT
2260
2261 /* Free all the Rx ring sk_buffs */
96838a40 2262 for (i = 0; i < rx_ring->count; i++) {
1da177e4 2263 buffer_info = &rx_ring->buffer_info[i];
96838a40 2264 if (buffer_info->skb) {
1da177e4
LT
2265 pci_unmap_single(pdev,
2266 buffer_info->dma,
2267 buffer_info->length,
2268 PCI_DMA_FROMDEVICE);
2269
2270 dev_kfree_skb(buffer_info->skb);
2271 buffer_info->skb = NULL;
997f5cbd
JK
2272 }
2273 ps_page = &rx_ring->ps_page[i];
2274 ps_page_dma = &rx_ring->ps_page_dma[i];
2275 for (j = 0; j < adapter->rx_ps_pages; j++) {
2276 if (!ps_page->ps_page[j]) break;
2277 pci_unmap_page(pdev,
2278 ps_page_dma->ps_page_dma[j],
2279 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2280 ps_page_dma->ps_page_dma[j] = 0;
2281 put_page(ps_page->ps_page[j]);
2282 ps_page->ps_page[j] = NULL;
1da177e4
LT
2283 }
2284 }
2285
2286 size = sizeof(struct e1000_buffer) * rx_ring->count;
2287 memset(rx_ring->buffer_info, 0, size);
2d7edb92
MC
2288 size = sizeof(struct e1000_ps_page) * rx_ring->count;
2289 memset(rx_ring->ps_page, 0, size);
2290 size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2291 memset(rx_ring->ps_page_dma, 0, size);
1da177e4
LT
2292
2293 /* Zero out the descriptor ring */
2294
2295 memset(rx_ring->desc, 0, rx_ring->size);
2296
2297 rx_ring->next_to_clean = 0;
2298 rx_ring->next_to_use = 0;
2299
581d708e
MC
2300 writel(0, adapter->hw.hw_addr + rx_ring->rdh);
2301 writel(0, adapter->hw.hw_addr + rx_ring->rdt);
2302}
2303
2304/**
2305 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2306 * @adapter: board private structure
2307 **/
2308
2309static void
2310e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2311{
2312 int i;
2313
f56799ea 2314 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e 2315 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1da177e4
LT
2316}
2317
2318/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2319 * and memory write and invalidate disabled for certain operations
2320 */
2321static void
2322e1000_enter_82542_rst(struct e1000_adapter *adapter)
2323{
2324 struct net_device *netdev = adapter->netdev;
2325 uint32_t rctl;
2326
2327 e1000_pci_clear_mwi(&adapter->hw);
2328
2329 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2330 rctl |= E1000_RCTL_RST;
2331 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2332 E1000_WRITE_FLUSH(&adapter->hw);
2333 mdelay(5);
2334
96838a40 2335 if (netif_running(netdev))
581d708e 2336 e1000_clean_all_rx_rings(adapter);
2337}
2338
2339static void
2340e1000_leave_82542_rst(struct e1000_adapter *adapter)
2341{
2342 struct net_device *netdev = adapter->netdev;
2343 uint32_t rctl;
2344
2345 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2346 rctl &= ~E1000_RCTL_RST;
2347 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2348 E1000_WRITE_FLUSH(&adapter->hw);
2349 mdelay(5);
2350
96838a40 2351 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2352 e1000_pci_set_mwi(&adapter->hw);
2353
96838a40 2354 if (netif_running(netdev)) {
2355 /* No need to loop, because 82542 supports only 1 queue */
2356 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
7c4d3367 2357 e1000_configure_rx(adapter);
72d64a43 2358 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
1da177e4
LT
2359 }
2360}
2361
2362/**
2363 * e1000_set_mac - Change the Ethernet Address of the NIC
2364 * @netdev: network interface device structure
2365 * @p: pointer to an address structure
2366 *
2367 * Returns 0 on success, negative on failure
2368 **/
2369
2370static int
2371e1000_set_mac(struct net_device *netdev, void *p)
2372{
60490fe0 2373 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4
LT
2374 struct sockaddr *addr = p;
2375
96838a40 2376 if (!is_valid_ether_addr(addr->sa_data))
2377 return -EADDRNOTAVAIL;
2378
2379 /* 82542 2.0 needs to be in reset to write receive address registers */
2380
96838a40 2381 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2382 e1000_enter_82542_rst(adapter);
2383
2384 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2385 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
2386
2387 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2388
868d5309
MC
2389 /* With 82571 controllers, LAA may be overwritten (with the default)
2390 * due to controller reset from the other port. */
2391 if (adapter->hw.mac_type == e1000_82571) {
2392 /* activate the work around */
2393 adapter->hw.laa_is_present = 1;
2394
2395 * Hold a copy of the LAA in RAR[14]. This is done so that
2396 * between the time RAR[0] gets clobbered and the time it
2397 * gets fixed (in e1000_watchdog), the actual LAA is in one
2398 * of the RARs and no incoming packets directed to this port
2399 * are dropped. Eventually the LAA will be in RAR[0] and
2400 * RAR[14] */
96838a40 2401 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
868d5309
MC
2402 E1000_RAR_ENTRIES - 1);
2403 }
2404
96838a40 2405 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2406 e1000_leave_82542_rst(adapter);
2407
2408 return 0;
2409}
2410
2411/**
2412 * e1000_set_multi - Multicast and Promiscuous mode set
2413 * @netdev: network interface device structure
2414 *
2415 * The set_multi entry point is called whenever the multicast address
2416 * list or the network interface flags are updated. This routine is
2417 * responsible for configuring the hardware for proper multicast,
2418 * promiscuous mode, and all-multi behavior.
2419 **/
2420
2421static void
2422e1000_set_multi(struct net_device *netdev)
2423{
60490fe0 2424 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4
LT
2425 struct e1000_hw *hw = &adapter->hw;
2426 struct dev_mc_list *mc_ptr;
2427 uint32_t rctl;
2428 uint32_t hash_value;
868d5309 2429 int i, rar_entries = E1000_RAR_ENTRIES;
cd94dd0b
AK
2430 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2431 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2432 E1000_NUM_MTA_REGISTERS;
2433
2434 if (adapter->hw.mac_type == e1000_ich8lan)
2435 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
1da177e4 2436
868d5309
MC
2437 /* reserve RAR[14] for LAA over-write work-around */
2438 if (adapter->hw.mac_type == e1000_82571)
2439 rar_entries--;
1da177e4 2440
2648345f
MC
2441 /* Check for Promiscuous and All Multicast modes */
2442
1da177e4
LT
2443 rctl = E1000_READ_REG(hw, RCTL);
2444
96838a40 2445 if (netdev->flags & IFF_PROMISC) {
1da177e4 2446 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
96838a40 2447 } else if (netdev->flags & IFF_ALLMULTI) {
1da177e4
LT
2448 rctl |= E1000_RCTL_MPE;
2449 rctl &= ~E1000_RCTL_UPE;
2450 } else {
2451 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2452 }
2453
2454 E1000_WRITE_REG(hw, RCTL, rctl);
2455
2456 /* 82542 2.0 needs to be in reset to write receive address registers */
2457
96838a40 2458 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2459 e1000_enter_82542_rst(adapter);
2460
2461 /* load the first 14 multicast addresses into the exact filters 1-14
2462 * RAR 0 is used for the station MAC address
2463 * if there are not 14 addresses, go ahead and clear the filters
2464 * -- with 82571 controllers only 0-13 entries are filled here
2465 */
2466 mc_ptr = netdev->mc_list;
2467
96838a40 2468 for (i = 1; i < rar_entries; i++) {
868d5309 2469 if (mc_ptr) {
2470 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2471 mc_ptr = mc_ptr->next;
2472 } else {
2473 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
4ca213a6 2474 E1000_WRITE_FLUSH(hw);
1da177e4 2475 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
4ca213a6 2476 E1000_WRITE_FLUSH(hw);
1da177e4
LT
2477 }
2478 }
2479
2480 /* clear the old settings from the multicast hash table */
2481
cd94dd0b 2482 for (i = 0; i < mta_reg_count; i++) {
1da177e4 2483 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
4ca213a6
AK
2484 E1000_WRITE_FLUSH(hw);
2485 }
1da177e4
LT
2486
2487 /* load any remaining addresses into the hash table */
2488
96838a40 2489 for (; mc_ptr; mc_ptr = mc_ptr->next) {
1da177e4
LT
2490 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2491 e1000_mta_set(hw, hash_value);
2492 }
2493
96838a40 2494 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4 2495 e1000_leave_82542_rst(adapter);
1da177e4
LT
2496}
2497
2498/* Need to wait a few seconds after link up to get diagnostic information from
2499 * the phy */
2500
2501static void
2502e1000_update_phy_info(unsigned long data)
2503{
2504 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2505 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2506}
2507
2508/**
2509 * e1000_82547_tx_fifo_stall - Timer Call-back
2510 * @data: pointer to adapter cast into an unsigned long
2511 **/
2512
2513static void
2514e1000_82547_tx_fifo_stall(unsigned long data)
2515{
2516 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2517 struct net_device *netdev = adapter->netdev;
2518 uint32_t tctl;
2519
96838a40
JB
2520 if (atomic_read(&adapter->tx_fifo_stall)) {
2521 if ((E1000_READ_REG(&adapter->hw, TDT) ==
1da177e4
LT
2522 E1000_READ_REG(&adapter->hw, TDH)) &&
2523 (E1000_READ_REG(&adapter->hw, TDFT) ==
2524 E1000_READ_REG(&adapter->hw, TDFH)) &&
2525 (E1000_READ_REG(&adapter->hw, TDFTS) ==
2526 E1000_READ_REG(&adapter->hw, TDFHS))) {
2527 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2528 E1000_WRITE_REG(&adapter->hw, TCTL,
2529 tctl & ~E1000_TCTL_EN);
2530 E1000_WRITE_REG(&adapter->hw, TDFT,
2531 adapter->tx_head_addr);
2532 E1000_WRITE_REG(&adapter->hw, TDFH,
2533 adapter->tx_head_addr);
2534 E1000_WRITE_REG(&adapter->hw, TDFTS,
2535 adapter->tx_head_addr);
2536 E1000_WRITE_REG(&adapter->hw, TDFHS,
2537 adapter->tx_head_addr);
2538 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2539 E1000_WRITE_FLUSH(&adapter->hw);
2540
2541 adapter->tx_fifo_head = 0;
2542 atomic_set(&adapter->tx_fifo_stall, 0);
2543 netif_wake_queue(netdev);
2544 } else {
2545 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2546 }
2547 }
2548}
2549
2550/**
2551 * e1000_watchdog - Timer Call-back
2552 * @data: pointer to adapter cast into an unsigned long
2553 **/
2554static void
2555e1000_watchdog(unsigned long data)
2556{
2557 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1da177e4 2558 struct net_device *netdev = adapter->netdev;
545c67c0 2559 struct e1000_tx_ring *txdr = adapter->tx_ring;
7e6c9861 2560 uint32_t link, tctl;
cd94dd0b
AK
2561 int32_t ret_val;
2562
2563 ret_val = e1000_check_for_link(&adapter->hw);
2564 if ((ret_val == E1000_ERR_PHY) &&
2565 (adapter->hw.phy_type == e1000_phy_igp_3) &&
2566 (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2567 /* See e1000_kumeran_lock_loss_workaround() */
2568 DPRINTK(LINK, INFO,
2569 "Gigabit has been disabled, downgrading speed\n");
2570 }
90fb5135 2571
2d7edb92
MC
2572 if (adapter->hw.mac_type == e1000_82573) {
2573 e1000_enable_tx_pkt_filtering(&adapter->hw);
96838a40 2574 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2d7edb92 2575 e1000_update_mng_vlan(adapter);
96838a40 2576 }
1da177e4 2577
96838a40 2578 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1da177e4
LT
2579 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2580 link = !adapter->hw.serdes_link_down;
2581 else
2582 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2583
96838a40
JB
2584 if (link) {
2585 if (!netif_carrier_ok(netdev)) {
9669f53b 2586 uint32_t ctrl;
fe7fe28e 2587 boolean_t txb2b = 1;
1da177e4
LT
2588 e1000_get_speed_and_duplex(&adapter->hw,
2589 &adapter->link_speed,
2590 &adapter->link_duplex);
2591
9669f53b
AK
2592 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2593 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2594 "Flow Control: %s\n",
2595 adapter->link_speed,
2596 adapter->link_duplex == FULL_DUPLEX ?
2597 "Full Duplex" : "Half Duplex",
2598 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2599 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2600 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2601 E1000_CTRL_TFCE) ? "TX" : "None" )));
1da177e4 2602
7e6c9861
JK
2603 /* tweak tx_queue_len according to speed/duplex
2604 * and adjust the timeout factor */
66a2b0a3
JK
2605 netdev->tx_queue_len = adapter->tx_queue_len;
2606 adapter->tx_timeout_factor = 1;
7e6c9861
JK
2607 switch (adapter->link_speed) {
2608 case SPEED_10:
fe7fe28e 2609 txb2b = 0;
7e6c9861
JK
2610 netdev->tx_queue_len = 10;
2611 adapter->tx_timeout_factor = 8;
2612 break;
2613 case SPEED_100:
fe7fe28e 2614 txb2b = 0;
7e6c9861
JK
2615 netdev->tx_queue_len = 100;
2616 /* maybe add some timeout factor ? */
2617 break;
2618 }
2619
fe7fe28e 2620 if ((adapter->hw.mac_type == e1000_82571 ||
7e6c9861 2621 adapter->hw.mac_type == e1000_82572) &&
fe7fe28e 2622 txb2b == 0) {
7e6c9861
JK
2623 uint32_t tarc0;
2624 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
90fb5135 2625 tarc0 &= ~(1 << 21);
7e6c9861
JK
2626 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2627 }
90fb5135 2628
7e6c9861
JK
2629#ifdef NETIF_F_TSO
2630 /* disable TSO for pcie and 10/100 speeds, to avoid
2631 * some hardware issues */
2632 if (!adapter->tso_force &&
2633 adapter->hw.bus_type == e1000_bus_type_pci_express){
66a2b0a3
JK
2634 switch (adapter->link_speed) {
2635 case SPEED_10:
66a2b0a3 2636 case SPEED_100:
7e6c9861
JK
2637 DPRINTK(PROBE,INFO,
2638 "10/100 speed: disabling TSO\n");
2639 netdev->features &= ~NETIF_F_TSO;
87ca4e5b
AK
2640#ifdef NETIF_F_TSO6
2641 netdev->features &= ~NETIF_F_TSO6;
2642#endif
7e6c9861
JK
2643 break;
2644 case SPEED_1000:
2645 netdev->features |= NETIF_F_TSO;
87ca4e5b
AK
2646#ifdef NETIF_F_TSO6
2647 netdev->features |= NETIF_F_TSO6;
2648#endif
7e6c9861
JK
2649 break;
2650 default:
2651 /* oops */
66a2b0a3
JK
2652 break;
2653 }
2654 }
7e6c9861
JK
2655#endif
2656
2657 /* enable transmits in the hardware, need to do this
2658 * after setting TARC0 */
2659 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2660 tctl |= E1000_TCTL_EN;
2661 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
66a2b0a3 2662
1da177e4
LT
2663 netif_carrier_on(netdev);
2664 netif_wake_queue(netdev);
2665 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2666 adapter->smartspeed = 0;
bb8e3311
JG
2667 } else {
2668 /* make sure the receive unit is started */
2669 if (adapter->hw.rx_needs_kicking) {
2670 struct e1000_hw *hw = &adapter->hw;
2671 uint32_t rctl = E1000_READ_REG(hw, RCTL);
2672 E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
2673 }
1da177e4
LT
2674 }
2675 } else {
96838a40 2676 if (netif_carrier_ok(netdev)) {
1da177e4
LT
2677 adapter->link_speed = 0;
2678 adapter->link_duplex = 0;
2679 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2680 netif_carrier_off(netdev);
2681 netif_stop_queue(netdev);
2682 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
87041639
JK
2683
2684 /* 80003ES2LAN workaround--
2685 * For packet buffer work-around on link down event;
2686 * disable receives in the ISR and
2687 * reset device here in the watchdog
2688 */
8fc897b0 2689 if (adapter->hw.mac_type == e1000_80003es2lan)
87041639
JK
2690 /* reset device */
2691 schedule_work(&adapter->reset_task);
1da177e4
LT
2692 }
2693
2694 e1000_smartspeed(adapter);
2695 }
2696
2697 e1000_update_stats(adapter);
2698
2699 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2700 adapter->tpt_old = adapter->stats.tpt;
2701 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
2702 adapter->colc_old = adapter->stats.colc;
2703
2704 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2705 adapter->gorcl_old = adapter->stats.gorcl;
2706 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2707 adapter->gotcl_old = adapter->stats.gotcl;
2708
2709 e1000_update_adaptive(&adapter->hw);
2710
f56799ea 2711 if (!netif_carrier_ok(netdev)) {
581d708e 2712 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1da177e4
LT
2713 /* We've lost link, so the controller stops DMA,
2714 * but we've got queued Tx work that's never going
2715 * to get done, so reset controller to flush Tx.
2716 * (Do the reset outside of interrupt context). */
87041639
JK
2717 adapter->tx_timeout_count++;
2718 schedule_work(&adapter->reset_task);
1da177e4
LT
2719 }
2720 }
2721
1da177e4
LT
2722 /* Cause software interrupt to ensure rx ring is cleaned */
2723 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
2724
2648345f 2725 /* Force detection of hung controller every watchdog period */
1da177e4
LT
2726 adapter->detect_tx_hung = TRUE;
2727
96838a40 2728 /* With 82571 controllers, LAA may be overwritten due to controller
868d5309
MC
2729 * reset from the other port. Set the appropriate LAA in RAR[0] */
2730 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2731 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2732
1da177e4
LT
2733 /* Reset the timer */
2734 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
2735}
2736
2737enum latency_range {
2738 lowest_latency = 0,
2739 low_latency = 1,
2740 bulk_latency = 2,
2741 latency_invalid = 255
2742};
2743
2744/**
2745 * e1000_update_itr - update the dynamic ITR value based on statistics
2746 * Stores a new ITR value based on packets and byte
2747 * counts during the last interrupt. The advantage of per interrupt
2748 * computation is faster updates and more accurate ITR for the current
2749 * traffic pattern. Constants in this function were computed
2750 * based on theoretical maximum wire speed and thresholds were set based
2751 * on testing data as well as attempting to minimize response time
2752 * while increasing bulk throughput.
2753 * This functionality is controlled by the InterruptThrottleRate module
2754 * parameter (see e1000_param.c).
2755 * @adapter: pointer to adapter
2756 * @itr_setting: current adapter->itr
2757 * @packets: the number of packets during this measurement interval
2758 * @bytes: the number of bytes during this measurement interval
2759 **/
2760static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2761 uint16_t itr_setting,
2762 int packets,
2763 int bytes)
2764{
2765 unsigned int retval = itr_setting;
2766 struct e1000_hw *hw = &adapter->hw;
2767
2768 if (unlikely(hw->mac_type < e1000_82540))
2769 goto update_itr_done;
2770
2771 if (packets == 0)
2772 goto update_itr_done;
2773
2774 switch (itr_setting) {
2775 case lowest_latency:
2776 /* jumbo frames get bulk treatment */
2777 if (bytes/packets > 8000)
2778 retval = bulk_latency;
2779 else if ((packets < 5) && (bytes > 512))
835bb129
JB
2780 retval = low_latency;
2781 break;
2782 case low_latency: /* 50 usec aka 20000 ints/s */
2783 if (bytes > 10000) {
2b65326e
JB
2784 /* jumbo frames need bulk latency setting */
2785 if (bytes/packets > 8000)
2786 retval = bulk_latency;
2787 else if ((packets < 10) || ((bytes/packets) > 1200))
835bb129
JB
2788 retval = bulk_latency;
2789 else if ((packets > 35))
2790 retval = lowest_latency;
2b65326e
JB
2791 } else if (bytes/packets > 2000)
2792 retval = bulk_latency;
2793 else if (packets <= 2 && bytes < 512)
835bb129
JB
2794 retval = lowest_latency;
2795 break;
2796 case bulk_latency: /* 250 usec aka 4000 ints/s */
2797 if (bytes > 25000) {
2798 if (packets > 35)
2799 retval = low_latency;
2b65326e
JB
2800 } else if (bytes < 6000) {
2801 retval = low_latency;
835bb129
JB
2802 }
2803 break;
2804 }
2805
2806update_itr_done:
2807 return retval;
2808}
2809
2810static void e1000_set_itr(struct e1000_adapter *adapter)
2811{
2812 struct e1000_hw *hw = &adapter->hw;
2813 uint16_t current_itr;
2814 uint32_t new_itr = adapter->itr;
2815
2816 if (unlikely(hw->mac_type < e1000_82540))
2817 return;
2818
2819 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2820 if (unlikely(adapter->link_speed != SPEED_1000)) {
2821 current_itr = 0;
2822 new_itr = 4000;
2823 goto set_itr_now;
2824 }
2825
2826 adapter->tx_itr = e1000_update_itr(adapter,
2827 adapter->tx_itr,
2828 adapter->total_tx_packets,
2829 adapter->total_tx_bytes);
2b65326e
JB
2830 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2831 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2832 adapter->tx_itr = low_latency;
2833
835bb129
JB
2834 adapter->rx_itr = e1000_update_itr(adapter,
2835 adapter->rx_itr,
2836 adapter->total_rx_packets,
2837 adapter->total_rx_bytes);
2b65326e
JB
2838 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2839 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2840 adapter->rx_itr = low_latency;
835bb129
JB
2841
2842 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2843
835bb129
JB
2844 switch (current_itr) {
2845 /* counts and packets in update_itr are dependent on these numbers */
2846 case lowest_latency:
2847 new_itr = 70000;
2848 break;
2849 case low_latency:
2850 new_itr = 20000; /* aka hwitr = ~200 */
2851 break;
2852 case bulk_latency:
2853 new_itr = 4000;
2854 break;
2855 default:
2856 break;
2857 }
2858
2859set_itr_now:
2860 if (new_itr != adapter->itr) {
2861 /* this attempts to bias the interrupt rate towards Bulk
2862 * by adding intermediate steps when interrupt rate is
2863 * increasing */
2864 new_itr = new_itr > adapter->itr ?
2865 min(adapter->itr + (new_itr >> 2), new_itr) :
2866 new_itr;
2867 adapter->itr = new_itr;
2868 E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
2869 }
2870
2871 return;
2872}
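/* Editor's note: worked example, not driver code. The ITR register counts in
 * 256 ns units, so the write above converts an interrupts-per-second target
 * with 1000000000 / (itr * 256):
 *   itr = 70000 (lowest_latency) -> register ~55
 *   itr = 20000 (low_latency)    -> register ~195 (the "~200" noted above)
 *   itr =  4000 (bulk_latency)   -> register ~976
 * A larger register value therefore means a longer minimum gap between
 * interrupts. */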
2873
1da177e4
LT
2874#define E1000_TX_FLAGS_CSUM 0x00000001
2875#define E1000_TX_FLAGS_VLAN 0x00000002
2876#define E1000_TX_FLAGS_TSO 0x00000004
2d7edb92 2877#define E1000_TX_FLAGS_IPV4 0x00000008
1da177e4
LT
2878#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2879#define E1000_TX_FLAGS_VLAN_SHIFT 16
2880
e619d523 2881static int
2882e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2883 struct sk_buff *skb)
1da177e4
LT
2884{
2885#ifdef NETIF_F_TSO
2886 struct e1000_context_desc *context_desc;
545c67c0 2887 struct e1000_buffer *buffer_info;
1da177e4
LT
2888 unsigned int i;
2889 uint32_t cmd_length = 0;
2d7edb92 2890 uint16_t ipcse = 0, tucse, mss;
1da177e4
LT
2891 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2892 int err;
2893
89114afd 2894 if (skb_is_gso(skb)) {
1da177e4
LT
2895 if (skb_header_cloned(skb)) {
2896 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2897 if (err)
2898 return err;
2899 }
2900
2901 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
7967168c 2902 mss = skb_shinfo(skb)->gso_size;
60828236 2903 if (skb->protocol == htons(ETH_P_IP)) {
2d7edb92
MC
2904 skb->nh.iph->tot_len = 0;
2905 skb->nh.iph->check = 0;
2906 skb->h.th->check =
2907 ~csum_tcpudp_magic(skb->nh.iph->saddr,
2908 skb->nh.iph->daddr,
2909 0,
2910 IPPROTO_TCP,
2911 0);
2912 cmd_length = E1000_TXD_CMD_IP;
2913 ipcse = skb->h.raw - skb->data - 1;
87ca4e5b 2914#ifdef NETIF_F_TSO6
e15fdd03 2915 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2d7edb92
MC
2916 skb->nh.ipv6h->payload_len = 0;
2917 skb->h.th->check =
2918 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
2919 &skb->nh.ipv6h->daddr,
2920 0,
2921 IPPROTO_TCP,
2922 0);
2923 ipcse = 0;
2924#endif
2925 }
1da177e4
LT
2926 ipcss = skb->nh.raw - skb->data;
2927 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1da177e4
LT
2928 tucss = skb->h.raw - skb->data;
2929 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
2930 tucse = 0;
2931
2932 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2d7edb92 2933 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1da177e4 2934
581d708e
MC
2935 i = tx_ring->next_to_use;
2936 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
545c67c0 2937 buffer_info = &tx_ring->buffer_info[i];
1da177e4
LT
2938
2939 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2940 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2941 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2942 context_desc->upper_setup.tcp_fields.tucss = tucss;
2943 context_desc->upper_setup.tcp_fields.tucso = tucso;
2944 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2945 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2946 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2947 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2948
545c67c0 2949 buffer_info->time_stamp = jiffies;
a9ebadd6 2950 buffer_info->next_to_watch = i;
545c67c0 2951
581d708e
MC
2952 if (++i == tx_ring->count) i = 0;
2953 tx_ring->next_to_use = i;
1da177e4 2954
8241e35e 2955 return TRUE;
1da177e4
LT
2956 }
2957#endif
2958
8241e35e 2959 return FALSE;
1da177e4
LT
2960}
2961
e619d523 2962static boolean_t
2963e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2964 struct sk_buff *skb)
1da177e4
LT
2965{
2966 struct e1000_context_desc *context_desc;
545c67c0 2967 struct e1000_buffer *buffer_info;
1da177e4
LT
2968 unsigned int i;
2969 uint8_t css;
2970
84fa7933 2971 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1da177e4
LT
2972 css = skb->h.raw - skb->data;
2973
581d708e 2974 i = tx_ring->next_to_use;
545c67c0 2975 buffer_info = &tx_ring->buffer_info[i];
581d708e 2976 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
1da177e4 2977
f6c57baf 2978 context_desc->lower_setup.ip_config = 0;
1da177e4 2979 context_desc->upper_setup.tcp_fields.tucss = css;
f6c57baf 2980 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
1da177e4
LT
2981 context_desc->upper_setup.tcp_fields.tucse = 0;
2982 context_desc->tcp_seg_setup.data = 0;
2983 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2984
545c67c0 2985 buffer_info->time_stamp = jiffies;
a9ebadd6 2986 buffer_info->next_to_watch = i;
545c67c0 2987
581d708e
MC
2988 if (unlikely(++i == tx_ring->count)) i = 0;
2989 tx_ring->next_to_use = i;
1da177e4
LT
2990
2991 return TRUE;
2992 }
2993
2994 return FALSE;
2995}
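/* Editor's note: worked example, not driver code. For a CHECKSUM_PARTIAL
 * TCP/IPv4 frame on an untagged Ethernet link the transport header starts
 * 14 + 20 = 34 bytes into the frame, so css = tucss = 34; in this kernel
 * skb->csum holds the checksum field offset within the TCP header (16), so
 * tucso = 34 + 16 = 50. The context descriptor built above thus tells the
 * MAC to checksum from byte 34 onward and insert the result at byte 50. */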
2996
2997#define E1000_MAX_TXD_PWR 12
2998#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2999
e619d523 3000static int
3001e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3002 struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
3003 unsigned int nr_frags, unsigned int mss)
1da177e4 3004{
1da177e4
LT
3005 struct e1000_buffer *buffer_info;
3006 unsigned int len = skb->len;
3007 unsigned int offset = 0, size, count = 0, i;
3008 unsigned int f;
3009 len -= skb->data_len;
3010
3011 i = tx_ring->next_to_use;
3012
96838a40 3013 while (len) {
1da177e4
LT
3014 buffer_info = &tx_ring->buffer_info[i];
3015 size = min(len, max_per_txd);
3016#ifdef NETIF_F_TSO
fd803241
JK
3017 /* Workaround for Controller erratum --
3018 * descriptor for non-tso packet in a linear SKB that follows a
3019 * tso gets written back prematurely before the data is fully
0f15a8fa 3020 * DMA'd to the controller */
fd803241 3021 if (!skb->data_len && tx_ring->last_tx_tso &&
89114afd 3022 !skb_is_gso(skb)) {
fd803241
JK
3023 tx_ring->last_tx_tso = 0;
3024 size -= 4;
3025 }
3026
1da177e4
LT
3027 /* Workaround for premature desc write-backs
3028 * in TSO mode. Append 4-byte sentinel desc */
96838a40 3029 if (unlikely(mss && !nr_frags && size == len && size > 8))
1da177e4
LT
3030 size -= 4;
3031#endif
97338bde
MC
3032 /* work-around for errata 10 and it applies
3033 * to all controllers in PCI-X mode
3034 * The fix is to make sure that the first descriptor of a
3035 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
3036 */
96838a40 3037 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
97338bde
MC
3038 (size > 2015) && count == 0))
3039 size = 2015;
96838a40 3040
1da177e4
LT
3041 /* Workaround for potential 82544 hang in PCI-X. Avoid
3042 * terminating buffers within evenly-aligned dwords. */
96838a40 3043 if (unlikely(adapter->pcix_82544 &&
1da177e4
LT
3044 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
3045 size > 4))
3046 size -= 4;
3047
3048 buffer_info->length = size;
3049 buffer_info->dma =
3050 pci_map_single(adapter->pdev,
3051 skb->data + offset,
3052 size,
3053 PCI_DMA_TODEVICE);
3054 buffer_info->time_stamp = jiffies;
a9ebadd6 3055 buffer_info->next_to_watch = i;
1da177e4
LT
3056
3057 len -= size;
3058 offset += size;
3059 count++;
96838a40 3060 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
3061 }
3062
96838a40 3063 for (f = 0; f < nr_frags; f++) {
1da177e4
LT
3064 struct skb_frag_struct *frag;
3065
3066 frag = &skb_shinfo(skb)->frags[f];
3067 len = frag->size;
3068 offset = frag->page_offset;
3069
96838a40 3070 while (len) {
1da177e4
LT
3071 buffer_info = &tx_ring->buffer_info[i];
3072 size = min(len, max_per_txd);
3073#ifdef NETIF_F_TSO
3074 /* Workaround for premature desc write-backs
3075 * in TSO mode. Append 4-byte sentinel desc */
96838a40 3076 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
1da177e4
LT
3077 size -= 4;
3078#endif
3079 /* Workaround for potential 82544 hang in PCI-X.
3080 * Avoid terminating buffers within evenly-aligned
3081 * dwords. */
96838a40 3082 if (unlikely(adapter->pcix_82544 &&
1da177e4
LT
3083 !((unsigned long)(frag->page+offset+size-1) & 4) &&
3084 size > 4))
3085 size -= 4;
3086
3087 buffer_info->length = size;
3088 buffer_info->dma =
3089 pci_map_page(adapter->pdev,
3090 frag->page,
3091 offset,
3092 size,
3093 PCI_DMA_TODEVICE);
3094 buffer_info->time_stamp = jiffies;
a9ebadd6 3095 buffer_info->next_to_watch = i;
1da177e4
LT
3096
3097 len -= size;
3098 offset += size;
3099 count++;
96838a40 3100 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
3101 }
3102 }
3103
3104 i = (i == 0) ? tx_ring->count - 1 : i - 1;
3105 tx_ring->buffer_info[i].skb = skb;
3106 tx_ring->buffer_info[first].next_to_watch = i;
3107
3108 return count;
3109}
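/* Editor's note: worked example, not driver code. With the defaults above
 * (E1000_MAX_TXD_PWR = 12, so E1000_MAX_DATA_PER_TXD = 4096) and ignoring
 * the errata adjustments, a 9000-byte linear skb is split by the while loop
 * into 4096 + 4096 + 808 byte buffers, i.e. three descriptors; the
 * TXD_USE_COUNT() macro used later by e1000_xmit_frame() predicts the same
 * count: (9000 >> 12) + 1 = 3. */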
3110
e619d523 3111static void
3112e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3113 int tx_flags, int count)
1da177e4 3114{
1da177e4
LT
3115 struct e1000_tx_desc *tx_desc = NULL;
3116 struct e1000_buffer *buffer_info;
3117 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3118 unsigned int i;
3119
96838a40 3120 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
1da177e4
LT
3121 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3122 E1000_TXD_CMD_TSE;
2d7edb92
MC
3123 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3124
96838a40 3125 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2d7edb92 3126 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
1da177e4
LT
3127 }
3128
96838a40 3129 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
1da177e4
LT
3130 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3131 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3132 }
3133
96838a40 3134 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
1da177e4
LT
3135 txd_lower |= E1000_TXD_CMD_VLE;
3136 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3137 }
3138
3139 i = tx_ring->next_to_use;
3140
96838a40 3141 while (count--) {
1da177e4
LT
3142 buffer_info = &tx_ring->buffer_info[i];
3143 tx_desc = E1000_TX_DESC(*tx_ring, i);
3144 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3145 tx_desc->lower.data =
3146 cpu_to_le32(txd_lower | buffer_info->length);
3147 tx_desc->upper.data = cpu_to_le32(txd_upper);
96838a40 3148 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
3149 }
3150
3151 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3152
3153 /* Force memory writes to complete before letting h/w
3154 * know there are new descriptors to fetch. (Only
3155 * applicable for weak-ordered memory model archs,
3156 * such as IA-64). */
3157 wmb();
3158
3159 tx_ring->next_to_use = i;
581d708e 3160 writel(i, adapter->hw.hw_addr + tx_ring->tdt);
2ce9047f
JB
3161 /* we need this if more than one processor can write to our tail
3162 * at a time, it synchronizes IO on IA64/Altix systems */
3163 mmiowb();
1da177e4
LT
3164}
3165
3166/**
3167 * 82547 workaround to avoid controller hang in half-duplex environment.
3168 * The workaround is to avoid queuing a large packet that would span
3169 * the internal Tx FIFO ring boundary by notifying the stack to resend
3170 * the packet at a later time. This gives the Tx FIFO an opportunity to
3171 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3172 * to the beginning of the Tx FIFO.
3173 **/
3174
3175#define E1000_FIFO_HDR 0x10
3176#define E1000_82547_PAD_LEN 0x3E0
3177
e619d523 3178static int
3179e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
3180{
3181 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3182 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
3183
3184 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
3185
96838a40 3186 if (adapter->link_duplex != HALF_DUPLEX)
1da177e4
LT
3187 goto no_fifo_stall_required;
3188
96838a40 3189 if (atomic_read(&adapter->tx_fifo_stall))
1da177e4
LT
3190 return 1;
3191
96838a40 3192 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1da177e4
LT
3193 atomic_set(&adapter->tx_fifo_stall, 1);
3194 return 1;
3195 }
3196
3197no_fifo_stall_required:
3198 adapter->tx_fifo_head += skb_fifo_len;
96838a40 3199 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1da177e4
LT
3200 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3201 return 0;
3202}
3203
2d7edb92 3204#define MINIMUM_DHCP_PACKET_SIZE 282
e619d523 3205static int
3206e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3207{
3208 struct e1000_hw *hw = &adapter->hw;
3209 uint16_t length, offset;
96838a40
JB
3210 if (vlan_tx_tag_present(skb)) {
3211 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2d7edb92
MC
3212 ( adapter->hw.mng_cookie.status &
3213 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
3214 return 0;
3215 }
20a44028 3216 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
2d7edb92 3217 struct ethhdr *eth = (struct ethhdr *) skb->data;
96838a40
JB
3218 if ((htons(ETH_P_IP) == eth->h_proto)) {
3219 const struct iphdr *ip =
2d7edb92 3220 (struct iphdr *)((uint8_t *)skb->data+14);
96838a40
JB
3221 if (IPPROTO_UDP == ip->protocol) {
3222 struct udphdr *udp =
3223 (struct udphdr *)((uint8_t *)ip +
2d7edb92 3224 (ip->ihl << 2));
96838a40 3225 if (ntohs(udp->dest) == 67) {
2d7edb92
MC
3226 offset = (uint8_t *)udp + 8 - skb->data;
3227 length = skb->len - offset;
3228
3229 return e1000_mng_write_dhcp_info(hw,
96838a40 3230 (uint8_t *)udp + 8,
2d7edb92
MC
3231 length);
3232 }
3233 }
3234 }
3235 }
3236 return 0;
3237}
3238
65c7973f
JB
3239static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3240{
3241 struct e1000_adapter *adapter = netdev_priv(netdev);
3242 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3243
3244 netif_stop_queue(netdev);
3245 /* Herbert's original patch had:
3246 * smp_mb__after_netif_stop_queue();
3247 * but since that doesn't exist yet, just open code it. */
3248 smp_mb();
3249
3250 /* We need to check again in a case another CPU has just
3251 * made room available. */
3252 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3253 return -EBUSY;
3254
3255 /* A reprieve! */
3256 netif_start_queue(netdev);
fcfb1224 3257 ++adapter->restart_queue;
65c7973f
JB
3258 return 0;
3259}
3260
3261static int e1000_maybe_stop_tx(struct net_device *netdev,
3262 struct e1000_tx_ring *tx_ring, int size)
3263{
3264 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3265 return 0;
3266 return __e1000_maybe_stop_tx(netdev, size);
3267}
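
/* Illustrative ordering only, not part of the original driver: why the
 * smp_mb() and the re-check in __e1000_maybe_stop_tx() are needed.
 *
 *   xmit path                          clean_tx_irq path
 *   ---------                          -----------------
 *   sees too few free descriptors
 *   netif_stop_queue()
 *                                      frees descriptors
 *                                      queue stopped? -> netif_wake_queue()
 *   smp_mb()
 *   re-check E1000_DESC_UNUSED()
 *   room available -> restart queue
 *
 * Without the barrier and the second check, the cleaner's wake-up could
 * race with the stop and the queue could stay stopped even though the ring
 * has plenty of reclaimed descriptors. */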
3268
1da177e4
LT
3269#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
3270static int
3271e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3272{
60490fe0 3273 struct e1000_adapter *adapter = netdev_priv(netdev);
581d708e 3274 struct e1000_tx_ring *tx_ring;
1da177e4
LT
3275 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3276 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3277 unsigned int tx_flags = 0;
3278 unsigned int len = skb->len;
3279 unsigned long flags;
3280 unsigned int nr_frags = 0;
3281 unsigned int mss = 0;
3282 int count = 0;
76c224bc 3283 int tso;
1da177e4
LT
3284 unsigned int f;
3285 len -= skb->data_len;
3286
65c7973f
JB
3287 /* This goes back to the question of how to logically map a tx queue
3288 * to a flow. Right now, performance is slightly degraded when
3289 * using multiple tx queues. If the stack breaks away from a
3290 * single qdisc implementation, we can look at this again. */
581d708e 3291 tx_ring = adapter->tx_ring;
24025e4e 3292
581d708e 3293 if (unlikely(skb->len <= 0)) {
1da177e4
LT
3294 dev_kfree_skb_any(skb);
3295 return NETDEV_TX_OK;
3296 }
3297
032fe6e9
JB
3298 /* 82571 and newer don't need the workaround that limited descriptor
3299 * length to 4kB */
3300 if (adapter->hw.mac_type >= e1000_82571)
3301 max_per_txd = 8192;
3302
1da177e4 3303#ifdef NETIF_F_TSO
7967168c 3304 mss = skb_shinfo(skb)->gso_size;
76c224bc 3305 /* The controller does a simple calculation to
1da177e4
LT
3306 * make sure there is enough room in the FIFO before
3307 * initiating the DMA for each buffer. The calc is:
3308 * 4 = ceil(buffer len/mss). To make sure we don't
3309 * overrun the FIFO, adjust the max buffer len if mss
3310 * drops. */
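 /* Illustrative numbers only (not in the original): on an 82571 or newer,
 * where max_per_txd was raised to 8192 above, mss = 1448 gives
 * mss << 2 = 5792, so max_per_txd drops to 5792 and max_txd_pwr becomes
 * fls(5792) - 1 = 12, i.e. TXD_USE_COUNT() below charges one descriptor
 * per 4096-byte chunk of each buffer, plus one. */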
96838a40 3311 if (mss) {
9a3056da 3312 uint8_t hdr_len;
1da177e4
LT
3313 max_per_txd = min(mss << 2, max_per_txd);
3314 max_txd_pwr = fls(max_per_txd) - 1;
9a3056da 3315
90fb5135
AK
3316 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3317 * points to just header, pull a few bytes of payload from
3318 * frags into skb->data */
9a3056da 3319 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
9f687888
JK
3320 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
3321 switch (adapter->hw.mac_type) {
3322 unsigned int pull_size;
683a2aa3
HX
3323 case e1000_82544:
3324 /* Make sure we have room to chop off 4 bytes,
3325 * and that the end alignment will work out to
3326 * this hardware's requirements
3327 * NOTE: this is a TSO only workaround
3328 * if the end byte alignment is not correct, move us
3329 * into the next dword */
3330 if ((unsigned long)(skb->tail - 1) & 4)
3331 break;
3332 /* fall through */
9f687888
JK
3333 case e1000_82571:
3334 case e1000_82572:
3335 case e1000_82573:
cd94dd0b 3336 case e1000_ich8lan:
9f687888
JK
3337 pull_size = min((unsigned int)4, skb->data_len);
3338 if (!__pskb_pull_tail(skb, pull_size)) {
a5eafce2 3339 DPRINTK(DRV, ERR,
9f687888
JK
3340 "__pskb_pull_tail failed.\n");
3341 dev_kfree_skb_any(skb);
749dfc70 3342 return NETDEV_TX_OK;
9f687888
JK
3343 }
3344 len = skb->len - skb->data_len;
3345 break;
3346 default:
3347 /* do nothing */
3348 break;
d74bbd3b 3349 }
9a3056da 3350 }
1da177e4
LT
3351 }
3352
9a3056da 3353 /* reserve a descriptor for the offload context */
84fa7933 3354 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
1da177e4 3355 count++;
2648345f 3356 count++;
1da177e4 3357#else
84fa7933 3358 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4
LT
3359 count++;
3360#endif
fd803241
JK
3361
3362#ifdef NETIF_F_TSO
3363 /* Controller Erratum workaround */
89114afd 3364 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
fd803241
JK
3365 count++;
3366#endif
3367
1da177e4
LT
3368 count += TXD_USE_COUNT(len, max_txd_pwr);
3369
96838a40 3370 if (adapter->pcix_82544)
1da177e4
LT
3371 count++;
3372
96838a40 3373 /* work-around for errata 10; it applies to all controllers
97338bde
MC
3374 * in PCI-X mode, so add one more descriptor to the count
3375 */
96838a40 3376 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
97338bde
MC
3377 (len > 2015)))
3378 count++;
3379
1da177e4 3380 nr_frags = skb_shinfo(skb)->nr_frags;
96838a40 3381 for (f = 0; f < nr_frags; f++)
1da177e4
LT
3382 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3383 max_txd_pwr);
96838a40 3384 if (adapter->pcix_82544)
1da177e4
LT
3385 count += nr_frags;
3386
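 /* Worked example, illustrative only (assumes the default E1000_MAX_TXD_PWR
 * of 12, i.e. 4 KiB per data descriptor, and a TSO-enabled build): a linear
 * 1514-byte frame with checksum offload and no fragments needs
 * count = 1 (context descriptor) + 1 (extra reserved in TSO builds)
 *       + TXD_USE_COUNT(1514, 12) = 3,
 * and e1000_maybe_stop_tx() below is asked for count + 2 free descriptors. */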
0f15a8fa
JK
3387
3388 if (adapter->hw.tx_pkt_filtering &&
3389 (adapter->hw.mac_type == e1000_82573))
2d7edb92
MC
3390 e1000_transfer_dhcp_info(adapter, skb);
3391
581d708e
MC
3392 local_irq_save(flags);
3393 if (!spin_trylock(&tx_ring->tx_lock)) {
3394 /* Collision - tell upper layer to requeue */
3395 local_irq_restore(flags);
3396 return NETDEV_TX_LOCKED;
3397 }
1da177e4
LT
3398
3399 /* need: count + 2 desc gap to keep tail from touching
3400 * head, otherwise try next time */
65c7973f 3401 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
581d708e 3402 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1da177e4
LT
3403 return NETDEV_TX_BUSY;
3404 }
3405
96838a40
JB
3406 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
3407 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
1da177e4 3408 netif_stop_queue(netdev);
1314bbf3 3409 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
581d708e 3410 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1da177e4
LT
3411 return NETDEV_TX_BUSY;
3412 }
3413 }
3414
96838a40 3415 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
1da177e4
LT
3416 tx_flags |= E1000_TX_FLAGS_VLAN;
3417 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3418 }
3419
581d708e 3420 first = tx_ring->next_to_use;
96838a40 3421
581d708e 3422 tso = e1000_tso(adapter, tx_ring, skb);
1da177e4
LT
3423 if (tso < 0) {
3424 dev_kfree_skb_any(skb);
581d708e 3425 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1da177e4
LT
3426 return NETDEV_TX_OK;
3427 }
3428
fd803241
JK
3429 if (likely(tso)) {
3430 tx_ring->last_tx_tso = 1;
1da177e4 3431 tx_flags |= E1000_TX_FLAGS_TSO;
fd803241 3432 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
1da177e4
LT
3433 tx_flags |= E1000_TX_FLAGS_CSUM;
3434
2d7edb92 3435 /* Old method was to assume IPv4 packet by default if TSO was enabled.
868d5309 3436 * 82571 hardware supports TSO capabilities for IPv6 as well...
2d7edb92 3437 * we can no longer assume; check the protocol explicitly. */
60828236 3438 if (likely(skb->protocol == htons(ETH_P_IP)))
2d7edb92
MC
3439 tx_flags |= E1000_TX_FLAGS_IPV4;
3440
581d708e
MC
3441 e1000_tx_queue(adapter, tx_ring, tx_flags,
3442 e1000_tx_map(adapter, tx_ring, skb, first,
3443 max_per_txd, nr_frags, mss));
1da177e4
LT
3444
3445 netdev->trans_start = jiffies;
3446
3447 /* Make sure there is space in the ring for the next send. */
65c7973f 3448 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
1da177e4 3449
581d708e 3450 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1da177e4
LT
3451 return NETDEV_TX_OK;
3452}
3453
3454/**
3455 * e1000_tx_timeout - Respond to a Tx Hang
3456 * @netdev: network interface device structure
3457 **/
3458
3459static void
3460e1000_tx_timeout(struct net_device *netdev)
3461{
60490fe0 3462 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4
LT
3463
3464 /* Do the reset outside of interrupt context */
87041639
JK
3465 adapter->tx_timeout_count++;
3466 schedule_work(&adapter->reset_task);
1da177e4
LT
3467}
3468
3469static void
65f27f38 3470e1000_reset_task(struct work_struct *work)
1da177e4 3471{
65f27f38
DH
3472 struct e1000_adapter *adapter =
3473 container_of(work, struct e1000_adapter, reset_task);
1da177e4 3474
2db10a08 3475 e1000_reinit_locked(adapter);
1da177e4
LT
3476}
3477
3478/**
3479 * e1000_get_stats - Get System Network Statistics
3480 * @netdev: network interface device structure
3481 *
3482 * Returns the address of the device statistics structure.
3483 * The statistics are actually updated from the timer callback.
3484 **/
3485
3486static struct net_device_stats *
3487e1000_get_stats(struct net_device *netdev)
3488{
60490fe0 3489 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 3490
6b7660cd 3491 /* only return the current stats */
1da177e4
LT
3492 return &adapter->net_stats;
3493}
3494
3495/**
3496 * e1000_change_mtu - Change the Maximum Transfer Unit
3497 * @netdev: network interface device structure
3498 * @new_mtu: new value for maximum frame size
3499 *
3500 * Returns 0 on success, negative on failure
3501 **/
3502
3503static int
3504e1000_change_mtu(struct net_device *netdev, int new_mtu)
3505{
60490fe0 3506 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 3507 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
85b22eb6 3508 uint16_t eeprom_data = 0;
1da177e4 3509
96838a40
JB
3510 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3511 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3512 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
1da177e4 3513 return -EINVAL;
2d7edb92 3514 }
1da177e4 3515
997f5cbd
JK
3516 /* Adapter-specific max frame size limits. */
3517 switch (adapter->hw.mac_type) {
9e2feace 3518 case e1000_undefined ... e1000_82542_rev2_1:
cd94dd0b 3519 case e1000_ich8lan:
997f5cbd
JK
3520 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3521 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
2d7edb92 3522 return -EINVAL;
2d7edb92 3523 }
997f5cbd 3524 break;
85b22eb6 3525 case e1000_82573:
249d71d6
BA
3526 /* Jumbo Frames not supported if:
3527 * - this is not an 82573L device
3528 * - ASPM is enabled in any way (0x1A bits 3:2) */
85b22eb6
JK
3529 e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
3530 &eeprom_data);
249d71d6
BA
3531 if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
3532 (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
85b22eb6
JK
3533 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3534 DPRINTK(PROBE, ERR,
3535 "Jumbo Frames not supported.\n");
3536 return -EINVAL;
3537 }
3538 break;
3539 }
249d71d6
BA
3540 /* ERT will be enabled later to enable wire speed receives */
3541
85b22eb6 3542 /* fall through to get support */
997f5cbd
JK
3543 case e1000_82571:
3544 case e1000_82572:
87041639 3545 case e1000_80003es2lan:
997f5cbd
JK
3546#define MAX_STD_JUMBO_FRAME_SIZE 9234
3547 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3548 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3549 return -EINVAL;
3550 }
3551 break;
3552 default:
3553 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3554 break;
1da177e4
LT
3555 }
3556
87f5032e 3557 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
9e2feace
AK
3558 * means we reserve 2 more; this pushes us to allocate from the next
3559 * larger slab size
3560 * i.e. RXBUFFER_2048 --> size-4096 slab */
3561
3562 if (max_frame <= E1000_RXBUFFER_256)
3563 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3564 else if (max_frame <= E1000_RXBUFFER_512)
3565 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3566 else if (max_frame <= E1000_RXBUFFER_1024)
3567 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3568 else if (max_frame <= E1000_RXBUFFER_2048)
3569 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3570 else if (max_frame <= E1000_RXBUFFER_4096)
3571 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3572 else if (max_frame <= E1000_RXBUFFER_8192)
3573 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3574 else if (max_frame <= E1000_RXBUFFER_16384)
3575 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3576
3577 /* adjust allocation if LPE protects us, and we aren't using SBP */
9e2feace
AK
3578 if (!adapter->hw.tbi_compatibility_on &&
3579 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3580 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3581 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
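 /* Worked example, illustrative only: new_mtu = 9000 gives
 * max_frame = 9000 + 14 + 4 = 9018, which the ladder above maps to
 * E1000_RXBUFFER_16384. The standard new_mtu = 1500 gives max_frame = 1518,
 * which selects E1000_RXBUFFER_2048 and, when TBI compatibility is off, is
 * then trimmed to MAXIMUM_ETHERNET_VLAN_SIZE by the adjustment just above. */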
997f5cbd 3582
2d7edb92 3583 netdev->mtu = new_mtu;
83cd8279 3584 adapter->hw.max_frame_size = max_frame;
2d7edb92 3585
2db10a08
AK
3586 if (netif_running(netdev))
3587 e1000_reinit_locked(adapter);
1da177e4 3588
1da177e4
LT
3589 return 0;
3590}
3591
3592/**
3593 * e1000_update_stats - Update the board statistics counters
3594 * @adapter: board private structure
3595 **/
3596
3597void
3598e1000_update_stats(struct e1000_adapter *adapter)
3599{
3600 struct e1000_hw *hw = &adapter->hw;
282f33c9 3601 struct pci_dev *pdev = adapter->pdev;
1da177e4
LT
3602 unsigned long flags;
3603 uint16_t phy_tmp;
3604
3605#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3606
282f33c9
LV
3607 /*
3608 * Prevent stats update while adapter is being reset, or if the pci
3609 * connection is down.
3610 */
9026729b 3611 if (adapter->link_speed == 0)
282f33c9
LV
3612 return;
3613 if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
9026729b
AK
3614 return;
3615
1da177e4
LT
3616 spin_lock_irqsave(&adapter->stats_lock, flags);
3617
3618 /* these counters are modified from e1000_adjust_tbi_stats,
3619 * called from the interrupt context, so they must only
3620 * be written while holding adapter->stats_lock
3621 */
3622
3623 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
3624 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
3625 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
3626 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
3627 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
3628 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
3629 adapter->stats.roc += E1000_READ_REG(hw, ROC);
cd94dd0b
AK
3630
3631 if (adapter->hw.mac_type != e1000_ich8lan) {
90fb5135
AK
3632 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
3633 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
3634 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
3635 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
3636 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
3637 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
cd94dd0b 3638 }
1da177e4
LT
3639
3640 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
3641 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
3642 adapter->stats.scc += E1000_READ_REG(hw, SCC);
3643 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
3644 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
3645 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
3646 adapter->stats.dc += E1000_READ_REG(hw, DC);
3647 adapter->stats.sec += E1000_READ_REG(hw, SEC);
3648 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
3649 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
3650 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
3651 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
3652 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
3653 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
3654 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
3655 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
3656 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
3657 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
3658 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
3659 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
3660 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
3661 adapter->stats.torl += E1000_READ_REG(hw, TORL);
3662 adapter->stats.torh += E1000_READ_REG(hw, TORH);
3663 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
3664 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
3665 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
cd94dd0b
AK
3666
3667 if (adapter->hw.mac_type != e1000_ich8lan) {
90fb5135
AK
3668 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
3669 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
3670 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
3671 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
3672 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
3673 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
cd94dd0b
AK
3674 }
3675
1da177e4
LT
3676 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
3677 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
3678
3679 /* used for adaptive IFS */
3680
3681 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
3682 adapter->stats.tpt += hw->tx_packet_delta;
3683 hw->collision_delta = E1000_READ_REG(hw, COLC);
3684 adapter->stats.colc += hw->collision_delta;
3685
96838a40 3686 if (hw->mac_type >= e1000_82543) {
1da177e4
LT
3687 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3688 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3689 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
3690 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
3691 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3692 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3693 }
96838a40 3694 if (hw->mac_type > e1000_82547_rev_2) {
2d7edb92
MC
3695 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3696 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
cd94dd0b
AK
3697
3698 if (adapter->hw.mac_type != e1000_ich8lan) {
90fb5135
AK
3699 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
3700 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
3701 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
3702 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
3703 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
3704 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
3705 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
cd94dd0b 3706 }
2d7edb92 3707 }
1da177e4
LT
3708
3709 /* Fill out the OS statistics structure */
1da177e4
LT
3710 adapter->net_stats.rx_packets = adapter->stats.gprc;
3711 adapter->net_stats.tx_packets = adapter->stats.gptc;
3712 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
3713 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
3714 adapter->net_stats.multicast = adapter->stats.mprc;
3715 adapter->net_stats.collisions = adapter->stats.colc;
3716
3717 /* Rx Errors */
3718
87041639
JK
3719 /* RLEC on some newer hardware can be incorrect so build
3720 * our own version based on RUC and ROC */
1da177e4
LT
3721 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3722 adapter->stats.crcerrs + adapter->stats.algnerrc +
87041639
JK
3723 adapter->stats.ruc + adapter->stats.roc +
3724 adapter->stats.cexterr;
49559854
MW
3725 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3726 adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
1da177e4
LT
3727 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3728 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
1da177e4
LT
3729 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3730
3731 /* Tx Errors */
49559854
MW
3732 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3733 adapter->net_stats.tx_errors = adapter->stats.txerrc;
1da177e4
LT
3734 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3735 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3736 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
167fb284
JG
3737 if (adapter->hw.bad_tx_carr_stats_fd &&
3738 adapter->link_duplex == FULL_DUPLEX) {
3739 adapter->net_stats.tx_carrier_errors = 0;
3740 adapter->stats.tncrs = 0;
3741 }
1da177e4
LT
3742
3743 /* Tx Dropped needs to be maintained elsewhere */
3744
3745 /* Phy Stats */
96838a40
JB
3746 if (hw->media_type == e1000_media_type_copper) {
3747 if ((adapter->link_speed == SPEED_1000) &&
1da177e4
LT
3748 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3749 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3750 adapter->phy_stats.idle_errors += phy_tmp;
3751 }
3752
96838a40 3753 if ((hw->mac_type <= e1000_82546) &&
1da177e4
LT
3754 (hw->phy_type == e1000_phy_m88) &&
3755 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3756 adapter->phy_stats.receive_errors += phy_tmp;
3757 }
3758
15e376b4
JG
3759 /* Management Stats */
3760 if (adapter->hw.has_smbus) {
3761 adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC);
3762 adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC);
3763 adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC);
3764 }
3765
1da177e4
LT
3766 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3767}
9ac98284
JB
3768#ifdef CONFIG_PCI_MSI
3769
3770/**
3771 * e1000_intr_msi - Interrupt Handler
3772 * @irq: interrupt number
3773 * @data: pointer to a network interface device structure
3774 **/
3775
b5fc8f0c
JB
3776static irqreturn_t
3777e1000_intr_msi(int irq, void *data)
9ac98284
JB
3778{
3779 struct net_device *netdev = data;
3780 struct e1000_adapter *adapter = netdev_priv(netdev);
3781 struct e1000_hw *hw = &adapter->hw;
3782#ifndef CONFIG_E1000_NAPI
3783 int i;
3784#endif
b5fc8f0c 3785 uint32_t icr = E1000_READ_REG(hw, ICR);
9ac98284 3786
9ac98284 3787#ifdef CONFIG_E1000_NAPI
b5fc8f0c
JB
3788 /* read ICR disables interrupts using IAM, so keep up with our
3789 * enable/disable accounting */
3790 atomic_inc(&adapter->irq_sem);
9ac98284 3791#endif
b5fc8f0c
JB
3792 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3793 hw->get_link_status = 1;
3794 /* 80003ES2LAN workaround-- For packet buffer work-around on
3795 * link down event; disable receives here in the ISR and reset
3796 * adapter in watchdog */
3797 if (netif_carrier_ok(netdev) &&
3798 (adapter->hw.mac_type == e1000_80003es2lan)) {
3799 /* disable receives */
3800 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3801 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
9ac98284 3802 }
b5fc8f0c
JB
3803 /* guard against interrupt when we're going down */
3804 if (!test_bit(__E1000_DOWN, &adapter->flags))
3805 mod_timer(&adapter->watchdog_timer, jiffies + 1);
9ac98284
JB
3806 }
3807
3808#ifdef CONFIG_E1000_NAPI
835bb129
JB
3809 if (likely(netif_rx_schedule_prep(netdev))) {
3810 adapter->total_tx_bytes = 0;
3811 adapter->total_tx_packets = 0;
3812 adapter->total_rx_bytes = 0;
3813 adapter->total_rx_packets = 0;
9ac98284 3814 __netif_rx_schedule(netdev);
835bb129 3815 } else
9ac98284
JB
3816 e1000_irq_enable(adapter);
3817#else
835bb129
JB
3818 adapter->total_tx_bytes = 0;
3819 adapter->total_rx_bytes = 0;
3820 adapter->total_tx_packets = 0;
3821 adapter->total_rx_packets = 0;
3822
9ac98284
JB
3823 for (i = 0; i < E1000_MAX_INTR; i++)
3824 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
60cba200 3825 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
9ac98284 3826 break;
835bb129
JB
3827
3828 if (likely(adapter->itr_setting & 3))
3829 e1000_set_itr(adapter);
9ac98284
JB
3830#endif
3831
3832 return IRQ_HANDLED;
3833}
3834#endif
1da177e4
LT
3835
3836/**
3837 * e1000_intr - Interrupt Handler
3838 * @irq: interrupt number
3839 * @data: pointer to a network interface device structure
1da177e4
LT
3840 **/
3841
3842static irqreturn_t
7d12e780 3843e1000_intr(int irq, void *data)
1da177e4
LT
3844{
3845 struct net_device *netdev = data;
60490fe0 3846 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 3847 struct e1000_hw *hw = &adapter->hw;
87041639 3848 uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
1e613fd9 3849#ifndef CONFIG_E1000_NAPI
581d708e 3850 int i;
835bb129
JB
3851#endif
3852 if (unlikely(!icr))
3853 return IRQ_NONE; /* Not our interrupt */
3854
3855#ifdef CONFIG_E1000_NAPI
3856 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3857 * not set, then the adapter didn't send an interrupt */
3858 if (unlikely(hw->mac_type >= e1000_82571 &&
3859 !(icr & E1000_ICR_INT_ASSERTED)))
3860 return IRQ_NONE;
3861
1e613fd9
JK
3862 /* Interrupt Auto-Mask...upon reading ICR,
3863 * interrupts are masked. No need for the
3864 * IMC write, but it does mean we should
3865 * account for it ASAP. */
3866 if (likely(hw->mac_type >= e1000_82571))
3867 atomic_inc(&adapter->irq_sem);
be2b28ed 3868#endif
1da177e4 3869
96838a40 3870 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
1da177e4 3871 hw->get_link_status = 1;
87041639
JK
3872 /* 80003ES2LAN workaround--
3873 * For packet buffer work-around on link down event;
3874 * disable receives here in the ISR and
3875 * reset adapter in watchdog
3876 */
3877 if (netif_carrier_ok(netdev) &&
3878 (adapter->hw.mac_type == e1000_80003es2lan)) {
3879 /* disable receives */
3880 rctl = E1000_READ_REG(hw, RCTL);
3881 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3882 }
1314bbf3
AK
3883 /* guard against interrupt when we're going down */
3884 if (!test_bit(__E1000_DOWN, &adapter->flags))
3885 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1da177e4
LT
3886 }
3887
3888#ifdef CONFIG_E1000_NAPI
1e613fd9 3889 if (unlikely(hw->mac_type < e1000_82571)) {
835bb129 3890 /* disable interrupts, without the synchronize_irq bit */
1e613fd9
JK
3891 atomic_inc(&adapter->irq_sem);
3892 E1000_WRITE_REG(hw, IMC, ~0);
3893 E1000_WRITE_FLUSH(hw);
3894 }
835bb129
JB
3895 if (likely(netif_rx_schedule_prep(netdev))) {
3896 adapter->total_tx_bytes = 0;
3897 adapter->total_tx_packets = 0;
3898 adapter->total_rx_bytes = 0;
3899 adapter->total_rx_packets = 0;
d3d9e484 3900 __netif_rx_schedule(netdev);
835bb129 3901 } else
90fb5135
AK
3902 /* this really should not happen! if it does it is basically a
3903 * bug, but not a hard error, so enable ints and continue */
581d708e 3904 e1000_irq_enable(adapter);
c1605eb3 3905#else
1da177e4 3906 /* Writing IMC and IMS is needed for 82547.
96838a40
JB
3907 * Due to Hub Link bus being occupied, an interrupt
3908 * de-assertion message is not able to be sent.
3909 * When an interrupt assertion message is generated later,
3910 * two messages are re-ordered and sent out.
3911 * That causes APIC to think 82547 is in de-assertion
3912 * state, while 82547 is in assertion state, resulting
3913 * in deadlock. Writing IMC forces 82547 into
3914 * de-assertion state.
3915 */
3916 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
1da177e4 3917 atomic_inc(&adapter->irq_sem);
2648345f 3918 E1000_WRITE_REG(hw, IMC, ~0);
1da177e4
LT
3919 }
3920
835bb129
JB
3921 adapter->total_tx_bytes = 0;
3922 adapter->total_rx_bytes = 0;
3923 adapter->total_tx_packets = 0;
3924 adapter->total_rx_packets = 0;
3925
96838a40
JB
3926 for (i = 0; i < E1000_MAX_INTR; i++)
3927 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
60cba200 3928 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
1da177e4
LT
3929 break;
3930
835bb129
JB
3931 if (likely(adapter->itr_setting & 3))
3932 e1000_set_itr(adapter);
3933
96838a40 3934 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
1da177e4 3935 e1000_irq_enable(adapter);
581d708e 3936
c1605eb3 3937#endif
1da177e4
LT
3938 return IRQ_HANDLED;
3939}
3940
3941#ifdef CONFIG_E1000_NAPI
3942/**
3943 * e1000_clean - NAPI Rx polling callback
3944 * @adapter: board private structure
3945 **/
3946
3947static int
581d708e 3948e1000_clean(struct net_device *poll_dev, int *budget)
1da177e4 3949{
581d708e
MC
3950 struct e1000_adapter *adapter;
3951 int work_to_do = min(*budget, poll_dev->quota);
d3d9e484 3952 int tx_cleaned = 0, work_done = 0;
581d708e
MC
3953
3954 /* Must NOT use netdev_priv macro here. */
3955 adapter = poll_dev->priv;
3956
3957 /* Keep link state information with original netdev */
d3d9e484 3958 if (!netif_carrier_ok(poll_dev))
581d708e 3959 goto quit_polling;
2648345f 3960
d3d9e484
AK
3961 /* e1000_clean is called per-cpu. This lock protects
3962 * tx_ring[0] from being cleaned by multiple cpus
3963 * simultaneously. A failure obtaining the lock means
3964 * tx_ring[0] is currently being cleaned anyway. */
3965 if (spin_trylock(&adapter->tx_queue_lock)) {
3966 tx_cleaned = e1000_clean_tx_irq(adapter,
3967 &adapter->tx_ring[0]);
3968 spin_unlock(&adapter->tx_queue_lock);
581d708e
MC
3969 }
3970
d3d9e484 3971 adapter->clean_rx(adapter, &adapter->rx_ring[0],
581d708e 3972 &work_done, work_to_do);
1da177e4
LT
3973
3974 *budget -= work_done;
581d708e 3975 poll_dev->quota -= work_done;
96838a40 3976
2b02893e 3977 /* If no Tx and not enough Rx work done, exit the polling mode */
60cba200 3978 if ((tx_cleaned && (work_done < work_to_do)) ||
d3d9e484 3979 !netif_running(poll_dev)) {
581d708e 3980quit_polling:
835bb129
JB
3981 if (likely(adapter->itr_setting & 3))
3982 e1000_set_itr(adapter);
581d708e 3983 netif_rx_complete(poll_dev);
1da177e4
LT
3984 e1000_irq_enable(adapter);
3985 return 0;
3986 }
3987
3988 return 1;
3989}
3990
3991#endif
3992/**
3993 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3994 * @adapter: board private structure
3995 **/
3996
3997static boolean_t
581d708e
MC
3998e1000_clean_tx_irq(struct e1000_adapter *adapter,
3999 struct e1000_tx_ring *tx_ring)
1da177e4 4000{
1da177e4
LT
4001 struct net_device *netdev = adapter->netdev;
4002 struct e1000_tx_desc *tx_desc, *eop_desc;
4003 struct e1000_buffer *buffer_info;
4004 unsigned int i, eop;
2a1af5d7
JK
4005#ifdef CONFIG_E1000_NAPI
4006 unsigned int count = 0;
4007#endif
60cba200 4008 boolean_t cleaned = TRUE;
835bb129 4009 unsigned int total_tx_bytes=0, total_tx_packets=0;
1da177e4
LT
4010
4011 i = tx_ring->next_to_clean;
4012 eop = tx_ring->buffer_info[i].next_to_watch;
4013 eop_desc = E1000_TX_DESC(*tx_ring, eop);
4014
581d708e 4015 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
96838a40 4016 for (cleaned = FALSE; !cleaned; ) {
1da177e4
LT
4017 tx_desc = E1000_TX_DESC(*tx_ring, i);
4018 buffer_info = &tx_ring->buffer_info[i];
4019 cleaned = (i == eop);
4020
835bb129 4021 if (cleaned) {
2b65326e
JB
4022 struct sk_buff *skb = buffer_info->skb;
4023 unsigned int segs = skb_shinfo(skb)->gso_segs;
4024 total_tx_packets += segs;
835bb129 4025 total_tx_packets++;
2b65326e 4026 total_tx_bytes += skb->len;
835bb129 4027 }
fd803241 4028 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
a9ebadd6 4029 tx_desc->upper.data = 0;
1da177e4 4030
96838a40 4031 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4 4032 }
581d708e 4033
1da177e4
LT
4034 eop = tx_ring->buffer_info[i].next_to_watch;
4035 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2a1af5d7
JK
4036#ifdef CONFIG_E1000_NAPI
4037#define E1000_TX_WEIGHT 64
4038 /* weight of a sort for tx, to avoid endless transmit cleanup */
60cba200
JB
4039 if (count++ == E1000_TX_WEIGHT) {
4040 cleaned = FALSE;
4041 break;
4042 }
2a1af5d7 4043#endif
1da177e4
LT
4044 }
4045
4046 tx_ring->next_to_clean = i;
4047
77b2aad5 4048#define TX_WAKE_THRESHOLD 32
65c7973f
JB
4049 if (unlikely(cleaned && netif_carrier_ok(netdev) &&
4050 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
4051 /* Make sure that anybody stopping the queue after this
4052 * sees the new next_to_clean.
4053 */
4054 smp_mb();
fcfb1224 4055 if (netif_queue_stopped(netdev)) {
77b2aad5 4056 netif_wake_queue(netdev);
fcfb1224
JB
4057 ++adapter->restart_queue;
4058 }
77b2aad5 4059 }
2648345f 4060
581d708e 4061 if (adapter->detect_tx_hung) {
2648345f 4062 /* Detect a transmit hang in hardware, this serializes the
1da177e4
LT
4063 * check with the clearing of time_stamp and movement of i */
4064 adapter->detect_tx_hung = FALSE;
392137fa
JK
4065 if (tx_ring->buffer_info[eop].dma &&
4066 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
7e6c9861 4067 (adapter->tx_timeout_factor * HZ))
70b8f1e1 4068 && !(E1000_READ_REG(&adapter->hw, STATUS) &
392137fa 4069 E1000_STATUS_TXOFF)) {
70b8f1e1
MC
4070
4071 /* detected Tx unit hang */
c6963ef5 4072 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
7bfa4816 4073 " Tx Queue <%lu>\n"
70b8f1e1
MC
4074 " TDH <%x>\n"
4075 " TDT <%x>\n"
4076 " next_to_use <%x>\n"
4077 " next_to_clean <%x>\n"
4078 "buffer_info[next_to_clean]\n"
70b8f1e1
MC
4079 " time_stamp <%lx>\n"
4080 " next_to_watch <%x>\n"
4081 " jiffies <%lx>\n"
4082 " next_to_watch.status <%x>\n",
7bfa4816
JK
4083 (unsigned long)((tx_ring - adapter->tx_ring) /
4084 sizeof(struct e1000_tx_ring)),
581d708e
MC
4085 readl(adapter->hw.hw_addr + tx_ring->tdh),
4086 readl(adapter->hw.hw_addr + tx_ring->tdt),
70b8f1e1 4087 tx_ring->next_to_use,
392137fa
JK
4088 tx_ring->next_to_clean,
4089 tx_ring->buffer_info[eop].time_stamp,
70b8f1e1
MC
4090 eop,
4091 jiffies,
4092 eop_desc->upper.fields.status);
1da177e4 4093 netif_stop_queue(netdev);
70b8f1e1 4094 }
1da177e4 4095 }
835bb129
JB
4096 adapter->total_tx_bytes += total_tx_bytes;
4097 adapter->total_tx_packets += total_tx_packets;
1da177e4
LT
4098 return cleaned;
4099}
4100
4101/**
4102 * e1000_rx_checksum - Receive Checksum Offload for 82543
2d7edb92
MC
4103 * @adapter: board private structure
4104 * @status_err: receive descriptor status and error fields
4105 * @csum: receive descriptor csum field
4106 * @sk_buff: socket buffer with received data
1da177e4
LT
4107 **/
4108
e619d523 4109static void
1da177e4 4110e1000_rx_checksum(struct e1000_adapter *adapter,
2d7edb92
MC
4111 uint32_t status_err, uint32_t csum,
4112 struct sk_buff *skb)
1da177e4 4113{
2d7edb92
MC
4114 uint16_t status = (uint16_t)status_err;
4115 uint8_t errors = (uint8_t)(status_err >> 24);
4116 skb->ip_summed = CHECKSUM_NONE;
4117
1da177e4 4118 /* 82543 or newer only */
96838a40 4119 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
1da177e4 4120 /* Ignore Checksum bit is set */
96838a40 4121 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
2d7edb92 4122 /* TCP/UDP checksum error bit is set */
96838a40 4123 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
1da177e4 4124 /* let the stack verify checksum errors */
1da177e4 4125 adapter->hw_csum_err++;
2d7edb92
MC
4126 return;
4127 }
4128 /* TCP/UDP Checksum has not been calculated */
96838a40
JB
4129 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
4130 if (!(status & E1000_RXD_STAT_TCPCS))
2d7edb92 4131 return;
1da177e4 4132 } else {
96838a40 4133 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
2d7edb92
MC
4134 return;
4135 }
4136 /* It must be a TCP or UDP packet with a valid checksum */
4137 if (likely(status & E1000_RXD_STAT_TCPCS)) {
1da177e4
LT
4138 /* TCP checksum is good */
4139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2d7edb92
MC
4140 } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
4141 /* IP fragment with UDP payload */
4142 /* Hardware complements the payload checksum, so we undo it
4143 * and then put the value in host order for further stack use.
4144 */
4145 csum = ntohl(csum ^ 0xFFFF);
4146 skb->csum = csum;
84fa7933 4147 skb->ip_summed = CHECKSUM_COMPLETE;
1da177e4 4148 }
2d7edb92 4149 adapter->hw_csum_good++;
1da177e4
LT
4150}
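
A standalone sketch (not driver code) of the complement trick used in the CHECKSUM_COMPLETE branch above: the hardware writes back the one's complement of the payload checksum and the driver recovers it by XORing with 0xFFFF. The byte-order conversion is omitted here and the value is invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t csum_from_hw = 0xA5C3;                 /* complemented value, invented */
	uint16_t payload_csum = csum_from_hw ^ 0xFFFF;  /* undo the complement -> 0x5A3C */

	printf("hw=0x%04x recovered=0x%04x\n", csum_from_hw, payload_csum);
	return 0;
}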
4151
4152/**
2d7edb92 4153 * e1000_clean_rx_irq - Send received data up the network stack; legacy
1da177e4
LT
4154 * @adapter: board private structure
4155 **/
4156
4157static boolean_t
4158#ifdef CONFIG_E1000_NAPI
581d708e
MC
4159e1000_clean_rx_irq(struct e1000_adapter *adapter,
4160 struct e1000_rx_ring *rx_ring,
4161 int *work_done, int work_to_do)
1da177e4 4162#else
581d708e
MC
4163e1000_clean_rx_irq(struct e1000_adapter *adapter,
4164 struct e1000_rx_ring *rx_ring)
1da177e4
LT
4165#endif
4166{
1da177e4
LT
4167 struct net_device *netdev = adapter->netdev;
4168 struct pci_dev *pdev = adapter->pdev;
86c3d59f
JB
4169 struct e1000_rx_desc *rx_desc, *next_rxd;
4170 struct e1000_buffer *buffer_info, *next_buffer;
1da177e4
LT
4171 unsigned long flags;
4172 uint32_t length;
4173 uint8_t last_byte;
4174 unsigned int i;
72d64a43 4175 int cleaned_count = 0;
a1415ee6 4176 boolean_t cleaned = FALSE;
835bb129 4177 unsigned int total_rx_bytes=0, total_rx_packets=0;
1da177e4
LT
4178
4179 i = rx_ring->next_to_clean;
4180 rx_desc = E1000_RX_DESC(*rx_ring, i);
b92ff8ee 4181 buffer_info = &rx_ring->buffer_info[i];
1da177e4 4182
b92ff8ee 4183 while (rx_desc->status & E1000_RXD_STAT_DD) {
24f476ee 4184 struct sk_buff *skb;
a292ca6e 4185 u8 status;
90fb5135 4186
1da177e4 4187#ifdef CONFIG_E1000_NAPI
96838a40 4188 if (*work_done >= work_to_do)
1da177e4
LT
4189 break;
4190 (*work_done)++;
4191#endif
a292ca6e 4192 status = rx_desc->status;
b92ff8ee 4193 skb = buffer_info->skb;
86c3d59f
JB
4194 buffer_info->skb = NULL;
4195
30320be8
JK
4196 prefetch(skb->data - NET_IP_ALIGN);
4197
86c3d59f
JB
4198 if (++i == rx_ring->count) i = 0;
4199 next_rxd = E1000_RX_DESC(*rx_ring, i);
30320be8
JK
4200 prefetch(next_rxd);
4201
86c3d59f 4202 next_buffer = &rx_ring->buffer_info[i];
86c3d59f 4203
72d64a43
JK
4204 cleaned = TRUE;
4205 cleaned_count++;
a292ca6e
JK
4206 pci_unmap_single(pdev,
4207 buffer_info->dma,
4208 buffer_info->length,
1da177e4
LT
4209 PCI_DMA_FROMDEVICE);
4210
1da177e4
LT
4211 length = le16_to_cpu(rx_desc->length);
4212
a1415ee6
JK
4213 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
4214 /* All receives must fit into a single buffer */
4215 E1000_DBG("%s: Receive packet consumed multiple"
4216 " buffers\n", netdev->name);
864c4e45 4217 /* recycle */
8fc897b0 4218 buffer_info->skb = skb;
1da177e4
LT
4219 goto next_desc;
4220 }
4221
96838a40 4222 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
1da177e4 4223 last_byte = *(skb->data + length - 1);
b92ff8ee 4224 if (TBI_ACCEPT(&adapter->hw, status,
1da177e4
LT
4225 rx_desc->errors, length, last_byte)) {
4226 spin_lock_irqsave(&adapter->stats_lock, flags);
a292ca6e
JK
4227 e1000_tbi_adjust_stats(&adapter->hw,
4228 &adapter->stats,
1da177e4
LT
4229 length, skb->data);
4230 spin_unlock_irqrestore(&adapter->stats_lock,
4231 flags);
4232 length--;
4233 } else {
9e2feace
AK
4234 /* recycle */
4235 buffer_info->skb = skb;
1da177e4
LT
4236 goto next_desc;
4237 }
1cb5821f 4238 }
1da177e4 4239
d2a1e213
JB
4240 /* adjust length to remove Ethernet CRC, this must be
4241 * done after the TBI_ACCEPT workaround above */
4242 length -= 4;
4243
835bb129
JB
4244 /* probably a little skewed due to removing CRC */
4245 total_rx_bytes += length;
4246 total_rx_packets++;
4247
a292ca6e
JK
4248 /* code added for copybreak, this should improve
4249 * performance for small packets with large amounts
4250 * of reassembly being done in the stack */
1f753861 4251 if (length < copybreak) {
a292ca6e 4252 struct sk_buff *new_skb =
87f5032e 4253 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
a292ca6e
JK
4254 if (new_skb) {
4255 skb_reserve(new_skb, NET_IP_ALIGN);
a292ca6e
JK
4256 memcpy(new_skb->data - NET_IP_ALIGN,
4257 skb->data - NET_IP_ALIGN,
4258 length + NET_IP_ALIGN);
4259 /* save the skb in buffer_info as good */
4260 buffer_info->skb = skb;
4261 skb = new_skb;
a292ca6e 4262 }
996695de
AK
4263 /* else just continue with the old one */
4264 }
a292ca6e 4265 /* end copybreak code */
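 /* Illustrative only: with a copybreak threshold of, say, 256 bytes, a
 * 128-byte frame is copied into the freshly allocated skb above and the
 * original (larger) receive buffer stays in buffer_info for reuse,
 * avoiding a full buffer allocation per small packet. */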
996695de 4266 skb_put(skb, length);
1da177e4
LT
4267
4268 /* Receive Checksum Offload */
a292ca6e
JK
4269 e1000_rx_checksum(adapter,
4270 (uint32_t)(status) |
2d7edb92 4271 ((uint32_t)(rx_desc->errors) << 24),
c3d7a3a4 4272 le16_to_cpu(rx_desc->csum), skb);
96838a40 4273
1da177e4
LT
4274 skb->protocol = eth_type_trans(skb, netdev);
4275#ifdef CONFIG_E1000_NAPI
96838a40 4276 if (unlikely(adapter->vlgrp &&
a292ca6e 4277 (status & E1000_RXD_STAT_VP))) {
1da177e4 4278 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2d7edb92
MC
4279 le16_to_cpu(rx_desc->special) &
4280 E1000_RXD_SPC_VLAN_MASK);
1da177e4
LT
4281 } else {
4282 netif_receive_skb(skb);
4283 }
4284#else /* CONFIG_E1000_NAPI */
96838a40 4285 if (unlikely(adapter->vlgrp &&
b92ff8ee 4286 (status & E1000_RXD_STAT_VP))) {
1da177e4
LT
4287 vlan_hwaccel_rx(skb, adapter->vlgrp,
4288 le16_to_cpu(rx_desc->special) &
4289 E1000_RXD_SPC_VLAN_MASK);
4290 } else {
4291 netif_rx(skb);
4292 }
4293#endif /* CONFIG_E1000_NAPI */
4294 netdev->last_rx = jiffies;
4295
4296next_desc:
4297 rx_desc->status = 0;
1da177e4 4298
72d64a43
JK
4299 /* return some buffers to hardware, one at a time is too slow */
4300 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4301 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4302 cleaned_count = 0;
4303 }
4304
30320be8 4305 /* use prefetched values */
86c3d59f
JB
4306 rx_desc = next_rxd;
4307 buffer_info = next_buffer;
1da177e4 4308 }
1da177e4 4309 rx_ring->next_to_clean = i;
72d64a43
JK
4310
4311 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4312 if (cleaned_count)
4313 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
2d7edb92 4314
835bb129
JB
4315 adapter->total_rx_packets += total_rx_packets;
4316 adapter->total_rx_bytes += total_rx_bytes;
2d7edb92
MC
4317 return cleaned;
4318}
4319
4320/**
4321 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
4322 * @adapter: board private structure
4323 **/
4324
4325static boolean_t
4326#ifdef CONFIG_E1000_NAPI
581d708e
MC
4327e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4328 struct e1000_rx_ring *rx_ring,
4329 int *work_done, int work_to_do)
2d7edb92 4330#else
581d708e
MC
4331e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4332 struct e1000_rx_ring *rx_ring)
2d7edb92
MC
4333#endif
4334{
86c3d59f 4335 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
2d7edb92
MC
4336 struct net_device *netdev = adapter->netdev;
4337 struct pci_dev *pdev = adapter->pdev;
86c3d59f 4338 struct e1000_buffer *buffer_info, *next_buffer;
2d7edb92
MC
4339 struct e1000_ps_page *ps_page;
4340 struct e1000_ps_page_dma *ps_page_dma;
24f476ee 4341 struct sk_buff *skb;
2d7edb92
MC
4342 unsigned int i, j;
4343 uint32_t length, staterr;
72d64a43 4344 int cleaned_count = 0;
2d7edb92 4345 boolean_t cleaned = FALSE;
835bb129 4346 unsigned int total_rx_bytes=0, total_rx_packets=0;
2d7edb92
MC
4347
4348 i = rx_ring->next_to_clean;
4349 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
683a38f3 4350 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
9e2feace 4351 buffer_info = &rx_ring->buffer_info[i];
2d7edb92 4352
96838a40 4353 while (staterr & E1000_RXD_STAT_DD) {
2d7edb92
MC
4354 ps_page = &rx_ring->ps_page[i];
4355 ps_page_dma = &rx_ring->ps_page_dma[i];
4356#ifdef CONFIG_E1000_NAPI
96838a40 4357 if (unlikely(*work_done >= work_to_do))
2d7edb92
MC
4358 break;
4359 (*work_done)++;
4360#endif
86c3d59f
JB
4361 skb = buffer_info->skb;
4362
30320be8
JK
4363 /* in the packet split case this is header only */
4364 prefetch(skb->data - NET_IP_ALIGN);
4365
86c3d59f
JB
4366 if (++i == rx_ring->count) i = 0;
4367 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
30320be8
JK
4368 prefetch(next_rxd);
4369
86c3d59f 4370 next_buffer = &rx_ring->buffer_info[i];
86c3d59f 4371
2d7edb92 4372 cleaned = TRUE;
72d64a43 4373 cleaned_count++;
2d7edb92
MC
4374 pci_unmap_single(pdev, buffer_info->dma,
4375 buffer_info->length,
4376 PCI_DMA_FROMDEVICE);
4377
96838a40 4378 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
2d7edb92
MC
4379 E1000_DBG("%s: Packet Split buffers didn't pick up"
4380 " the full packet\n", netdev->name);
4381 dev_kfree_skb_irq(skb);
4382 goto next_desc;
4383 }
1da177e4 4384
96838a40 4385 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
2d7edb92
MC
4386 dev_kfree_skb_irq(skb);
4387 goto next_desc;
4388 }
4389
4390 length = le16_to_cpu(rx_desc->wb.middle.length0);
4391
96838a40 4392 if (unlikely(!length)) {
2d7edb92
MC
4393 E1000_DBG("%s: Last part of the packet spanning"
4394 " multiple descriptors\n", netdev->name);
4395 dev_kfree_skb_irq(skb);
4396 goto next_desc;
4397 }
4398
4399 /* Good Receive */
4400 skb_put(skb, length);
4401
dc7c6add
JK
4402 {
4403 /* this looks ugly, but it seems compiler issues make it
4404 more efficient than reusing j */
4405 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
4406
4407 /* page alloc/put takes too long and affects small packet
4408 * throughput, so unsplit small packets and save the alloc/put */
1f753861 4409 if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
dc7c6add 4410 u8 *vaddr;
76c224bc 4411 /* there is no documentation about how to call
dc7c6add
JK
4412 * kmap_atomic, so we can't hold the mapping
4413 * very long */
4414 pci_dma_sync_single_for_cpu(pdev,
4415 ps_page_dma->ps_page_dma[0],
4416 PAGE_SIZE,
4417 PCI_DMA_FROMDEVICE);
4418 vaddr = kmap_atomic(ps_page->ps_page[0],
4419 KM_SKB_DATA_SOFTIRQ);
4420 memcpy(skb->tail, vaddr, l1);
4421 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
4422 pci_dma_sync_single_for_device(pdev,
4423 ps_page_dma->ps_page_dma[0],
4424 PAGE_SIZE, PCI_DMA_FROMDEVICE);
f235a2ab
AK
4425 /* remove the CRC */
4426 l1 -= 4;
dc7c6add 4427 skb_put(skb, l1);
dc7c6add
JK
4428 goto copydone;
4429 } /* if */
4430 }
90fb5135 4431
96838a40 4432 for (j = 0; j < adapter->rx_ps_pages; j++) {
30320be8 4433 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
2d7edb92 4434 break;
2d7edb92
MC
4435 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
4436 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4437 ps_page_dma->ps_page_dma[j] = 0;
329bfd0b
JK
4438 skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
4439 length);
2d7edb92 4440 ps_page->ps_page[j] = NULL;
2d7edb92
MC
4441 skb->len += length;
4442 skb->data_len += length;
5d51b80f 4443 skb->truesize += length;
2d7edb92
MC
4444 }
4445
f235a2ab
AK
4446 /* strip the Ethernet CRC; since we're using pages now,
4447 * this whole operation can get a little CPU intensive */
4448 pskb_trim(skb, skb->len - 4);
4449
dc7c6add 4450copydone:
835bb129
JB
4451 total_rx_bytes += skb->len;
4452 total_rx_packets++;
4453
2d7edb92 4454 e1000_rx_checksum(adapter, staterr,
c3d7a3a4 4455 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
2d7edb92
MC
4456 skb->protocol = eth_type_trans(skb, netdev);
4457
96838a40 4458 if (likely(rx_desc->wb.upper.header_status &
c3d7a3a4 4459 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
e4c811c9 4460 adapter->rx_hdr_split++;
2d7edb92 4461#ifdef CONFIG_E1000_NAPI
96838a40 4462 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
2d7edb92 4463 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
683a38f3
MC
4464 le16_to_cpu(rx_desc->wb.middle.vlan) &
4465 E1000_RXD_SPC_VLAN_MASK);
2d7edb92
MC
4466 } else {
4467 netif_receive_skb(skb);
4468 }
4469#else /* CONFIG_E1000_NAPI */
96838a40 4470 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
2d7edb92 4471 vlan_hwaccel_rx(skb, adapter->vlgrp,
683a38f3
MC
4472 le16_to_cpu(rx_desc->wb.middle.vlan) &
4473 E1000_RXD_SPC_VLAN_MASK);
2d7edb92
MC
4474 } else {
4475 netif_rx(skb);
4476 }
4477#endif /* CONFIG_E1000_NAPI */
4478 netdev->last_rx = jiffies;
4479
4480next_desc:
c3d7a3a4 4481 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
2d7edb92 4482 buffer_info->skb = NULL;
2d7edb92 4483
72d64a43
JK
4484 /* return some buffers to hardware, one at a time is too slow */
4485 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4486 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4487 cleaned_count = 0;
4488 }
4489
30320be8 4490 /* use prefetched values */
86c3d59f
JB
4491 rx_desc = next_rxd;
4492 buffer_info = next_buffer;
4493
683a38f3 4494 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
2d7edb92
MC
4495 }
4496 rx_ring->next_to_clean = i;
72d64a43
JK
4497
4498 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4499 if (cleaned_count)
4500 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
1da177e4 4501
835bb129
JB
4502 adapter->total_rx_packets += total_rx_packets;
4503 adapter->total_rx_bytes += total_rx_bytes;
1da177e4
LT
4504 return cleaned;
4505}
4506
4507/**
2d7edb92 4508 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1da177e4
LT
4509 * @adapter: address of board private structure
4510 **/
4511
4512static void
581d708e 4513e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
72d64a43 4514 struct e1000_rx_ring *rx_ring,
a292ca6e 4515 int cleaned_count)
1da177e4 4516{
1da177e4
LT
4517 struct net_device *netdev = adapter->netdev;
4518 struct pci_dev *pdev = adapter->pdev;
4519 struct e1000_rx_desc *rx_desc;
4520 struct e1000_buffer *buffer_info;
4521 struct sk_buff *skb;
2648345f
MC
4522 unsigned int i;
4523 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1da177e4
LT
4524
4525 i = rx_ring->next_to_use;
4526 buffer_info = &rx_ring->buffer_info[i];
4527
a292ca6e 4528 while (cleaned_count--) {
ca6f7224
CH
4529 skb = buffer_info->skb;
4530 if (skb) {
a292ca6e
JK
4531 skb_trim(skb, 0);
4532 goto map_skb;
4533 }
4534
ca6f7224 4535 skb = netdev_alloc_skb(netdev, bufsz);
96838a40 4536 if (unlikely(!skb)) {
1da177e4 4537 /* Better luck next round */
72d64a43 4538 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4539 break;
4540 }
4541
2648345f 4542 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
4543 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4544 struct sk_buff *oldskb = skb;
2648345f
MC
4545 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4546 "at %p\n", bufsz, skb->data);
4547 /* Try again, without freeing the previous */
87f5032e 4548 skb = netdev_alloc_skb(netdev, bufsz);
2648345f 4549 /* Failed allocation, critical failure */
1da177e4
LT
4550 if (!skb) {
4551 dev_kfree_skb(oldskb);
4552 break;
4553 }
2648345f 4554
1da177e4
LT
4555 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4556 /* give up */
4557 dev_kfree_skb(skb);
4558 dev_kfree_skb(oldskb);
4559 break; /* while !buffer_info->skb */
1da177e4 4560 }
ca6f7224
CH
4561
4562 /* Use new allocation */
4563 dev_kfree_skb(oldskb);
1da177e4 4564 }
1da177e4
LT
4565 /* Make buffer alignment 2 beyond a 16 byte boundary
4566 * this will result in a 16 byte aligned IP header after
4567 * the 14 byte MAC header is removed
4568 */
4569 skb_reserve(skb, NET_IP_ALIGN);
4570
1da177e4
LT
4571 buffer_info->skb = skb;
4572 buffer_info->length = adapter->rx_buffer_len;
a292ca6e 4573map_skb:
1da177e4
LT
4574 buffer_info->dma = pci_map_single(pdev,
4575 skb->data,
4576 adapter->rx_buffer_len,
4577 PCI_DMA_FROMDEVICE);
4578
2648345f
MC
4579 /* Fix for errata 23, can't cross 64kB boundary */
4580 if (!e1000_check_64k_bound(adapter,
4581 (void *)(unsigned long)buffer_info->dma,
4582 adapter->rx_buffer_len)) {
4583 DPRINTK(RX_ERR, ERR,
4584 "dma align check failed: %u bytes at %p\n",
4585 adapter->rx_buffer_len,
4586 (void *)(unsigned long)buffer_info->dma);
1da177e4
LT
4587 dev_kfree_skb(skb);
4588 buffer_info->skb = NULL;
4589
2648345f 4590 pci_unmap_single(pdev, buffer_info->dma,
1da177e4
LT
4591 adapter->rx_buffer_len,
4592 PCI_DMA_FROMDEVICE);
4593
4594 break; /* while !buffer_info->skb */
4595 }
1da177e4
LT
4596 rx_desc = E1000_RX_DESC(*rx_ring, i);
4597 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4598
96838a40
JB
4599 if (unlikely(++i == rx_ring->count))
4600 i = 0;
1da177e4
LT
4601 buffer_info = &rx_ring->buffer_info[i];
4602 }
4603
b92ff8ee
JB
4604 if (likely(rx_ring->next_to_use != i)) {
4605 rx_ring->next_to_use = i;
4606 if (unlikely(i-- == 0))
4607 i = (rx_ring->count - 1);
4608
4609 /* Force memory writes to complete before letting h/w
4610 * know there are new descriptors to fetch. (Only
4611 * applicable for weak-ordered memory model archs,
4612 * such as IA-64). */
4613 wmb();
4614 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4615 }
1da177e4
LT
4616}
4617
2d7edb92
MC
4618/**
4619 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
4620 * @adapter: address of board private structure
4621 **/
4622
4623static void
581d708e 4624e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
72d64a43
JK
4625 struct e1000_rx_ring *rx_ring,
4626 int cleaned_count)
2d7edb92 4627{
2d7edb92
MC
4628 struct net_device *netdev = adapter->netdev;
4629 struct pci_dev *pdev = adapter->pdev;
4630 union e1000_rx_desc_packet_split *rx_desc;
4631 struct e1000_buffer *buffer_info;
4632 struct e1000_ps_page *ps_page;
4633 struct e1000_ps_page_dma *ps_page_dma;
4634 struct sk_buff *skb;
4635 unsigned int i, j;
4636
4637 i = rx_ring->next_to_use;
4638 buffer_info = &rx_ring->buffer_info[i];
4639 ps_page = &rx_ring->ps_page[i];
4640 ps_page_dma = &rx_ring->ps_page_dma[i];
4641
72d64a43 4642 while (cleaned_count--) {
2d7edb92
MC
4643 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4644
96838a40 4645 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
e4c811c9
MC
4646 if (j < adapter->rx_ps_pages) {
4647 if (likely(!ps_page->ps_page[j])) {
4648 ps_page->ps_page[j] =
4649 alloc_page(GFP_ATOMIC);
b92ff8ee
JB
4650 if (unlikely(!ps_page->ps_page[j])) {
4651 adapter->alloc_rx_buff_failed++;
e4c811c9 4652 goto no_buffers;
b92ff8ee 4653 }
e4c811c9
MC
4654 ps_page_dma->ps_page_dma[j] =
4655 pci_map_page(pdev,
4656 ps_page->ps_page[j],
4657 0, PAGE_SIZE,
4658 PCI_DMA_FROMDEVICE);
4659 }
4660 /* Refresh the desc even if buffer_addrs didn't
96838a40 4661 * change because each write-back erases
e4c811c9
MC
4662 * this info.
4663 */
4664 rx_desc->read.buffer_addr[j+1] =
4665 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
4666 } else
4667 rx_desc->read.buffer_addr[j+1] = ~0;
2d7edb92
MC
4668 }
4669
87f5032e 4670 skb = netdev_alloc_skb(netdev,
90fb5135 4671 adapter->rx_ps_bsize0 + NET_IP_ALIGN);
2d7edb92 4672
b92ff8ee
JB
4673 if (unlikely(!skb)) {
4674 adapter->alloc_rx_buff_failed++;
2d7edb92 4675 break;
b92ff8ee 4676 }
2d7edb92
MC
4677
4678 /* Make buffer alignment 2 beyond a 16 byte boundary
4679 * this will result in a 16 byte aligned IP header after
4680 * the 14 byte MAC header is removed
4681 */
4682 skb_reserve(skb, NET_IP_ALIGN);
4683
2d7edb92
MC
4684 buffer_info->skb = skb;
4685 buffer_info->length = adapter->rx_ps_bsize0;
4686 buffer_info->dma = pci_map_single(pdev, skb->data,
4687 adapter->rx_ps_bsize0,
4688 PCI_DMA_FROMDEVICE);
4689
4690 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4691
96838a40 4692 if (unlikely(++i == rx_ring->count)) i = 0;
2d7edb92
MC
4693 buffer_info = &rx_ring->buffer_info[i];
4694 ps_page = &rx_ring->ps_page[i];
4695 ps_page_dma = &rx_ring->ps_page_dma[i];
4696 }
4697
4698no_buffers:
b92ff8ee
JB
4699 if (likely(rx_ring->next_to_use != i)) {
4700 rx_ring->next_to_use = i;
4701 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4702
4703 /* Force memory writes to complete before letting h/w
4704 * know there are new descriptors to fetch. (Only
4705 * applicable for weak-ordered memory model archs,
4706 * such as IA-64). */
4707 wmb();
4708 /* Hardware increments by 16 bytes, but packet split
4709 * descriptors are 32 bytes...so we increment tail
4710 * twice as much.
4711 */
4712 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4713 }
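 /* Illustrative only: with i == 5 the value written to RDT is 10, because
 * the hardware advances its tail in 16-byte units while each packet-split
 * descriptor is 32 bytes, as noted in the comment above. */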
2d7edb92
MC
4714}
4715
1da177e4
LT
4716/**
4717 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4718 * @adapter:
4719 **/
4720
4721static void
4722e1000_smartspeed(struct e1000_adapter *adapter)
4723{
4724 uint16_t phy_status;
4725 uint16_t phy_ctrl;
4726
96838a40 4727 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
1da177e4
LT
4728 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4729 return;
4730
96838a40 4731 if (adapter->smartspeed == 0) {
1da177e4
LT
4732 /* If Master/Slave config fault is asserted twice,
4733 * we assume back-to-back */
4734 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
96838a40 4735 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
1da177e4 4736 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
96838a40 4737 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
1da177e4 4738 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
96838a40 4739 if (phy_ctrl & CR_1000T_MS_ENABLE) {
1da177e4
LT
4740 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4741 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4742 phy_ctrl);
4743 adapter->smartspeed++;
96838a40 4744 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
1da177e4
LT
4745 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4746 &phy_ctrl)) {
4747 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4748 MII_CR_RESTART_AUTO_NEG);
4749 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
4750 phy_ctrl);
4751 }
4752 }
4753 return;
96838a40 4754 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
1da177e4
LT
4755 /* If still no link, perhaps using 2/3 pair cable */
4756 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4757 phy_ctrl |= CR_1000T_MS_ENABLE;
4758 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
96838a40 4759 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
1da177e4
LT
4760 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4761 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4762 MII_CR_RESTART_AUTO_NEG);
4763 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
4764 }
4765 }
4766 /* Restart process after E1000_SMARTSPEED_MAX iterations */
96838a40 4767 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
1da177e4
LT
4768 adapter->smartspeed = 0;
4769}
4770
4771/**
4772 * e1000_ioctl -
4773 * @netdev:
4774 * @ifreq:
4775 * @cmd:
4776 **/
4777
4778static int
4779e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4780{
4781 switch (cmd) {
4782 case SIOCGMIIPHY:
4783 case SIOCGMIIREG:
4784 case SIOCSMIIREG:
4785 return e1000_mii_ioctl(netdev, ifr, cmd);
4786 default:
4787 return -EOPNOTSUPP;
4788 }
4789}
4790
/**
 * e1000_mii_ioctl - handle an MII register ioctl request
 * @netdev: network interface device structure
 * @ifr: pointer to the interface request
 * @cmd: ioctl command
 **/

static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	uint16_t mii_reg;
	uint16_t spddplx;
	unsigned long flags;

	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		if (adapter->hw.media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval) {
						spin_unlock_irqrestore(
							&adapter->stats_lock,
							flags);
						return retval;
					}
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(&adapter->hw)) {
					spin_unlock_irqrestore(
						&adapter->stats_lock, flags);
					return -EIO;
				}
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}

void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

int32_t
e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;
	uint16_t cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return E1000_SUCCESS;
}

void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}

static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* enable VLAN receive filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* disable VLAN filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl &= ~E1000_RCTL_VFE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			if (adapter->mng_vlan_id !=
			    (uint16_t)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	e1000_irq_enable(adapter);
}

static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

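/* Worked example for the VLAN filter table math above: the 4096 possible
 * VLAN IDs are spread over 128 32-bit VFTA registers, so VLAN ID 1234
 * (0x4D2) selects index (1234 >> 5) & 0x7F = 38 and sets bit
 * 1234 & 0x1F = 18 of that register. */
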
static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	e1000_irq_disable(adapter);

	if (adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		uint16_t vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!adapter->vlgrp->vlan_devices[vid])
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
{
	adapter->hw.autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		adapter->hw.autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

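/* Example of the encoding handled above: a forced 100 Mbps/full-duplex
 * request reaches this function as spddplx = SPEED_100 + DUPLEX_FULL
 * = 100 + 1 = 101 and selects e1000_100_full, while 1000 Mbps half-duplex
 * is explicitly rejected as unsupported. */
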
#ifdef CONFIG_PM
/* Save/restore 16 or 64 dwords of PCI config space depending on which
 * bus we're on (PCI(X) vs. PCI-E)
 */
#define PCIE_CONFIG_SPACE_LEN 256
#define PCI_CONFIG_SPACE_LEN 64
static int
e1000_pci_save_state(struct e1000_adapter *adapter)
{
	struct pci_dev *dev = adapter->pdev;
	int size;
	int i;

	if (adapter->hw.mac_type >= e1000_82571)
		size = PCIE_CONFIG_SPACE_LEN;
	else
		size = PCI_CONFIG_SPACE_LEN;

	WARN_ON(adapter->config_space != NULL);

	adapter->config_space = kmalloc(size, GFP_KERNEL);
	if (!adapter->config_space) {
		DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
		return -ENOMEM;
	}
	for (i = 0; i < (size / 4); i++)
		pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
	return 0;
}

static void
e1000_pci_restore_state(struct e1000_adapter *adapter)
{
	struct pci_dev *dev = adapter->pdev;
	int size;
	int i;

	if (adapter->config_space == NULL)
		return;

	if (adapter->hw.mac_type >= e1000_82571)
		size = PCIE_CONFIG_SPACE_LEN;
	else
		size = PCI_CONFIG_SPACE_LEN;
	for (i = 0; i < (size / 4); i++)
		pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
	kfree(adapter->config_space);
	adapter->config_space = NULL;
	return;
}
#endif /* CONFIG_PM */

static int
e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, ctrl_ext, rctl, status;
	uint32_t wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	/* Implement our own version of pci_save_state(pdev) because pci-
	 * express adapters have 256-byte config spaces. */
	retval = e1000_pci_save_state(adapter);
	if (retval)
		return retval;
#endif

	status = E1000_READ_REG(&adapter->hw, STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}

		if (adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}

		if (adapter->hw.media_type == e1000_media_type_fiber ||
		    adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(&adapter->hw);

		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	e1000_release_manageability(adapter);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy_type == e1000_phy_igp_3)
		e1000_phy_powerdown_workaround(&adapter->hw);

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	e1000_pci_restore_state(adapter);
	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
		return err;

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void
e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	e1000_clean_tx_irq(adapter, adapter->tx_ring);
#ifndef CONFIG_E1000_NAPI
	adapter->clean_rx(adapter, adapter->rx_ring);
#endif
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

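/* In the PCI error recovery sequence, returning PCI_ERS_RESULT_NEED_RESET
 * causes the core to reset the slot and then call e1000_io_slot_reset()
 * below, followed by e1000_io_resume() once traffic may flow again. */
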
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			printk(KERN_ERR "e1000: can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);
}

/* e1000_main.c */