drivers/net/ethernet/intel/e1000e/netdev.c
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
0d6057e4 4 Copyright(c) 1999 - 2011 Intel Corporation.
bc7f75fa
AK
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
8544b9f7
BA
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
bc7f75fa
AK
31#include <linux/module.h>
32#include <linux/types.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/vmalloc.h>
36#include <linux/pagemap.h>
37#include <linux/delay.h>
38#include <linux/netdevice.h>
9fb7a5f7 39#include <linux/interrupt.h>
bc7f75fa
AK
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
5a0e3ad6 42#include <linux/slab.h>
bc7f75fa
AK
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/mii.h>
46#include <linux/ethtool.h>
47#include <linux/if_vlan.h>
48#include <linux/cpu.h>
49#include <linux/smp.h>
e8db0be1 50#include <linux/pm_qos.h>
23606cf5 51#include <linux/pm_runtime.h>
111b9dc5 52#include <linux/aer.h>
70c71606 53#include <linux/prefetch.h>
bc7f75fa
AK
54
55#include "e1000.h"
56
b3ccf267 57#define DRV_EXTRAVERSION "-k"
c14c643b 58
c5778b43 59#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
bc7f75fa
AK
60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION;
62
78cd29d5
BA
63static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
64
bc7f75fa
AK
65static const struct e1000_info *e1000_info_tbl[] = {
66 [board_82571] = &e1000_82571_info,
67 [board_82572] = &e1000_82572_info,
68 [board_82573] = &e1000_82573_info,
4662e82b 69 [board_82574] = &e1000_82574_info,
8c81c9c3 70 [board_82583] = &e1000_82583_info,
bc7f75fa
AK
71 [board_80003es2lan] = &e1000_es2_info,
72 [board_ich8lan] = &e1000_ich8_info,
73 [board_ich9lan] = &e1000_ich9_info,
f4187b56 74 [board_ich10lan] = &e1000_ich10_info,
a4f58f54 75 [board_pchlan] = &e1000_pch_info,
d3738bb8 76 [board_pch2lan] = &e1000_pch2_info,
bc7f75fa
AK
77};
78
84f4ee90
TI
79struct e1000_reg_info {
80 u32 ofs;
81 char *name;
82};
83
af667a29
BA
84#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
85#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
86#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
87#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
88#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
89
90#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
91#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
92#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
93#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
94#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
84f4ee90
TI
95
96static const struct e1000_reg_info e1000_reg_info_tbl[] = {
97
98 /* General Registers */
99 {E1000_CTRL, "CTRL"},
100 {E1000_STATUS, "STATUS"},
101 {E1000_CTRL_EXT, "CTRL_EXT"},
102
103 /* Interrupt Registers */
104 {E1000_ICR, "ICR"},
105
af667a29 106 /* Rx Registers */
84f4ee90
TI
107 {E1000_RCTL, "RCTL"},
108 {E1000_RDLEN, "RDLEN"},
109 {E1000_RDH, "RDH"},
110 {E1000_RDT, "RDT"},
111 {E1000_RDTR, "RDTR"},
112 {E1000_RXDCTL(0), "RXDCTL"},
113 {E1000_ERT, "ERT"},
114 {E1000_RDBAL, "RDBAL"},
115 {E1000_RDBAH, "RDBAH"},
116 {E1000_RDFH, "RDFH"},
117 {E1000_RDFT, "RDFT"},
118 {E1000_RDFHS, "RDFHS"},
119 {E1000_RDFTS, "RDFTS"},
120 {E1000_RDFPC, "RDFPC"},
121
af667a29 122 /* Tx Registers */
84f4ee90
TI
123 {E1000_TCTL, "TCTL"},
124 {E1000_TDBAL, "TDBAL"},
125 {E1000_TDBAH, "TDBAH"},
126 {E1000_TDLEN, "TDLEN"},
127 {E1000_TDH, "TDH"},
128 {E1000_TDT, "TDT"},
129 {E1000_TIDV, "TIDV"},
130 {E1000_TXDCTL(0), "TXDCTL"},
131 {E1000_TADV, "TADV"},
132 {E1000_TARC(0), "TARC"},
133 {E1000_TDFH, "TDFH"},
134 {E1000_TDFT, "TDFT"},
135 {E1000_TDFHS, "TDFHS"},
136 {E1000_TDFTS, "TDFTS"},
137 {E1000_TDFPC, "TDFPC"},
138
139 /* List Terminator */
140 {}
141};
142
143/*
144 * e1000_regdump - register printout routine
145 */
146static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
147{
148 int n = 0;
149 char rname[16];
150 u32 regs[8];
151
152 switch (reginfo->ofs) {
153 case E1000_RXDCTL(0):
154 for (n = 0; n < 2; n++)
155 regs[n] = __er32(hw, E1000_RXDCTL(n));
156 break;
157 case E1000_TXDCTL(0):
158 for (n = 0; n < 2; n++)
159 regs[n] = __er32(hw, E1000_TXDCTL(n));
160 break;
161 case E1000_TARC(0):
162 for (n = 0; n < 2; n++)
163 regs[n] = __er32(hw, E1000_TARC(n));
164 break;
165 default:
ef456f85
JK
166 pr_info("%-15s %08x\n",
167 reginfo->name, __er32(hw, reginfo->ofs));
84f4ee90
TI
168 return;
169 }
170
171 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
ef456f85 172 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
84f4ee90
TI
173}
174
84f4ee90 175/*
af667a29 176 * e1000e_dump - Print registers, Tx-ring and Rx-ring
84f4ee90
TI
177 */
178static void e1000e_dump(struct e1000_adapter *adapter)
179{
180 struct net_device *netdev = adapter->netdev;
181 struct e1000_hw *hw = &adapter->hw;
182 struct e1000_reg_info *reginfo;
183 struct e1000_ring *tx_ring = adapter->tx_ring;
184 struct e1000_tx_desc *tx_desc;
af667a29
BA
185 struct my_u0 {
186 u64 a;
187 u64 b;
188 } *u0;
84f4ee90
TI
189 struct e1000_buffer *buffer_info;
190 struct e1000_ring *rx_ring = adapter->rx_ring;
191 union e1000_rx_desc_packet_split *rx_desc_ps;
5f450212 192 union e1000_rx_desc_extended *rx_desc;
af667a29
BA
193 struct my_u1 {
194 u64 a;
195 u64 b;
196 u64 c;
197 u64 d;
198 } *u1;
84f4ee90
TI
199 u32 staterr;
200 int i = 0;
201
202 if (!netif_msg_hw(adapter))
203 return;
204
205 /* Print netdevice Info */
206 if (netdev) {
207 dev_info(&adapter->pdev->dev, "Net device Info\n");
ef456f85
JK
208 pr_info("Device Name state trans_start last_rx\n");
209 pr_info("%-15s %016lX %016lX %016lX\n",
210 netdev->name, netdev->state, netdev->trans_start,
211 netdev->last_rx);
84f4ee90
TI
212 }
213
214 /* Print Registers */
215 dev_info(&adapter->pdev->dev, "Register Dump\n");
ef456f85 216 pr_info(" Register Name Value\n");
84f4ee90
TI
217 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
218 reginfo->name; reginfo++) {
219 e1000_regdump(hw, reginfo);
220 }
221
af667a29 222 /* Print Tx Ring Summary */
84f4ee90
TI
223 if (!netdev || !netif_running(netdev))
224 goto exit;
225
af667a29 226 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
ef456f85 227 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
84f4ee90 228 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
ef456f85
JK
229 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
230 0, tx_ring->next_to_use, tx_ring->next_to_clean,
231 (unsigned long long)buffer_info->dma,
232 buffer_info->length,
233 buffer_info->next_to_watch,
234 (unsigned long long)buffer_info->time_stamp);
84f4ee90 235
af667a29 236 /* Print Tx Ring */
84f4ee90
TI
237 if (!netif_msg_tx_done(adapter))
238 goto rx_ring_summary;
239
af667a29 240 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
84f4ee90
TI
241
242 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
243 *
244 * Legacy Transmit Descriptor
245 * +--------------------------------------------------------------+
246 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
247 * +--------------------------------------------------------------+
248 * 8 | Special | CSS | Status | CMD | CSO | Length |
249 * +--------------------------------------------------------------+
250 * 63 48 47 36 35 32 31 24 23 16 15 0
251 *
252 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
253 * 63 48 47 40 39 32 31 16 15 8 7 0
254 * +----------------------------------------------------------------+
255 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
256 * +----------------------------------------------------------------+
257 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
258 * +----------------------------------------------------------------+
259 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
260 *
261 * Extended Data Descriptor (DTYP=0x1)
262 * +----------------------------------------------------------------+
263 * 0 | Buffer Address [63:0] |
264 * +----------------------------------------------------------------+
265 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
266 * +----------------------------------------------------------------+
267 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
268 */
ef456f85
JK
269 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
270 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
271 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
84f4ee90 272 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
ef456f85 273 const char *next_desc;
84f4ee90
TI
274 tx_desc = E1000_TX_DESC(*tx_ring, i);
275 buffer_info = &tx_ring->buffer_info[i];
276 u0 = (struct my_u0 *)tx_desc;
84f4ee90 277 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
ef456f85 278 next_desc = " NTC/U";
84f4ee90 279 else if (i == tx_ring->next_to_use)
ef456f85 280 next_desc = " NTU";
84f4ee90 281 else if (i == tx_ring->next_to_clean)
ef456f85 282 next_desc = " NTC";
84f4ee90 283 else
ef456f85
JK
284 next_desc = "";
285 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
286 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
287 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
288 i,
289 (unsigned long long)le64_to_cpu(u0->a),
290 (unsigned long long)le64_to_cpu(u0->b),
291 (unsigned long long)buffer_info->dma,
292 buffer_info->length, buffer_info->next_to_watch,
293 (unsigned long long)buffer_info->time_stamp,
294 buffer_info->skb, next_desc);
84f4ee90
TI
295
296 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
297 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
af667a29
BA
298 16, 1, phys_to_virt(buffer_info->dma),
299 buffer_info->length, true);
84f4ee90
TI
300 }
301
af667a29 302 /* Print Rx Ring Summary */
84f4ee90 303rx_ring_summary:
af667a29 304 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
ef456f85
JK
305 pr_info("Queue [NTU] [NTC]\n");
306 pr_info(" %5d %5X %5X\n",
307 0, rx_ring->next_to_use, rx_ring->next_to_clean);
84f4ee90 308
af667a29 309 /* Print Rx Ring */
84f4ee90
TI
310 if (!netif_msg_rx_status(adapter))
311 goto exit;
312
af667a29 313 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
84f4ee90
TI
314 switch (adapter->rx_ps_pages) {
315 case 1:
316 case 2:
317 case 3:
318 /* [Extended] Packet Split Receive Descriptor Format
319 *
320 * +-----------------------------------------------------+
321 * 0 | Buffer Address 0 [63:0] |
322 * +-----------------------------------------------------+
323 * 8 | Buffer Address 1 [63:0] |
324 * +-----------------------------------------------------+
325 * 16 | Buffer Address 2 [63:0] |
326 * +-----------------------------------------------------+
327 * 24 | Buffer Address 3 [63:0] |
328 * +-----------------------------------------------------+
329 */
ef456f85 330 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
84f4ee90
TI
331 /* [Extended] Receive Descriptor (Write-Back) Format
332 *
333 * 63 48 47 32 31 13 12 8 7 4 3 0
334 * +------------------------------------------------------+
335 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
336 * | Checksum | Ident | | Queue | | Type |
337 * +------------------------------------------------------+
338 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
339 * +------------------------------------------------------+
340 * 63 48 47 32 31 20 19 0
341 */
ef456f85 342 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
84f4ee90 343 for (i = 0; i < rx_ring->count; i++) {
ef456f85 344 const char *next_desc;
84f4ee90
TI
345 buffer_info = &rx_ring->buffer_info[i];
346 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
347 u1 = (struct my_u1 *)rx_desc_ps;
348 staterr =
af667a29 349 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
ef456f85
JK
350
351 if (i == rx_ring->next_to_use)
352 next_desc = " NTU";
353 else if (i == rx_ring->next_to_clean)
354 next_desc = " NTC";
355 else
356 next_desc = "";
357
84f4ee90
TI
358 if (staterr & E1000_RXD_STAT_DD) {
359 /* Descriptor Done */
ef456f85
JK
360 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
361 "RWB", i,
362 (unsigned long long)le64_to_cpu(u1->a),
363 (unsigned long long)le64_to_cpu(u1->b),
364 (unsigned long long)le64_to_cpu(u1->c),
365 (unsigned long long)le64_to_cpu(u1->d),
366 buffer_info->skb, next_desc);
84f4ee90 367 } else {
ef456f85
JK
368 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
369 "R ", i,
370 (unsigned long long)le64_to_cpu(u1->a),
371 (unsigned long long)le64_to_cpu(u1->b),
372 (unsigned long long)le64_to_cpu(u1->c),
373 (unsigned long long)le64_to_cpu(u1->d),
374 (unsigned long long)buffer_info->dma,
375 buffer_info->skb, next_desc);
84f4ee90
TI
376
377 if (netif_msg_pktdata(adapter))
378 print_hex_dump(KERN_INFO, "",
379 DUMP_PREFIX_ADDRESS, 16, 1,
380 phys_to_virt(buffer_info->dma),
381 adapter->rx_ps_bsize0, true);
382 }
84f4ee90
TI
383 }
384 break;
385 default:
386 case 0:
5f450212 387 /* Extended Receive Descriptor (Read) Format
84f4ee90 388 *
5f450212
BA
389 * +-----------------------------------------------------+
390 * 0 | Buffer Address [63:0] |
391 * +-----------------------------------------------------+
392 * 8 | Reserved |
393 * +-----------------------------------------------------+
84f4ee90 394 */
ef456f85 395 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
5f450212
BA
396 /* Extended Receive Descriptor (Write-Back) Format
397 *
398 * 63 48 47 32 31 24 23 4 3 0
399 * +------------------------------------------------------+
400 * | RSS Hash | | | |
401 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS |
402 * | Packet | IP | | | Type |
403 * | Checksum | Ident | | | |
404 * +------------------------------------------------------+
405 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
406 * +------------------------------------------------------+
407 * 63 48 47 32 31 20 19 0
408 */
ef456f85 409 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
5f450212
BA
410
411 for (i = 0; i < rx_ring->count; i++) {
ef456f85
JK
412 const char *next_desc;
413
84f4ee90 414 buffer_info = &rx_ring->buffer_info[i];
5f450212
BA
415 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
416 u1 = (struct my_u1 *)rx_desc;
417 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
ef456f85
JK
418
419 if (i == rx_ring->next_to_use)
420 next_desc = " NTU";
421 else if (i == rx_ring->next_to_clean)
422 next_desc = " NTC";
423 else
424 next_desc = "";
425
5f450212
BA
426 if (staterr & E1000_RXD_STAT_DD) {
427 /* Descriptor Done */
ef456f85
JK
428 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
429 "RWB", i,
430 (unsigned long long)le64_to_cpu(u1->a),
431 (unsigned long long)le64_to_cpu(u1->b),
432 buffer_info->skb, next_desc);
5f450212 433 } else {
ef456f85
JK
434 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
435 "R ", i,
436 (unsigned long long)le64_to_cpu(u1->a),
437 (unsigned long long)le64_to_cpu(u1->b),
438 (unsigned long long)buffer_info->dma,
439 buffer_info->skb, next_desc);
5f450212
BA
440
441 if (netif_msg_pktdata(adapter))
442 print_hex_dump(KERN_INFO, "",
443 DUMP_PREFIX_ADDRESS, 16,
444 1,
445 phys_to_virt
446 (buffer_info->dma),
447 adapter->rx_buffer_len,
448 true);
449 }
84f4ee90
TI
450 }
451 }
452
453exit:
454 return;
455}
456
bc7f75fa
AK
457/**
458 * e1000_desc_unused - calculate if we have unused descriptors
459 **/
460static int e1000_desc_unused(struct e1000_ring *ring)
461{
462 if (ring->next_to_clean > ring->next_to_use)
463 return ring->next_to_clean - ring->next_to_use - 1;
464
465 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
466}
467
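/*
 * Worked example (illustrative, not part of the upstream source): on a
 * 256-entry ring with next_to_clean = 10 and next_to_use = 200, slots
 * 200..255 and 0..9 could be refilled, but one slot is always held back
 * so that next_to_use == next_to_clean means "empty" rather than "full":
 *
 *     unused = count + next_to_clean - next_to_use - 1
 *            = 256   + 10            - 200         - 1 = 65
 *
 * The cleanup routines below feed this value straight back into the
 * buffer allocators, e.g.:
 *
 *     cleaned_count = e1000_desc_unused(rx_ring);
 *     if (cleaned_count)
 *             adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 */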
468/**
ad68076e 469 * e1000_receive_skb - helper function to handle Rx indications
bc7f75fa
AK
470 * @adapter: board private structure
471 * @status: descriptor status field as written by hardware
472 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
473 * @skb: pointer to sk_buff to be indicated to stack
474 **/
475static void e1000_receive_skb(struct e1000_adapter *adapter,
af667a29 476 struct net_device *netdev, struct sk_buff *skb,
a39fe742 477 u8 status, __le16 vlan)
bc7f75fa 478{
86d70e53 479 u16 tag = le16_to_cpu(vlan);
bc7f75fa
AK
480 skb->protocol = eth_type_trans(skb, netdev);
481
86d70e53
JK
482 if (status & E1000_RXD_STAT_VP)
483 __vlan_hwaccel_put_tag(skb, tag);
484
485 napi_gro_receive(&adapter->napi, skb);
bc7f75fa
AK
486}
487
488/**
af667a29 489 * e1000_rx_checksum - Receive Checksum Offload
bc7f75fa
AK
490 * @adapter: board private structure
491 * @status_err: receive descriptor status and error fields
492 * @csum: receive descriptor csum field
493 * @sk_buff: socket buffer with received data
494 **/
495static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
496 u32 csum, struct sk_buff *skb)
497{
498 u16 status = (u16)status_err;
499 u8 errors = (u8)(status_err >> 24);
bc8acf2c
ED
500
501 skb_checksum_none_assert(skb);
bc7f75fa
AK
502
503 /* Ignore Checksum bit is set */
504 if (status & E1000_RXD_STAT_IXSM)
505 return;
506 /* TCP/UDP checksum error bit is set */
507 if (errors & E1000_RXD_ERR_TCPE) {
508 /* let the stack verify checksum errors */
509 adapter->hw_csum_err++;
510 return;
511 }
512
513 /* TCP/UDP Checksum has not been calculated */
514 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
515 return;
516
517 /* It must be a TCP or UDP packet with a valid checksum */
518 if (status & E1000_RXD_STAT_TCPCS) {
519 /* TCP checksum is good */
520 skb->ip_summed = CHECKSUM_UNNECESSARY;
521 } else {
ad68076e
BA
522 /*
523 * IP fragment with UDP payload
524 * Hardware complements the payload checksum, so we undo it
bc7f75fa
AK
525 * and then put the value in host order for further stack use.
526 */
a39fe742
AV
527 __sum16 sum = (__force __sum16)htons(csum);
528 skb->csum = csum_unfold(~sum);
bc7f75fa
AK
529 skb->ip_summed = CHECKSUM_COMPLETE;
530 }
531 adapter->hw_csum_good++;
532}
533
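/*
 * Call-site sketch (taken from the Rx clean-up paths later in this
 * file): the caller pulls the status/error dword and the checksum field
 * out of the write-back descriptor and hands them to e1000_rx_checksum():
 *
 *     staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 *     e1000_rx_checksum(adapter, staterr,
 *                       le16_to_cpu(rx_desc->wb.lower.hi_dword.
 *                                   csum_ip.csum), skb);
 */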
c6e7f51e
BA
534/**
535 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
536 * @hw: pointer to the HW structure
537 * @tail: address of tail descriptor register
538 * @i: value to write to tail descriptor register
539 *
540 * When updating the tail register, the ME could be accessing Host CSR
541 * registers at the same time. Normally, this is handled in h/w by an
542 * arbiter but on some parts there is a bug that acknowledges Host accesses
543 * later than it should, which can leave the descriptor register with an
544 * incorrect value. Work around this by checking the FWSM register, which
545 * has bit 24 set while the ME is accessing Host CSR registers; if it is
546 * set, wait and then retry the write a number of times.
547 **/
548static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
549 unsigned int i)
550{
551 unsigned int j = 0;
552
553 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
554 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
555 udelay(50);
556
557 writel(i, tail);
558
559 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
560 return E1000_ERR_SWFW_SYNC;
561
562 return 0;
563}
564
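/*
 * Timing note (illustrative; the exact loop count is defined in the
 * driver headers): the poll above re-reads FWSM in 50 usec steps, so the
 * worst-case stall before giving up is roughly
 *
 *     E1000_ICH_FWSM_PCIM2PCI_COUNT * 50 usec
 *
 * If the tail register still does not read back the value just written,
 * the wrappers below treat it as fatal ME interference, disable the
 * affected Rx/Tx engine and schedule a full reset rather than run with a
 * stale tail pointer.
 */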
565static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
566{
567 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
568 struct e1000_hw *hw = &adapter->hw;
569
570 if (e1000e_update_tail_wa(hw, tail, i)) {
571 u32 rctl = er32(RCTL);
572 ew32(RCTL, rctl & ~E1000_RCTL_EN);
573 e_err("ME firmware caused invalid RDT - resetting\n");
574 schedule_work(&adapter->reset_task);
575 }
576}
577
578static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
579{
580 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
581 struct e1000_hw *hw = &adapter->hw;
582
583 if (e1000e_update_tail_wa(hw, tail, i)) {
584 u32 tctl = er32(TCTL);
585 ew32(TCTL, tctl & ~E1000_TCTL_EN);
586 e_err("ME firmware caused invalid TDT - resetting\n");
587 schedule_work(&adapter->reset_task);
588 }
589}
590
bc7f75fa 591/**
5f450212 592 * e1000_alloc_rx_buffers - Replace used receive buffers
bc7f75fa
AK
593 * @adapter: address of board private structure
594 **/
595static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
c2fed996 596 int cleaned_count, gfp_t gfp)
bc7f75fa
AK
597{
598 struct net_device *netdev = adapter->netdev;
599 struct pci_dev *pdev = adapter->pdev;
600 struct e1000_ring *rx_ring = adapter->rx_ring;
5f450212 601 union e1000_rx_desc_extended *rx_desc;
bc7f75fa
AK
602 struct e1000_buffer *buffer_info;
603 struct sk_buff *skb;
604 unsigned int i;
89d71a66 605 unsigned int bufsz = adapter->rx_buffer_len;
bc7f75fa
AK
606
607 i = rx_ring->next_to_use;
608 buffer_info = &rx_ring->buffer_info[i];
609
610 while (cleaned_count--) {
611 skb = buffer_info->skb;
612 if (skb) {
613 skb_trim(skb, 0);
614 goto map_skb;
615 }
616
c2fed996 617 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
bc7f75fa
AK
618 if (!skb) {
619 /* Better luck next round */
620 adapter->alloc_rx_buff_failed++;
621 break;
622 }
623
bc7f75fa
AK
624 buffer_info->skb = skb;
625map_skb:
0be3f55f 626 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bc7f75fa 627 adapter->rx_buffer_len,
0be3f55f
NN
628 DMA_FROM_DEVICE);
629 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
af667a29 630 dev_err(&pdev->dev, "Rx DMA map failed\n");
bc7f75fa
AK
631 adapter->rx_dma_failed++;
632 break;
633 }
634
5f450212
BA
635 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
636 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
bc7f75fa 637
50849d79
TH
638 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
639 /*
640 * Force memory writes to complete before letting h/w
641 * know there are new descriptors to fetch. (Only
642 * applicable for weak-ordered memory model archs,
643 * such as IA-64).
644 */
645 wmb();
c6e7f51e
BA
646 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
647 e1000e_update_rdt_wa(adapter, i);
648 else
649 writel(i, adapter->hw.hw_addr + rx_ring->tail);
50849d79 650 }
bc7f75fa
AK
651 i++;
652 if (i == rx_ring->count)
653 i = 0;
654 buffer_info = &rx_ring->buffer_info[i];
655 }
656
50849d79 657 rx_ring->next_to_use = i;
bc7f75fa
AK
658}
659
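/*
 * Batching note (illustrative; E1000_RX_BUFFER_WRITE is a power-of-two
 * constant from the driver headers): the tail register is only written
 * when the new index crosses a batch boundary, e.g. with a batch size of
 * 16 the test
 *
 *     !(i & (E1000_RX_BUFFER_WRITE - 1))   ==>   !(i & 0xf)
 *
 * is true for i = 0, 16, 32, ...  This trades a little latency in
 * publishing fresh buffers to hardware for far fewer MMIO writes per
 * allocation pass.
 */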
660/**
661 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
662 * @adapter: address of board private structure
663 **/
664static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
c2fed996 665 int cleaned_count, gfp_t gfp)
bc7f75fa
AK
666{
667 struct net_device *netdev = adapter->netdev;
668 struct pci_dev *pdev = adapter->pdev;
669 union e1000_rx_desc_packet_split *rx_desc;
670 struct e1000_ring *rx_ring = adapter->rx_ring;
671 struct e1000_buffer *buffer_info;
672 struct e1000_ps_page *ps_page;
673 struct sk_buff *skb;
674 unsigned int i, j;
675
676 i = rx_ring->next_to_use;
677 buffer_info = &rx_ring->buffer_info[i];
678
679 while (cleaned_count--) {
680 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
681
682 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
47f44e40
AK
683 ps_page = &buffer_info->ps_pages[j];
684 if (j >= adapter->rx_ps_pages) {
685 /* all unused desc entries get hw null ptr */
af667a29
BA
686 rx_desc->read.buffer_addr[j + 1] =
687 ~cpu_to_le64(0);
47f44e40
AK
688 continue;
689 }
690 if (!ps_page->page) {
c2fed996 691 ps_page->page = alloc_page(gfp);
bc7f75fa 692 if (!ps_page->page) {
47f44e40
AK
693 adapter->alloc_rx_buff_failed++;
694 goto no_buffers;
695 }
0be3f55f
NN
696 ps_page->dma = dma_map_page(&pdev->dev,
697 ps_page->page,
698 0, PAGE_SIZE,
699 DMA_FROM_DEVICE);
700 if (dma_mapping_error(&pdev->dev,
701 ps_page->dma)) {
47f44e40 702 dev_err(&adapter->pdev->dev,
af667a29 703 "Rx DMA page map failed\n");
47f44e40
AK
704 adapter->rx_dma_failed++;
705 goto no_buffers;
bc7f75fa 706 }
bc7f75fa 707 }
47f44e40
AK
708 /*
709 * Refresh the desc even if buffer_addrs
710 * didn't change because each write-back
711 * erases this info.
712 */
af667a29
BA
713 rx_desc->read.buffer_addr[j + 1] =
714 cpu_to_le64(ps_page->dma);
bc7f75fa
AK
715 }
716
c2fed996
JK
717 skb = __netdev_alloc_skb_ip_align(netdev,
718 adapter->rx_ps_bsize0,
719 gfp);
bc7f75fa
AK
720
721 if (!skb) {
722 adapter->alloc_rx_buff_failed++;
723 break;
724 }
725
bc7f75fa 726 buffer_info->skb = skb;
0be3f55f 727 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bc7f75fa 728 adapter->rx_ps_bsize0,
0be3f55f
NN
729 DMA_FROM_DEVICE);
730 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
af667a29 731 dev_err(&pdev->dev, "Rx DMA map failed\n");
bc7f75fa
AK
732 adapter->rx_dma_failed++;
733 /* cleanup skb */
734 dev_kfree_skb_any(skb);
735 buffer_info->skb = NULL;
736 break;
737 }
738
739 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
740
50849d79
TH
741 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
742 /*
743 * Force memory writes to complete before letting h/w
744 * know there are new descriptors to fetch. (Only
745 * applicable for weak-ordered memory model archs,
746 * such as IA-64).
747 */
748 wmb();
c6e7f51e
BA
749 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
750 e1000e_update_rdt_wa(adapter, i << 1);
751 else
752 writel(i << 1,
753 adapter->hw.hw_addr + rx_ring->tail);
50849d79
TH
754 }
755
bc7f75fa
AK
756 i++;
757 if (i == rx_ring->count)
758 i = 0;
759 buffer_info = &rx_ring->buffer_info[i];
760 }
761
762no_buffers:
50849d79 763 rx_ring->next_to_use = i;
bc7f75fa
AK
764}
765
97ac8cae
BA
766/**
767 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
768 * @adapter: address of board private structure
97ac8cae
BA
769 * @cleaned_count: number of buffers to allocate this pass
770 **/
771
772static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
c2fed996 773 int cleaned_count, gfp_t gfp)
97ac8cae
BA
774{
775 struct net_device *netdev = adapter->netdev;
776 struct pci_dev *pdev = adapter->pdev;
5f450212 777 union e1000_rx_desc_extended *rx_desc;
97ac8cae
BA
778 struct e1000_ring *rx_ring = adapter->rx_ring;
779 struct e1000_buffer *buffer_info;
780 struct sk_buff *skb;
781 unsigned int i;
89d71a66 782 unsigned int bufsz = 256 - 16 /* for skb_reserve */;
97ac8cae
BA
783
784 i = rx_ring->next_to_use;
785 buffer_info = &rx_ring->buffer_info[i];
786
787 while (cleaned_count--) {
788 skb = buffer_info->skb;
789 if (skb) {
790 skb_trim(skb, 0);
791 goto check_page;
792 }
793
c2fed996 794 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
97ac8cae
BA
795 if (unlikely(!skb)) {
796 /* Better luck next round */
797 adapter->alloc_rx_buff_failed++;
798 break;
799 }
800
97ac8cae
BA
801 buffer_info->skb = skb;
802check_page:
803 /* allocate a new page if necessary */
804 if (!buffer_info->page) {
c2fed996 805 buffer_info->page = alloc_page(gfp);
97ac8cae
BA
806 if (unlikely(!buffer_info->page)) {
807 adapter->alloc_rx_buff_failed++;
808 break;
809 }
810 }
811
812 if (!buffer_info->dma)
0be3f55f 813 buffer_info->dma = dma_map_page(&pdev->dev,
97ac8cae
BA
814 buffer_info->page, 0,
815 PAGE_SIZE,
0be3f55f 816 DMA_FROM_DEVICE);
97ac8cae 817
5f450212
BA
818 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
819 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
97ac8cae
BA
820
821 if (unlikely(++i == rx_ring->count))
822 i = 0;
823 buffer_info = &rx_ring->buffer_info[i];
824 }
825
826 if (likely(rx_ring->next_to_use != i)) {
827 rx_ring->next_to_use = i;
828 if (unlikely(i-- == 0))
829 i = (rx_ring->count - 1);
830
831 /* Force memory writes to complete before letting h/w
832 * know there are new descriptors to fetch. (Only
833 * applicable for weak-ordered memory model archs,
834 * such as IA-64). */
835 wmb();
c6e7f51e
BA
836 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
837 e1000e_update_rdt_wa(adapter, i);
838 else
839 writel(i, adapter->hw.hw_addr + rx_ring->tail);
97ac8cae
BA
840 }
841}
842
bc7f75fa
AK
843/**
844 * e1000_clean_rx_irq - Send received data up the network stack; legacy
845 * @adapter: board private structure
846 *
847 * the return value indicates whether actual cleaning was done; there
848 * is no guarantee that everything was cleaned
849 **/
850static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
851 int *work_done, int work_to_do)
852{
853 struct net_device *netdev = adapter->netdev;
854 struct pci_dev *pdev = adapter->pdev;
3bb99fe2 855 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 856 struct e1000_ring *rx_ring = adapter->rx_ring;
5f450212 857 union e1000_rx_desc_extended *rx_desc, *next_rxd;
bc7f75fa 858 struct e1000_buffer *buffer_info, *next_buffer;
5f450212 859 u32 length, staterr;
bc7f75fa
AK
860 unsigned int i;
861 int cleaned_count = 0;
3db1cd5c 862 bool cleaned = false;
bc7f75fa
AK
863 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
864
865 i = rx_ring->next_to_clean;
5f450212
BA
866 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
867 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
bc7f75fa
AK
868 buffer_info = &rx_ring->buffer_info[i];
869
5f450212 870 while (staterr & E1000_RXD_STAT_DD) {
bc7f75fa 871 struct sk_buff *skb;
bc7f75fa
AK
872
873 if (*work_done >= work_to_do)
874 break;
875 (*work_done)++;
2d0bb1c1 876 rmb(); /* read descriptor and rx_buffer_info after status DD */
bc7f75fa 877
bc7f75fa
AK
878 skb = buffer_info->skb;
879 buffer_info->skb = NULL;
880
881 prefetch(skb->data - NET_IP_ALIGN);
882
883 i++;
884 if (i == rx_ring->count)
885 i = 0;
5f450212 886 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
bc7f75fa
AK
887 prefetch(next_rxd);
888
889 next_buffer = &rx_ring->buffer_info[i];
890
3db1cd5c 891 cleaned = true;
bc7f75fa 892 cleaned_count++;
0be3f55f 893 dma_unmap_single(&pdev->dev,
bc7f75fa
AK
894 buffer_info->dma,
895 adapter->rx_buffer_len,
0be3f55f 896 DMA_FROM_DEVICE);
bc7f75fa
AK
897 buffer_info->dma = 0;
898
5f450212 899 length = le16_to_cpu(rx_desc->wb.upper.length);
bc7f75fa 900
b94b5028
JB
901 /*
902 * !EOP means multiple descriptors were used to store a single
903 * packet, if that's the case we need to toss it. In fact, we
904 * need to toss every packet with the EOP bit clear and the
905 * next frame that _does_ have the EOP bit set, as it is by
906 * definition only a frame fragment
907 */
5f450212 908 if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
b94b5028
JB
909 adapter->flags2 |= FLAG2_IS_DISCARDING;
910
911 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
bc7f75fa 912 /* All receives must fit into a single buffer */
3bb99fe2 913 e_dbg("Receive packet consumed multiple buffers\n");
bc7f75fa
AK
914 /* recycle */
915 buffer_info->skb = skb;
5f450212 916 if (staterr & E1000_RXD_STAT_EOP)
b94b5028 917 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa
AK
918 goto next_desc;
919 }
920
5f450212 921 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
bc7f75fa
AK
922 /* recycle */
923 buffer_info->skb = skb;
924 goto next_desc;
925 }
926
eb7c3adb
JK
927 /* adjust length to remove Ethernet CRC */
928 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
929 length -= 4;
930
bc7f75fa
AK
931 total_rx_bytes += length;
932 total_rx_packets++;
933
ad68076e
BA
934 /*
935 * code added for copybreak; this should improve
bc7f75fa 936 * performance for small packets with large amounts
ad68076e
BA
937 * of reassembly being done in the stack
938 */
bc7f75fa
AK
939 if (length < copybreak) {
940 struct sk_buff *new_skb =
89d71a66 941 netdev_alloc_skb_ip_align(netdev, length);
bc7f75fa 942 if (new_skb) {
808ff676
BA
943 skb_copy_to_linear_data_offset(new_skb,
944 -NET_IP_ALIGN,
945 (skb->data -
946 NET_IP_ALIGN),
947 (length +
948 NET_IP_ALIGN));
bc7f75fa
AK
949 /* save the skb in buffer_info as good */
950 buffer_info->skb = skb;
951 skb = new_skb;
952 }
953 /* else just continue with the old one */
954 }
955 /* end copybreak code */
956 skb_put(skb, length);
957
958 /* Receive Checksum Offload */
5f450212
BA
959 e1000_rx_checksum(adapter, staterr,
960 le16_to_cpu(rx_desc->wb.lower.hi_dword.
961 csum_ip.csum), skb);
bc7f75fa 962
5f450212
BA
963 e1000_receive_skb(adapter, netdev, skb, staterr,
964 rx_desc->wb.upper.vlan);
bc7f75fa
AK
965
966next_desc:
5f450212 967 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
bc7f75fa
AK
968
969 /* return some buffers to hardware, one at a time is too slow */
970 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
c2fed996
JK
971 adapter->alloc_rx_buf(adapter, cleaned_count,
972 GFP_ATOMIC);
bc7f75fa
AK
973 cleaned_count = 0;
974 }
975
976 /* use prefetched values */
977 rx_desc = next_rxd;
978 buffer_info = next_buffer;
5f450212
BA
979
980 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
bc7f75fa
AK
981 }
982 rx_ring->next_to_clean = i;
983
984 cleaned_count = e1000_desc_unused(rx_ring);
985 if (cleaned_count)
c2fed996 986 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
bc7f75fa 987
bc7f75fa 988 adapter->total_rx_bytes += total_rx_bytes;
7c25769f 989 adapter->total_rx_packets += total_rx_packets;
bc7f75fa
AK
990 return cleaned;
991}
992
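/*
 * Tuning note (illustrative; 'copybreak' is a module parameter of this
 * driver): the small-packet copy path used above can be tuned or turned
 * off at load time, e.g.
 *
 *     modprobe e1000e copybreak=0     -> never copy, always flip buffers
 *     modprobe e1000e copybreak=256   -> copy frames shorter than 256 bytes
 *
 * Copying keeps the large DMA-mapped buffer on the ring and hands the
 * stack a right-sized skb, which helps when many small frames arrive.
 */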
bc7f75fa
AK
993static void e1000_put_txbuf(struct e1000_adapter *adapter,
994 struct e1000_buffer *buffer_info)
995{
03b1320d
AD
996 if (buffer_info->dma) {
997 if (buffer_info->mapped_as_page)
0be3f55f
NN
998 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
999 buffer_info->length, DMA_TO_DEVICE);
03b1320d 1000 else
0be3f55f
NN
1001 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1002 buffer_info->length, DMA_TO_DEVICE);
03b1320d
AD
1003 buffer_info->dma = 0;
1004 }
bc7f75fa
AK
1005 if (buffer_info->skb) {
1006 dev_kfree_skb_any(buffer_info->skb);
1007 buffer_info->skb = NULL;
1008 }
1b7719c4 1009 buffer_info->time_stamp = 0;
bc7f75fa
AK
1010}
1011
41cec6f1 1012static void e1000_print_hw_hang(struct work_struct *work)
bc7f75fa 1013{
41cec6f1
BA
1014 struct e1000_adapter *adapter = container_of(work,
1015 struct e1000_adapter,
1016 print_hang_task);
09357b00 1017 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
1018 struct e1000_ring *tx_ring = adapter->tx_ring;
1019 unsigned int i = tx_ring->next_to_clean;
1020 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1021 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
41cec6f1
BA
1022 struct e1000_hw *hw = &adapter->hw;
1023 u16 phy_status, phy_1000t_status, phy_ext_status;
1024 u16 pci_status;
1025
615b32af
JB
1026 if (test_bit(__E1000_DOWN, &adapter->state))
1027 return;
1028
09357b00
JK
1029 if (!adapter->tx_hang_recheck &&
1030 (adapter->flags2 & FLAG2_DMA_BURST)) {
1031 /* The ring may just be blocked on write-back; flush pending
1032 * descriptor writebacks to memory and check again
1033 */
1034 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1035 /* execute the writes immediately */
1036 e1e_flush();
1037 adapter->tx_hang_recheck = true;
1038 return;
1039 }
1040 /* Real hang detected */
1041 adapter->tx_hang_recheck = false;
1042 netif_stop_queue(netdev);
1043
41cec6f1
BA
1044 e1e_rphy(hw, PHY_STATUS, &phy_status);
1045 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
1046 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
bc7f75fa 1047
41cec6f1
BA
1048 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1049
1050 /* detected Hardware unit hang */
1051 e_err("Detected Hardware Unit Hang:\n"
44defeb3
JK
1052 " TDH <%x>\n"
1053 " TDT <%x>\n"
1054 " next_to_use <%x>\n"
1055 " next_to_clean <%x>\n"
1056 "buffer_info[next_to_clean]:\n"
1057 " time_stamp <%lx>\n"
1058 " next_to_watch <%x>\n"
1059 " jiffies <%lx>\n"
41cec6f1
BA
1060 " next_to_watch.status <%x>\n"
1061 "MAC Status <%x>\n"
1062 "PHY Status <%x>\n"
1063 "PHY 1000BASE-T Status <%x>\n"
1064 "PHY Extended Status <%x>\n"
1065 "PCI Status <%x>\n",
44defeb3
JK
1066 readl(adapter->hw.hw_addr + tx_ring->head),
1067 readl(adapter->hw.hw_addr + tx_ring->tail),
1068 tx_ring->next_to_use,
1069 tx_ring->next_to_clean,
1070 tx_ring->buffer_info[eop].time_stamp,
1071 eop,
1072 jiffies,
41cec6f1
BA
1073 eop_desc->upper.fields.status,
1074 er32(STATUS),
1075 phy_status,
1076 phy_1000t_status,
1077 phy_ext_status,
1078 pci_status);
bc7f75fa
AK
1079}
1080
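/*
 * Flow sketch (derived from this function and e1000_clean_tx_irq()
 * below): with DMA burst enabled, hang reporting is a two-pass check --
 * the first scheduling only flushes pending descriptor write-backs
 * (TIDV | E1000_TIDV_FPD) and sets tx_hang_recheck; only if the work is
 * scheduled again without the ring having made progress is the hang
 * treated as real, the queue stopped and the MAC/PHY/PCI state dumped.
 * This avoids false "hardware unit hang" reports for descriptors that
 * were merely parked in the write-back coalescing buffer.
 */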
1081/**
1082 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1083 * @adapter: board private structure
1084 *
1085 * the return value indicates whether actual cleaning was done; there
1086 * is no guarantee that everything was cleaned
1087 **/
1088static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1089{
1090 struct net_device *netdev = adapter->netdev;
1091 struct e1000_hw *hw = &adapter->hw;
1092 struct e1000_ring *tx_ring = adapter->tx_ring;
1093 struct e1000_tx_desc *tx_desc, *eop_desc;
1094 struct e1000_buffer *buffer_info;
1095 unsigned int i, eop;
1096 unsigned int count = 0;
bc7f75fa 1097 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3f0cfa3b 1098 unsigned int bytes_compl = 0, pkts_compl = 0;
bc7f75fa
AK
1099
1100 i = tx_ring->next_to_clean;
1101 eop = tx_ring->buffer_info[i].next_to_watch;
1102 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1103
12d04a3c
AD
1104 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1105 (count < tx_ring->count)) {
a86043c2 1106 bool cleaned = false;
2d0bb1c1 1107 rmb(); /* read buffer_info after eop_desc */
a86043c2 1108 for (; !cleaned; count++) {
bc7f75fa
AK
1109 tx_desc = E1000_TX_DESC(*tx_ring, i);
1110 buffer_info = &tx_ring->buffer_info[i];
1111 cleaned = (i == eop);
1112
1113 if (cleaned) {
9ed318d5
TH
1114 total_tx_packets += buffer_info->segs;
1115 total_tx_bytes += buffer_info->bytecount;
3f0cfa3b
TH
1116 if (buffer_info->skb) {
1117 bytes_compl += buffer_info->skb->len;
1118 pkts_compl++;
1119 }
bc7f75fa
AK
1120 }
1121
1122 e1000_put_txbuf(adapter, buffer_info);
1123 tx_desc->upper.data = 0;
1124
1125 i++;
1126 if (i == tx_ring->count)
1127 i = 0;
1128 }
1129
dac87619
TL
1130 if (i == tx_ring->next_to_use)
1131 break;
bc7f75fa
AK
1132 eop = tx_ring->buffer_info[i].next_to_watch;
1133 eop_desc = E1000_TX_DESC(*tx_ring, eop);
bc7f75fa
AK
1134 }
1135
1136 tx_ring->next_to_clean = i;
1137
3f0cfa3b
TH
1138 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1139
bc7f75fa 1140#define TX_WAKE_THRESHOLD 32
a86043c2
JB
1141 if (count && netif_carrier_ok(netdev) &&
1142 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
bc7f75fa
AK
1143 /* Make sure that anybody stopping the queue after this
1144 * sees the new next_to_clean.
1145 */
1146 smp_mb();
1147
1148 if (netif_queue_stopped(netdev) &&
1149 !(test_bit(__E1000_DOWN, &adapter->state))) {
1150 netif_wake_queue(netdev);
1151 ++adapter->restart_queue;
1152 }
1153 }
1154
1155 if (adapter->detect_tx_hung) {
41cec6f1
BA
1156 /*
1157 * Detect a transmit hang in hardware, this serializes the
1158 * check with the clearing of time_stamp and movement of i
1159 */
3db1cd5c 1160 adapter->detect_tx_hung = false;
12d04a3c
AD
1161 if (tx_ring->buffer_info[i].time_stamp &&
1162 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
8e95a202 1163 + (adapter->tx_timeout_factor * HZ)) &&
09357b00 1164 !(er32(STATUS) & E1000_STATUS_TXOFF))
41cec6f1 1165 schedule_work(&adapter->print_hang_task);
09357b00
JK
1166 else
1167 adapter->tx_hang_recheck = false;
bc7f75fa
AK
1168 }
1169 adapter->total_tx_bytes += total_tx_bytes;
1170 adapter->total_tx_packets += total_tx_packets;
807540ba 1171 return count < tx_ring->count;
bc7f75fa
AK
1172}
1173
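/*
 * Hysteresis example (illustrative): with TX_WAKE_THRESHOLD = 32 a queue
 * stopped by the transmit path is only woken again once at least 32
 * descriptors have been reclaimed, so on a 256-entry ring the queue
 * stays stopped while e1000_desc_unused() returns 0..31 and restarts at
 * 32 or more.  The smp_mb() above publishes the new next_to_clean before
 * netif_queue_stopped() is re-checked, closing the race with a
 * transmitter that is concurrently stopping the queue.
 */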
bc7f75fa
AK
1174/**
1175 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1176 * @adapter: board private structure
1177 *
1178 * the return value indicates whether actual cleaning was done; there
1179 * is no guarantee that everything was cleaned
1180 **/
1181static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
1182 int *work_done, int work_to_do)
1183{
3bb99fe2 1184 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
1185 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1186 struct net_device *netdev = adapter->netdev;
1187 struct pci_dev *pdev = adapter->pdev;
1188 struct e1000_ring *rx_ring = adapter->rx_ring;
1189 struct e1000_buffer *buffer_info, *next_buffer;
1190 struct e1000_ps_page *ps_page;
1191 struct sk_buff *skb;
1192 unsigned int i, j;
1193 u32 length, staterr;
1194 int cleaned_count = 0;
3db1cd5c 1195 bool cleaned = false;
bc7f75fa
AK
1196 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1197
1198 i = rx_ring->next_to_clean;
1199 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1200 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1201 buffer_info = &rx_ring->buffer_info[i];
1202
1203 while (staterr & E1000_RXD_STAT_DD) {
1204 if (*work_done >= work_to_do)
1205 break;
1206 (*work_done)++;
1207 skb = buffer_info->skb;
2d0bb1c1 1208 rmb(); /* read descriptor and rx_buffer_info after status DD */
bc7f75fa
AK
1209
1210 /* in the packet split case this is header only */
1211 prefetch(skb->data - NET_IP_ALIGN);
1212
1213 i++;
1214 if (i == rx_ring->count)
1215 i = 0;
1216 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1217 prefetch(next_rxd);
1218
1219 next_buffer = &rx_ring->buffer_info[i];
1220
3db1cd5c 1221 cleaned = true;
bc7f75fa 1222 cleaned_count++;
0be3f55f 1223 dma_unmap_single(&pdev->dev, buffer_info->dma,
af667a29 1224 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
bc7f75fa
AK
1225 buffer_info->dma = 0;
1226
af667a29 1227 /* see !EOP comment in other Rx routine */
b94b5028
JB
1228 if (!(staterr & E1000_RXD_STAT_EOP))
1229 adapter->flags2 |= FLAG2_IS_DISCARDING;
1230
1231 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
ef456f85 1232 e_dbg("Packet Split buffers didn't pick up the full packet\n");
bc7f75fa 1233 dev_kfree_skb_irq(skb);
b94b5028
JB
1234 if (staterr & E1000_RXD_STAT_EOP)
1235 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa
AK
1236 goto next_desc;
1237 }
1238
1239 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
1240 dev_kfree_skb_irq(skb);
1241 goto next_desc;
1242 }
1243
1244 length = le16_to_cpu(rx_desc->wb.middle.length0);
1245
1246 if (!length) {
ef456f85 1247 e_dbg("Last part of the packet spanning multiple descriptors\n");
bc7f75fa
AK
1248 dev_kfree_skb_irq(skb);
1249 goto next_desc;
1250 }
1251
1252 /* Good Receive */
1253 skb_put(skb, length);
1254
1255 {
ad68076e
BA
1256 /*
1257 * this looks ugly, but it seems compiler issues make it
1258 * more efficient than reusing j
1259 */
bc7f75fa
AK
1260 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1261
ad68076e
BA
1262 /*
1263 * page alloc/put takes too long and affects small packet
1264 * throughput, so unsplit small packets and save the alloc/put
1265 * only valid in softirq (napi) context to call kmap_*
1266 */
bc7f75fa
AK
1267 if (l1 && (l1 <= copybreak) &&
1268 ((length + l1) <= adapter->rx_ps_bsize0)) {
1269 u8 *vaddr;
1270
47f44e40 1271 ps_page = &buffer_info->ps_pages[0];
bc7f75fa 1272
ad68076e
BA
1273 /*
1274 * there is no documentation about how to call
bc7f75fa 1275 * kmap_atomic, so we can't hold the mapping
ad68076e
BA
1276 * very long
1277 */
0be3f55f
NN
1278 dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
1279 PAGE_SIZE, DMA_FROM_DEVICE);
bc7f75fa
AK
1280 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
1281 memcpy(skb_tail_pointer(skb), vaddr, l1);
1282 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
0be3f55f
NN
1283 dma_sync_single_for_device(&pdev->dev, ps_page->dma,
1284 PAGE_SIZE, DMA_FROM_DEVICE);
140a7480 1285
eb7c3adb
JK
1286 /* remove the CRC */
1287 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
1288 l1 -= 4;
1289
bc7f75fa
AK
1290 skb_put(skb, l1);
1291 goto copydone;
1292 } /* if */
1293 }
1294
1295 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1296 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1297 if (!length)
1298 break;
1299
47f44e40 1300 ps_page = &buffer_info->ps_pages[j];
0be3f55f
NN
1301 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1302 DMA_FROM_DEVICE);
bc7f75fa
AK
1303 ps_page->dma = 0;
1304 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1305 ps_page->page = NULL;
1306 skb->len += length;
1307 skb->data_len += length;
98a045d7 1308 skb->truesize += PAGE_SIZE;
bc7f75fa
AK
1309 }
1310
eb7c3adb
JK
1311 /* strip the ethernet crc, problem is we're using pages now so
1312 * this whole operation can get a little cpu intensive
1313 */
1314 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
1315 pskb_trim(skb, skb->len - 4);
1316
bc7f75fa
AK
1317copydone:
1318 total_rx_bytes += skb->len;
1319 total_rx_packets++;
1320
1321 e1000_rx_checksum(adapter, staterr, le16_to_cpu(
1322 rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
1323
1324 if (rx_desc->wb.upper.header_status &
1325 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1326 adapter->rx_hdr_split++;
1327
1328 e1000_receive_skb(adapter, netdev, skb,
1329 staterr, rx_desc->wb.middle.vlan);
1330
1331next_desc:
1332 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1333 buffer_info->skb = NULL;
1334
1335 /* return some buffers to hardware, one at a time is too slow */
1336 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
c2fed996
JK
1337 adapter->alloc_rx_buf(adapter, cleaned_count,
1338 GFP_ATOMIC);
bc7f75fa
AK
1339 cleaned_count = 0;
1340 }
1341
1342 /* use prefetched values */
1343 rx_desc = next_rxd;
1344 buffer_info = next_buffer;
1345
1346 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1347 }
1348 rx_ring->next_to_clean = i;
1349
1350 cleaned_count = e1000_desc_unused(rx_ring);
1351 if (cleaned_count)
c2fed996 1352 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
bc7f75fa 1353
bc7f75fa 1354 adapter->total_rx_bytes += total_rx_bytes;
7c25769f 1355 adapter->total_rx_packets += total_rx_packets;
bc7f75fa
AK
1356 return cleaned;
1357}
1358
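/*
 * Layout sketch (illustrative sizes): in packet-split mode the hardware
 * writes the protocol headers into the small buffer 0 (rx_ps_bsize0
 * bytes) and the payload into up to PS_PAGE_BUFFERS whole pages, so a
 * frame longer than bsize0 reaches the stack as
 *
 *     skb linear area : headers (plus small payloads via the copybreak
 *                       shortcut above)
 *     skb frags[0..n] : one page per used buffer, attached with
 *                       skb_fill_page_desc()
 *
 * and len/data_len/truesize grow per page instead of being memcpy'd.
 */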
97ac8cae
BA
1359/**
1360 * e1000_consume_page - helper function
1361 **/
1362static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1363 u16 length)
1364{
1365 bi->page = NULL;
1366 skb->len += length;
1367 skb->data_len += length;
98a045d7 1368 skb->truesize += PAGE_SIZE;
97ac8cae
BA
1369}
1370
1371/**
1372 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1373 * @adapter: board private structure
1374 *
1375 * the return value indicates whether actual cleaning was done; there
1376 * is no guarantee that everything was cleaned
1377 **/
1378
1379static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1380 int *work_done, int work_to_do)
1381{
1382 struct net_device *netdev = adapter->netdev;
1383 struct pci_dev *pdev = adapter->pdev;
1384 struct e1000_ring *rx_ring = adapter->rx_ring;
5f450212 1385 union e1000_rx_desc_extended *rx_desc, *next_rxd;
97ac8cae 1386 struct e1000_buffer *buffer_info, *next_buffer;
5f450212 1387 u32 length, staterr;
97ac8cae
BA
1388 unsigned int i;
1389 int cleaned_count = 0;
1390 bool cleaned = false;
1391 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1392
1393 i = rx_ring->next_to_clean;
5f450212
BA
1394 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1395 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
97ac8cae
BA
1396 buffer_info = &rx_ring->buffer_info[i];
1397
5f450212 1398 while (staterr & E1000_RXD_STAT_DD) {
97ac8cae 1399 struct sk_buff *skb;
97ac8cae
BA
1400
1401 if (*work_done >= work_to_do)
1402 break;
1403 (*work_done)++;
2d0bb1c1 1404 rmb(); /* read descriptor and rx_buffer_info after status DD */
97ac8cae 1405
97ac8cae
BA
1406 skb = buffer_info->skb;
1407 buffer_info->skb = NULL;
1408
1409 ++i;
1410 if (i == rx_ring->count)
1411 i = 0;
5f450212 1412 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
97ac8cae
BA
1413 prefetch(next_rxd);
1414
1415 next_buffer = &rx_ring->buffer_info[i];
1416
1417 cleaned = true;
1418 cleaned_count++;
0be3f55f
NN
1419 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1420 DMA_FROM_DEVICE);
97ac8cae
BA
1421 buffer_info->dma = 0;
1422
5f450212 1423 length = le16_to_cpu(rx_desc->wb.upper.length);
97ac8cae
BA
1424
1425 /* errors is only valid for DD + EOP descriptors */
5f450212
BA
1426 if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1427 (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
1428 /* recycle both page and skb */
1429 buffer_info->skb = skb;
1430 /* an error means any chain goes out the window too */
1431 if (rx_ring->rx_skb_top)
1432 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1433 rx_ring->rx_skb_top = NULL;
1434 goto next_desc;
97ac8cae
BA
1435 }
1436
f0f1a172 1437#define rxtop (rx_ring->rx_skb_top)
5f450212 1438 if (!(staterr & E1000_RXD_STAT_EOP)) {
97ac8cae
BA
1439 /* this descriptor is only the beginning (or middle) */
1440 if (!rxtop) {
1441 /* this is the beginning of a chain */
1442 rxtop = skb;
1443 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1444 0, length);
1445 } else {
1446 /* this is the middle of a chain */
1447 skb_fill_page_desc(rxtop,
1448 skb_shinfo(rxtop)->nr_frags,
1449 buffer_info->page, 0, length);
1450 /* re-use the skb, only consumed the page */
1451 buffer_info->skb = skb;
1452 }
1453 e1000_consume_page(buffer_info, rxtop, length);
1454 goto next_desc;
1455 } else {
1456 if (rxtop) {
1457 /* end of the chain */
1458 skb_fill_page_desc(rxtop,
1459 skb_shinfo(rxtop)->nr_frags,
1460 buffer_info->page, 0, length);
1461 /* re-use the current skb, we only consumed the
1462 * page */
1463 buffer_info->skb = skb;
1464 skb = rxtop;
1465 rxtop = NULL;
1466 e1000_consume_page(buffer_info, skb, length);
1467 } else {
1468 /* no chain, got EOP, this buf is the packet
1469 * copybreak to save the put_page/alloc_page */
1470 if (length <= copybreak &&
1471 skb_tailroom(skb) >= length) {
1472 u8 *vaddr;
1473 vaddr = kmap_atomic(buffer_info->page,
1474 KM_SKB_DATA_SOFTIRQ);
1475 memcpy(skb_tail_pointer(skb), vaddr,
1476 length);
1477 kunmap_atomic(vaddr,
1478 KM_SKB_DATA_SOFTIRQ);
1479 /* re-use the page, so don't erase
1480 * buffer_info->page */
1481 skb_put(skb, length);
1482 } else {
1483 skb_fill_page_desc(skb, 0,
1484 buffer_info->page, 0,
1485 length);
1486 e1000_consume_page(buffer_info, skb,
1487 length);
1488 }
1489 }
1490 }
1491
1492 /* Receive Checksum Offload XXX recompute due to CRC strip? */
5f450212
BA
1493 e1000_rx_checksum(adapter, staterr,
1494 le16_to_cpu(rx_desc->wb.lower.hi_dword.
1495 csum_ip.csum), skb);
97ac8cae
BA
1496
1497 /* probably a little skewed due to removing CRC */
1498 total_rx_bytes += skb->len;
1499 total_rx_packets++;
1500
1501 /* eth type trans needs skb->data to point to something */
1502 if (!pskb_may_pull(skb, ETH_HLEN)) {
44defeb3 1503 e_err("pskb_may_pull failed.\n");
ef5ab89c 1504 dev_kfree_skb_irq(skb);
97ac8cae
BA
1505 goto next_desc;
1506 }
1507
5f450212
BA
1508 e1000_receive_skb(adapter, netdev, skb, staterr,
1509 rx_desc->wb.upper.vlan);
97ac8cae
BA
1510
1511next_desc:
5f450212 1512 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
97ac8cae
BA
1513
1514 /* return some buffers to hardware, one at a time is too slow */
1515 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
c2fed996
JK
1516 adapter->alloc_rx_buf(adapter, cleaned_count,
1517 GFP_ATOMIC);
97ac8cae
BA
1518 cleaned_count = 0;
1519 }
1520
1521 /* use prefetched values */
1522 rx_desc = next_rxd;
1523 buffer_info = next_buffer;
5f450212
BA
1524
1525 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
97ac8cae
BA
1526 }
1527 rx_ring->next_to_clean = i;
1528
1529 cleaned_count = e1000_desc_unused(rx_ring);
1530 if (cleaned_count)
c2fed996 1531 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
97ac8cae
BA
1532
1533 adapter->total_rx_bytes += total_rx_bytes;
1534 adapter->total_rx_packets += total_rx_packets;
97ac8cae
BA
1535 return cleaned;
1536}
1537
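/*
 * Chaining example (illustrative): a 9018-byte jumbo frame received into
 * PAGE_SIZE buffers (3 descriptors with 4 KiB pages) spans several
 * descriptors and only the last one has EOP set.  The loop above
 * stitches the pieces together via rx_skb_top:
 *
 *     desc 0 (no EOP): rxtop = skb, its page becomes frag 0
 *     desc 1 (no EOP): page appended as frag 1, skb recycled
 *     desc 2 (EOP)   : last page appended, rxtop handed to
 *                      e1000_receive_skb(), rxtop reset to NULL
 *
 * An error on any descriptor in the chain discards the whole frame,
 * since a partial jumbo frame is useless to the stack.
 */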
bc7f75fa
AK
1538/**
1539 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1540 * @adapter: board private structure
1541 **/
1542static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1543{
1544 struct e1000_ring *rx_ring = adapter->rx_ring;
1545 struct e1000_buffer *buffer_info;
1546 struct e1000_ps_page *ps_page;
1547 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
1548 unsigned int i, j;
1549
1550 /* Free all the Rx ring sk_buffs */
1551 for (i = 0; i < rx_ring->count; i++) {
1552 buffer_info = &rx_ring->buffer_info[i];
1553 if (buffer_info->dma) {
1554 if (adapter->clean_rx == e1000_clean_rx_irq)
0be3f55f 1555 dma_unmap_single(&pdev->dev, buffer_info->dma,
bc7f75fa 1556 adapter->rx_buffer_len,
0be3f55f 1557 DMA_FROM_DEVICE);
97ac8cae 1558 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
0be3f55f 1559 dma_unmap_page(&pdev->dev, buffer_info->dma,
97ac8cae 1560 PAGE_SIZE,
0be3f55f 1561 DMA_FROM_DEVICE);
bc7f75fa 1562 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
0be3f55f 1563 dma_unmap_single(&pdev->dev, buffer_info->dma,
bc7f75fa 1564 adapter->rx_ps_bsize0,
0be3f55f 1565 DMA_FROM_DEVICE);
bc7f75fa
AK
1566 buffer_info->dma = 0;
1567 }
1568
97ac8cae
BA
1569 if (buffer_info->page) {
1570 put_page(buffer_info->page);
1571 buffer_info->page = NULL;
1572 }
1573
bc7f75fa
AK
1574 if (buffer_info->skb) {
1575 dev_kfree_skb(buffer_info->skb);
1576 buffer_info->skb = NULL;
1577 }
1578
1579 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
47f44e40 1580 ps_page = &buffer_info->ps_pages[j];
bc7f75fa
AK
1581 if (!ps_page->page)
1582 break;
0be3f55f
NN
1583 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1584 DMA_FROM_DEVICE);
bc7f75fa
AK
1585 ps_page->dma = 0;
1586 put_page(ps_page->page);
1587 ps_page->page = NULL;
1588 }
1589 }
1590
1591 /* there also may be some cached data from a chained receive */
1592 if (rx_ring->rx_skb_top) {
1593 dev_kfree_skb(rx_ring->rx_skb_top);
1594 rx_ring->rx_skb_top = NULL;
1595 }
1596
bc7f75fa
AK
1597 /* Zero out the descriptor ring */
1598 memset(rx_ring->desc, 0, rx_ring->size);
1599
1600 rx_ring->next_to_clean = 0;
1601 rx_ring->next_to_use = 0;
b94b5028 1602 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa
AK
1603
1604 writel(0, adapter->hw.hw_addr + rx_ring->head);
1605 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1606}
1607
a8f88ff5
JB
1608static void e1000e_downshift_workaround(struct work_struct *work)
1609{
1610 struct e1000_adapter *adapter = container_of(work,
1611 struct e1000_adapter, downshift_task);
1612
615b32af
JB
1613 if (test_bit(__E1000_DOWN, &adapter->state))
1614 return;
1615
a8f88ff5
JB
1616 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1617}
1618
bc7f75fa
AK
1619/**
1620 * e1000_intr_msi - Interrupt Handler
1621 * @irq: interrupt number
1622 * @data: pointer to a network interface device structure
1623 **/
1624static irqreturn_t e1000_intr_msi(int irq, void *data)
1625{
1626 struct net_device *netdev = data;
1627 struct e1000_adapter *adapter = netdev_priv(netdev);
1628 struct e1000_hw *hw = &adapter->hw;
1629 u32 icr = er32(ICR);
1630
ad68076e
BA
1631 /*
1632 * read ICR disables interrupts using IAM
1633 */
bc7f75fa 1634
573cca8c 1635 if (icr & E1000_ICR_LSC) {
bc7f75fa 1636 hw->mac.get_link_status = 1;
ad68076e
BA
1637 /*
1638 * ICH8 workaround-- Call gig speed drop workaround on cable
1639 * disconnect (LSC) before accessing any PHY registers
1640 */
bc7f75fa
AK
1641 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1642 (!(er32(STATUS) & E1000_STATUS_LU)))
a8f88ff5 1643 schedule_work(&adapter->downshift_task);
bc7f75fa 1644
ad68076e
BA
1645 /*
1646 * 80003ES2LAN workaround-- For packet buffer work-around on
bc7f75fa 1647 * link down event; disable receives here in the ISR and reset
ad68076e
BA
1648 * adapter in watchdog
1649 */
bc7f75fa
AK
1650 if (netif_carrier_ok(netdev) &&
1651 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1652 /* disable receives */
1653 u32 rctl = er32(RCTL);
1654 ew32(RCTL, rctl & ~E1000_RCTL_EN);
318a94d6 1655 adapter->flags |= FLAG_RX_RESTART_NOW;
bc7f75fa
AK
1656 }
1657 /* guard against interrupt when we're going down */
1658 if (!test_bit(__E1000_DOWN, &adapter->state))
1659 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1660 }
1661
288379f0 1662 if (napi_schedule_prep(&adapter->napi)) {
bc7f75fa
AK
1663 adapter->total_tx_bytes = 0;
1664 adapter->total_tx_packets = 0;
1665 adapter->total_rx_bytes = 0;
1666 adapter->total_rx_packets = 0;
288379f0 1667 __napi_schedule(&adapter->napi);
bc7f75fa
AK
1668 }
1669
1670 return IRQ_HANDLED;
1671}
1672
1673/**
1674 * e1000_intr - Interrupt Handler
1675 * @irq: interrupt number
1676 * @data: pointer to a network interface device structure
1677 **/
1678static irqreturn_t e1000_intr(int irq, void *data)
1679{
1680 struct net_device *netdev = data;
1681 struct e1000_adapter *adapter = netdev_priv(netdev);
1682 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 1683 u32 rctl, icr = er32(ICR);
4662e82b 1684
a68ea775 1685 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
bc7f75fa
AK
1686 return IRQ_NONE; /* Not our interrupt */
1687
ad68076e
BA
1688 /*
1689 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1690 * not set, then the adapter didn't send an interrupt
1691 */
bc7f75fa
AK
1692 if (!(icr & E1000_ICR_INT_ASSERTED))
1693 return IRQ_NONE;
1694
ad68076e
BA
1695 /*
1696 * Interrupt Auto-Mask...upon reading ICR,
1697 * interrupts are masked. No need for the
1698 * IMC write
1699 */
bc7f75fa 1700
573cca8c 1701 if (icr & E1000_ICR_LSC) {
bc7f75fa 1702 hw->mac.get_link_status = 1;
ad68076e
BA
1703 /*
1704 * ICH8 workaround-- Call gig speed drop workaround on cable
1705 * disconnect (LSC) before accessing any PHY registers
1706 */
bc7f75fa
AK
1707 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1708 (!(er32(STATUS) & E1000_STATUS_LU)))
a8f88ff5 1709 schedule_work(&adapter->downshift_task);
bc7f75fa 1710
ad68076e
BA
1711 /*
1712 * 80003ES2LAN workaround--
bc7f75fa
AK
1713 * For packet buffer work-around on link down event;
1714 * disable receives here in the ISR and
1715 * reset adapter in watchdog
1716 */
1717 if (netif_carrier_ok(netdev) &&
1718 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1719 /* disable receives */
1720 rctl = er32(RCTL);
1721 ew32(RCTL, rctl & ~E1000_RCTL_EN);
318a94d6 1722 adapter->flags |= FLAG_RX_RESTART_NOW;
bc7f75fa
AK
1723 }
1724 /* guard against interrupt when we're going down */
1725 if (!test_bit(__E1000_DOWN, &adapter->state))
1726 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1727 }
1728
288379f0 1729 if (napi_schedule_prep(&adapter->napi)) {
bc7f75fa
AK
1730 adapter->total_tx_bytes = 0;
1731 adapter->total_tx_packets = 0;
1732 adapter->total_rx_bytes = 0;
1733 adapter->total_rx_packets = 0;
288379f0 1734 __napi_schedule(&adapter->napi);
bc7f75fa
AK
1735 }
1736
1737 return IRQ_HANDLED;
1738}
1739
4662e82b
BA
1740static irqreturn_t e1000_msix_other(int irq, void *data)
1741{
1742 struct net_device *netdev = data;
1743 struct e1000_adapter *adapter = netdev_priv(netdev);
1744 struct e1000_hw *hw = &adapter->hw;
1745 u32 icr = er32(ICR);
1746
1747 if (!(icr & E1000_ICR_INT_ASSERTED)) {
a3c69fef
JB
1748 if (!test_bit(__E1000_DOWN, &adapter->state))
1749 ew32(IMS, E1000_IMS_OTHER);
4662e82b
BA
1750 return IRQ_NONE;
1751 }
1752
1753 if (icr & adapter->eiac_mask)
1754 ew32(ICS, (icr & adapter->eiac_mask));
1755
1756 if (icr & E1000_ICR_OTHER) {
1757 if (!(icr & E1000_ICR_LSC))
1758 goto no_link_interrupt;
1759 hw->mac.get_link_status = 1;
1760 /* guard against interrupt when we're going down */
1761 if (!test_bit(__E1000_DOWN, &adapter->state))
1762 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1763 }
1764
1765no_link_interrupt:
a3c69fef
JB
1766 if (!test_bit(__E1000_DOWN, &adapter->state))
1767 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
4662e82b
BA
1768
1769 return IRQ_HANDLED;
1770}
1771
1772
1773static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1774{
1775 struct net_device *netdev = data;
1776 struct e1000_adapter *adapter = netdev_priv(netdev);
1777 struct e1000_hw *hw = &adapter->hw;
1778 struct e1000_ring *tx_ring = adapter->tx_ring;
1779
1780
1781 adapter->total_tx_bytes = 0;
1782 adapter->total_tx_packets = 0;
1783
1784 if (!e1000_clean_tx_irq(adapter))
1785 /* Ring was not completely cleaned, so fire another interrupt */
1786 ew32(ICS, tx_ring->ims_val);
1787
1788 return IRQ_HANDLED;
1789}
1790
1791static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1792{
1793 struct net_device *netdev = data;
1794 struct e1000_adapter *adapter = netdev_priv(netdev);
1795
1796 /* Write the ITR value calculated at the end of the
1797 * previous interrupt.
1798 */
1799 if (adapter->rx_ring->set_itr) {
1800 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1801 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1802 adapter->rx_ring->set_itr = 0;
1803 }
1804
288379f0 1805 if (napi_schedule_prep(&adapter->napi)) {
4662e82b
BA
1806 adapter->total_rx_bytes = 0;
1807 adapter->total_rx_packets = 0;
288379f0 1808 __napi_schedule(&adapter->napi);
4662e82b
BA
1809 }
1810 return IRQ_HANDLED;
1811}
1812
1813/**
1814 * e1000_configure_msix - Configure MSI-X hardware
1815 *
1816 * e1000_configure_msix sets up the hardware to properly
1817 * generate MSI-X interrupts.
1818 **/
1819static void e1000_configure_msix(struct e1000_adapter *adapter)
1820{
1821 struct e1000_hw *hw = &adapter->hw;
1822 struct e1000_ring *rx_ring = adapter->rx_ring;
1823 struct e1000_ring *tx_ring = adapter->tx_ring;
1824 int vector = 0;
1825 u32 ctrl_ext, ivar = 0;
1826
1827 adapter->eiac_mask = 0;
1828
1829 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1830 if (hw->mac.type == e1000_82574) {
1831 u32 rfctl = er32(RFCTL);
1832 rfctl |= E1000_RFCTL_ACK_DIS;
1833 ew32(RFCTL, rfctl);
1834 }
1835
1836#define E1000_IVAR_INT_ALLOC_VALID 0x8
1837 /* Configure Rx vector */
1838 rx_ring->ims_val = E1000_IMS_RXQ0;
1839 adapter->eiac_mask |= rx_ring->ims_val;
1840 if (rx_ring->itr_val)
1841 writel(1000000000 / (rx_ring->itr_val * 256),
1842 hw->hw_addr + rx_ring->itr_register);
1843 else
1844 writel(1, hw->hw_addr + rx_ring->itr_register);
1845 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1846
1847 /* Configure Tx vector */
1848 tx_ring->ims_val = E1000_IMS_TXQ0;
1849 vector++;
1850 if (tx_ring->itr_val)
1851 writel(1000000000 / (tx_ring->itr_val * 256),
1852 hw->hw_addr + tx_ring->itr_register);
1853 else
1854 writel(1, hw->hw_addr + tx_ring->itr_register);
1855 adapter->eiac_mask |= tx_ring->ims_val;
1856 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1857
1858 /* set vector for Other Causes, e.g. link changes */
1859 vector++;
1860 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1861 if (rx_ring->itr_val)
1862 writel(1000000000 / (rx_ring->itr_val * 256),
1863 hw->hw_addr + E1000_EITR_82574(vector));
1864 else
1865 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1866
1867 /* Cause Tx interrupts on every write back */
1868 ivar |= (1 << 31);
1869
1870 ew32(IVAR, ivar);
1871
1872 /* enable MSI-X PBA support */
1873 ctrl_ext = er32(CTRL_EXT);
1874 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1875
1876 /* Auto-Mask Other interrupts upon ICR read */
1877#define E1000_EIAC_MASK_82574 0x01F00000
1878 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1879 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1880 ew32(CTRL_EXT, ctrl_ext);
1881 e1e_flush();
1882}
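/*
 * Illustrative sketch, not part of the driver: the writel() calls above
 * convert a requested interrupt rate into the value the ITR/EITR
 * registers expect.  The register holds the minimum gap between
 * interrupts in units of 256 ns, so a rate of R interrupts/sec maps to
 * (10^9 / R) / 256 register units.  The helper name below is ours, for
 * illustration only.
 */
static inline unsigned int itr_rate_to_reg(unsigned int ints_per_sec)
{
	/* e.g. 20000 ints/s -> 50000 ns between interrupts -> ~195 units */
	return 1000000000 / (ints_per_sec * 256);
}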
1883
1884void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1885{
1886 if (adapter->msix_entries) {
1887 pci_disable_msix(adapter->pdev);
1888 kfree(adapter->msix_entries);
1889 adapter->msix_entries = NULL;
1890 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1891 pci_disable_msi(adapter->pdev);
1892 adapter->flags &= ~FLAG_MSI_ENABLED;
1893 }
4662e82b
BA
1894}
1895
1896/**
1897 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1898 *
1899 * Attempt to configure interrupts using the best available
1900 * capabilities of the hardware and kernel.
1901 **/
1902void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1903{
1904 int err;
8e86acd7 1905 int i;
4662e82b
BA
1906
1907 switch (adapter->int_mode) {
1908 case E1000E_INT_MODE_MSIX:
1909 if (adapter->flags & FLAG_HAS_MSIX) {
8e86acd7
JK
1910 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
1911 adapter->msix_entries = kcalloc(adapter->num_vectors,
4662e82b
BA
1912 sizeof(struct msix_entry),
1913 GFP_KERNEL);
1914 if (adapter->msix_entries) {
8e86acd7 1915 for (i = 0; i < adapter->num_vectors; i++)
4662e82b
BA
1916 adapter->msix_entries[i].entry = i;
1917
1918 err = pci_enable_msix(adapter->pdev,
1919 adapter->msix_entries,
8e86acd7 1920 adapter->num_vectors);
b1cdfead 1921 if (err == 0)
4662e82b
BA
1922 return;
1923 }
1924 /* MSI-X failed, so fall through and try MSI */
ef456f85 1925 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
4662e82b
BA
1926 e1000e_reset_interrupt_capability(adapter);
1927 }
1928 adapter->int_mode = E1000E_INT_MODE_MSI;
1929 /* Fall through */
1930 case E1000E_INT_MODE_MSI:
1931 if (!pci_enable_msi(adapter->pdev)) {
1932 adapter->flags |= FLAG_MSI_ENABLED;
1933 } else {
1934 adapter->int_mode = E1000E_INT_MODE_LEGACY;
ef456f85 1935 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
4662e82b
BA
1936 }
1937 /* Fall through */
1938 case E1000E_INT_MODE_LEGACY:
1939 /* Don't do anything; this is the system default */
1940 break;
1941 }
8e86acd7
JK
1942
1943 /* store the number of vectors being used */
1944 adapter->num_vectors = 1;
4662e82b
BA
1945}
1946
1947/**
1948 * e1000_request_msix - Initialize MSI-X interrupts
1949 *
1950 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1951 * kernel.
1952 **/
1953static int e1000_request_msix(struct e1000_adapter *adapter)
1954{
1955 struct net_device *netdev = adapter->netdev;
1956 int err = 0, vector = 0;
1957
1958 if (strlen(netdev->name) < (IFNAMSIZ - 5))
79f5e840
BA
1959 snprintf(adapter->rx_ring->name,
1960 sizeof(adapter->rx_ring->name) - 1,
1961 "%s-rx-0", netdev->name);
4662e82b
BA
1962 else
1963 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1964 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 1965 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
4662e82b
BA
1966 netdev);
1967 if (err)
1968 goto out;
1969 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1970 adapter->rx_ring->itr_val = adapter->itr;
1971 vector++;
1972
1973 if (strlen(netdev->name) < (IFNAMSIZ - 5))
79f5e840
BA
1974 snprintf(adapter->tx_ring->name,
1975 sizeof(adapter->tx_ring->name) - 1,
1976 "%s-tx-0", netdev->name);
4662e82b
BA
1977 else
1978 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1979 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 1980 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
4662e82b
BA
1981 netdev);
1982 if (err)
1983 goto out;
1984 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1985 adapter->tx_ring->itr_val = adapter->itr;
1986 vector++;
1987
1988 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 1989 e1000_msix_other, 0, netdev->name, netdev);
4662e82b
BA
1990 if (err)
1991 goto out;
1992
1993 e1000_configure_msix(adapter);
1994 return 0;
1995out:
1996 return err;
1997}
1998
f8d59f78
BA
1999/**
2000 * e1000_request_irq - initialize interrupts
2001 *
2002 * Attempts to configure interrupts using the best available
2003 * capabilities of the hardware and kernel.
2004 **/
bc7f75fa
AK
2005static int e1000_request_irq(struct e1000_adapter *adapter)
2006{
2007 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
2008 int err;
2009
4662e82b
BA
2010 if (adapter->msix_entries) {
2011 err = e1000_request_msix(adapter);
2012 if (!err)
2013 return err;
2014 /* fall back to MSI */
2015 e1000e_reset_interrupt_capability(adapter);
2016 adapter->int_mode = E1000E_INT_MODE_MSI;
2017 e1000e_set_interrupt_capability(adapter);
bc7f75fa 2018 }
4662e82b 2019 if (adapter->flags & FLAG_MSI_ENABLED) {
a0607fd3 2020 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
4662e82b
BA
2021 netdev->name, netdev);
2022 if (!err)
2023 return err;
bc7f75fa 2024
4662e82b
BA
2025 /* fall back to legacy interrupt */
2026 e1000e_reset_interrupt_capability(adapter);
2027 adapter->int_mode = E1000E_INT_MODE_LEGACY;
bc7f75fa
AK
2028 }
2029
a0607fd3 2030 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
4662e82b
BA
2031 netdev->name, netdev);
2032 if (err)
2033 e_err("Unable to allocate interrupt, Error: %d\n", err);
2034
bc7f75fa
AK
2035 return err;
2036}
2037
2038static void e1000_free_irq(struct e1000_adapter *adapter)
2039{
2040 struct net_device *netdev = adapter->netdev;
2041
4662e82b
BA
2042 if (adapter->msix_entries) {
2043 int vector = 0;
2044
2045 free_irq(adapter->msix_entries[vector].vector, netdev);
2046 vector++;
2047
2048 free_irq(adapter->msix_entries[vector].vector, netdev);
2049 vector++;
2050
2051 /* Other Causes interrupt vector */
2052 free_irq(adapter->msix_entries[vector].vector, netdev);
2053 return;
bc7f75fa 2054 }
4662e82b
BA
2055
2056 free_irq(adapter->pdev->irq, netdev);
bc7f75fa
AK
2057}
2058
2059/**
2060 * e1000_irq_disable - Mask off interrupt generation on the NIC
2061 **/
2062static void e1000_irq_disable(struct e1000_adapter *adapter)
2063{
2064 struct e1000_hw *hw = &adapter->hw;
2065
bc7f75fa 2066 ew32(IMC, ~0);
4662e82b
BA
2067 if (adapter->msix_entries)
2068 ew32(EIAC_82574, 0);
bc7f75fa 2069 e1e_flush();
8e86acd7
JK
2070
2071 if (adapter->msix_entries) {
2072 int i;
2073 for (i = 0; i < adapter->num_vectors; i++)
2074 synchronize_irq(adapter->msix_entries[i].vector);
2075 } else {
2076 synchronize_irq(adapter->pdev->irq);
2077 }
bc7f75fa
AK
2078}
2079
2080/**
2081 * e1000_irq_enable - Enable default interrupt generation settings
2082 **/
2083static void e1000_irq_enable(struct e1000_adapter *adapter)
2084{
2085 struct e1000_hw *hw = &adapter->hw;
2086
4662e82b
BA
2087 if (adapter->msix_entries) {
2088 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2089 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2090 } else {
2091 ew32(IMS, IMS_ENABLE_MASK);
2092 }
74ef9c39 2093 e1e_flush();
bc7f75fa
AK
2094}
2095
2096/**
31dbe5b4 2097 * e1000e_get_hw_control - get control of the h/w from f/w
bc7f75fa
AK
2098 * @adapter: address of board private structure
2099 *
31dbe5b4 2100 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
bc7f75fa
AK
2101 * For ASF and Pass Through versions of f/w this means that
2102 * the driver is loaded. For AMT version (only with 82573)
2103 * of the f/w this means that the network i/f is open.
2104 **/
31dbe5b4 2105void e1000e_get_hw_control(struct e1000_adapter *adapter)
bc7f75fa
AK
2106{
2107 struct e1000_hw *hw = &adapter->hw;
2108 u32 ctrl_ext;
2109 u32 swsm;
2110
2111 /* Let firmware know the driver has taken over */
2112 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2113 swsm = er32(SWSM);
2114 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2115 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2116 ctrl_ext = er32(CTRL_EXT);
ad68076e 2117 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
bc7f75fa
AK
2118 }
2119}
2120
2121/**
31dbe5b4 2122 * e1000e_release_hw_control - release control of the h/w to f/w
bc7f75fa
AK
2123 * @adapter: address of board private structure
2124 *
31dbe5b4 2125 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
bc7f75fa
AK
2126 * For ASF and Pass Through versions of f/w this means that the
2127 * driver is no longer loaded. For AMT version (only with 82573)
2128 * of the f/w this means that the network i/f is closed.
2129 *
2130 **/
31dbe5b4 2131void e1000e_release_hw_control(struct e1000_adapter *adapter)
bc7f75fa
AK
2132{
2133 struct e1000_hw *hw = &adapter->hw;
2134 u32 ctrl_ext;
2135 u32 swsm;
2136
2137 /* Let firmware take over control of h/w */
2138 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2139 swsm = er32(SWSM);
2140 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2141 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2142 ctrl_ext = er32(CTRL_EXT);
ad68076e 2143 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
bc7f75fa
AK
2144 }
2145}
2146
bc7f75fa
AK
2147/**
2148 * e1000_alloc_ring_dma - allocate memory for a ring structure
2149 **/
2150static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2151 struct e1000_ring *ring)
2152{
2153 struct pci_dev *pdev = adapter->pdev;
2154
2155 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2156 GFP_KERNEL);
2157 if (!ring->desc)
2158 return -ENOMEM;
2159
2160 return 0;
2161}
2162
2163/**
2164 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2165 * @adapter: board private structure
2166 *
2167 * Return 0 on success, negative on failure
2168 **/
2169int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2170{
2171 struct e1000_ring *tx_ring = adapter->tx_ring;
2172 int err = -ENOMEM, size;
2173
2174 size = sizeof(struct e1000_buffer) * tx_ring->count;
89bf67f1 2175 tx_ring->buffer_info = vzalloc(size);
bc7f75fa
AK
2176 if (!tx_ring->buffer_info)
2177 goto err;
bc7f75fa
AK
2178
2179 /* round up to nearest 4K */
2180 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2181 tx_ring->size = ALIGN(tx_ring->size, 4096);
2182
2183 err = e1000_alloc_ring_dma(adapter, tx_ring);
2184 if (err)
2185 goto err;
2186
2187 tx_ring->next_to_use = 0;
2188 tx_ring->next_to_clean = 0;
bc7f75fa
AK
2189
2190 return 0;
2191err:
2192 vfree(tx_ring->buffer_info);
44defeb3 2193 e_err("Unable to allocate memory for the transmit descriptor ring\n");
bc7f75fa
AK
2194 return err;
2195}
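/*
 * Illustrative sketch, not part of the driver: ALIGN() above rounds the
 * descriptor area up to the next 4 KB boundary.  A minimal equivalent
 * for a power-of-two boundary, assuming the 16-byte legacy Tx
 * descriptor; the helper name is ours.
 */
static inline unsigned long tx_ring_bytes(unsigned int count)
{
	unsigned long size = count * 16UL;	/* sizeof(struct e1000_tx_desc) */

	/* round up: 80 descriptors = 1280 bytes -> 4096; 256 -> 4096 */
	return (size + 4096 - 1) & ~(4096UL - 1);
}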
2196
2197/**
2198 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2199 * @adapter: board private structure
2200 *
2201 * Returns 0 on success, negative on failure
2202 **/
2203int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2204{
2205 struct e1000_ring *rx_ring = adapter->rx_ring;
47f44e40
AK
2206 struct e1000_buffer *buffer_info;
2207 int i, size, desc_len, err = -ENOMEM;
bc7f75fa
AK
2208
2209 size = sizeof(struct e1000_buffer) * rx_ring->count;
89bf67f1 2210 rx_ring->buffer_info = vzalloc(size);
bc7f75fa
AK
2211 if (!rx_ring->buffer_info)
2212 goto err;
bc7f75fa 2213
47f44e40
AK
2214 for (i = 0; i < rx_ring->count; i++) {
2215 buffer_info = &rx_ring->buffer_info[i];
2216 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2217 sizeof(struct e1000_ps_page),
2218 GFP_KERNEL);
2219 if (!buffer_info->ps_pages)
2220 goto err_pages;
2221 }
bc7f75fa
AK
2222
2223 desc_len = sizeof(union e1000_rx_desc_packet_split);
2224
2225 /* Round up to nearest 4K */
2226 rx_ring->size = rx_ring->count * desc_len;
2227 rx_ring->size = ALIGN(rx_ring->size, 4096);
2228
2229 err = e1000_alloc_ring_dma(adapter, rx_ring);
2230 if (err)
47f44e40 2231 goto err_pages;
bc7f75fa
AK
2232
2233 rx_ring->next_to_clean = 0;
2234 rx_ring->next_to_use = 0;
2235 rx_ring->rx_skb_top = NULL;
2236
2237 return 0;
47f44e40
AK
2238
2239err_pages:
2240 for (i = 0; i < rx_ring->count; i++) {
2241 buffer_info = &rx_ring->buffer_info[i];
2242 kfree(buffer_info->ps_pages);
2243 }
bc7f75fa
AK
2244err:
2245 vfree(rx_ring->buffer_info);
e9262447 2246 e_err("Unable to allocate memory for the receive descriptor ring\n");
bc7f75fa
AK
2247 return err;
2248}
2249
2250/**
2251 * e1000_clean_tx_ring - Free Tx Buffers
2252 * @adapter: board private structure
2253 **/
2254static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
2255{
2256 struct e1000_ring *tx_ring = adapter->tx_ring;
2257 struct e1000_buffer *buffer_info;
2258 unsigned long size;
2259 unsigned int i;
2260
2261 for (i = 0; i < tx_ring->count; i++) {
2262 buffer_info = &tx_ring->buffer_info[i];
2263 e1000_put_txbuf(adapter, buffer_info);
2264 }
2265
3f0cfa3b 2266 netdev_reset_queue(adapter->netdev);
bc7f75fa
AK
2267 size = sizeof(struct e1000_buffer) * tx_ring->count;
2268 memset(tx_ring->buffer_info, 0, size);
2269
2270 memset(tx_ring->desc, 0, tx_ring->size);
2271
2272 tx_ring->next_to_use = 0;
2273 tx_ring->next_to_clean = 0;
2274
2275 writel(0, adapter->hw.hw_addr + tx_ring->head);
2276 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2277}
2278
2279/**
2280 * e1000e_free_tx_resources - Free Tx Resources per Queue
2281 * @adapter: board private structure
2282 *
2283 * Free all transmit software resources
2284 **/
2285void e1000e_free_tx_resources(struct e1000_adapter *adapter)
2286{
2287 struct pci_dev *pdev = adapter->pdev;
2288 struct e1000_ring *tx_ring = adapter->tx_ring;
2289
2290 e1000_clean_tx_ring(adapter);
2291
2292 vfree(tx_ring->buffer_info);
2293 tx_ring->buffer_info = NULL;
2294
2295 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2296 tx_ring->dma);
2297 tx_ring->desc = NULL;
2298}
2299
2300/**
2301 * e1000e_free_rx_resources - Free Rx Resources
2302 * @adapter: board private structure
2303 *
2304 * Free all receive software resources
2305 **/
2306
2307void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2308{
2309 struct pci_dev *pdev = adapter->pdev;
2310 struct e1000_ring *rx_ring = adapter->rx_ring;
47f44e40 2311 int i;
bc7f75fa
AK
2312
2313 e1000_clean_rx_ring(adapter);
2314
b1cdfead 2315 for (i = 0; i < rx_ring->count; i++)
47f44e40 2316 kfree(rx_ring->buffer_info[i].ps_pages);
47f44e40 2317
bc7f75fa
AK
2318 vfree(rx_ring->buffer_info);
2319 rx_ring->buffer_info = NULL;
2320
bc7f75fa
AK
2321 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2322 rx_ring->dma);
2323 rx_ring->desc = NULL;
2324}
2325
2326/**
2327 * e1000_update_itr - update the dynamic ITR value based on statistics
489815ce
AK
2328 * @adapter: pointer to adapter
2329 * @itr_setting: current adapter->itr
2330 * @packets: the number of packets during this measurement interval
2331 * @bytes: the number of bytes during this measurement interval
2332 *
bc7f75fa
AK
2333 * Stores a new ITR value based on packets and byte
2334 * counts during the last interrupt. The advantage of per interrupt
2335 * computation is faster updates and more accurate ITR for the current
2336 * traffic pattern. Constants in this function were computed
2337 * based on theoretical maximum wire speed and thresholds were set based
2338 * on testing data as well as attempting to minimize response time
4662e82b
BA
2339 * while increasing bulk throughput. This functionality is controlled
2340 * by the InterruptThrottleRate module parameter.
bc7f75fa
AK
2341 **/
2342static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2343 u16 itr_setting, int packets,
2344 int bytes)
2345{
2346 unsigned int retval = itr_setting;
2347
2348 if (packets == 0)
2349 goto update_itr_done;
2350
2351 switch (itr_setting) {
2352 case lowest_latency:
2353 /* handle TSO and jumbo frames */
2354 if (bytes/packets > 8000)
2355 retval = bulk_latency;
b1cdfead 2356 else if ((packets < 5) && (bytes > 512))
bc7f75fa 2357 retval = low_latency;
bc7f75fa
AK
2358 break;
2359 case low_latency: /* 50 usec aka 20000 ints/s */
2360 if (bytes > 10000) {
2361 /* this if handles the TSO accounting */
b1cdfead 2362 if (bytes/packets > 8000)
bc7f75fa 2363 retval = bulk_latency;
b1cdfead 2364 else if ((packets < 10) || ((bytes/packets) > 1200))
bc7f75fa 2365 retval = bulk_latency;
b1cdfead 2366 else if ((packets > 35))
bc7f75fa 2367 retval = lowest_latency;
bc7f75fa
AK
2368 } else if (bytes/packets > 2000) {
2369 retval = bulk_latency;
2370 } else if (packets <= 2 && bytes < 512) {
2371 retval = lowest_latency;
2372 }
2373 break;
2374 case bulk_latency: /* 250 usec aka 4000 ints/s */
2375 if (bytes > 25000) {
b1cdfead 2376 if (packets > 35)
bc7f75fa 2377 retval = low_latency;
bc7f75fa
AK
2378 } else if (bytes < 6000) {
2379 retval = low_latency;
2380 }
2381 break;
2382 }
2383
2384update_itr_done:
2385 return retval;
2386}
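/*
 * Worked example, illustration only: while in low_latency, an interval
 * that saw 30 packets / 45000 bytes gives 1500 bytes/packet, which is
 * above the 1200 threshold, so the function returns bulk_latency; an
 * interval of 2 packets / 300 bytes instead hits the
 * "packets <= 2 && bytes < 512" test and drops to lowest_latency.
 */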
2387
2388static void e1000_set_itr(struct e1000_adapter *adapter)
2389{
2390 struct e1000_hw *hw = &adapter->hw;
2391 u16 current_itr;
2392 u32 new_itr = adapter->itr;
2393
2394 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2395 if (adapter->link_speed != SPEED_1000) {
2396 current_itr = 0;
2397 new_itr = 4000;
2398 goto set_itr_now;
2399 }
2400
828bac87
BA
2401 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2402 new_itr = 0;
2403 goto set_itr_now;
2404 }
2405
bc7f75fa
AK
2406 adapter->tx_itr = e1000_update_itr(adapter,
2407 adapter->tx_itr,
2408 adapter->total_tx_packets,
2409 adapter->total_tx_bytes);
2410 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2411 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2412 adapter->tx_itr = low_latency;
2413
2414 adapter->rx_itr = e1000_update_itr(adapter,
2415 adapter->rx_itr,
2416 adapter->total_rx_packets,
2417 adapter->total_rx_bytes);
2418 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2419 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2420 adapter->rx_itr = low_latency;
2421
2422 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2423
2424 switch (current_itr) {
2425 /* counts and packets in update_itr are dependent on these numbers */
2426 case lowest_latency:
2427 new_itr = 70000;
2428 break;
2429 case low_latency:
2430 new_itr = 20000; /* aka hwitr = ~200 */
2431 break;
2432 case bulk_latency:
2433 new_itr = 4000;
2434 break;
2435 default:
2436 break;
2437 }
2438
2439set_itr_now:
2440 if (new_itr != adapter->itr) {
ad68076e
BA
2441 /*
2442 * this attempts to bias the interrupt rate towards Bulk
bc7f75fa 2443 * by adding intermediate steps when interrupt rate is
ad68076e
BA
2444 * increasing
2445 */
bc7f75fa
AK
2446 new_itr = new_itr > adapter->itr ?
2447 min(adapter->itr + (new_itr >> 2), new_itr) :
2448 new_itr;
2449 adapter->itr = new_itr;
4662e82b
BA
2450 adapter->rx_ring->itr_val = new_itr;
2451 if (adapter->msix_entries)
2452 adapter->rx_ring->set_itr = 1;
2453 else
828bac87
BA
2454 if (new_itr)
2455 ew32(ITR, 1000000000 / (new_itr * 256));
2456 else
2457 ew32(ITR, 0);
bc7f75fa
AK
2458 }
2459}
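/*
 * Illustrative sketch, not part of the driver: when the target rate is
 * higher than the current one, the "new_itr >> 2" term above ramps up
 * in quarter-of-target steps instead of jumping straight to the new
 * value.  The helper name is ours, for illustration only.
 */
static inline unsigned int itr_ramp_step(unsigned int cur, unsigned int target)
{
	unsigned int next = cur + (target >> 2);

	/* repeated calls: 4000 -> 9000 -> 14000 -> 19000 -> 20000 */
	return (target > cur) ? (next < target ? next : target) : target;
}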
2460
4662e82b
BA
2461/**
2462 * e1000_alloc_queues - Allocate memory for all rings
2463 * @adapter: board private structure to initialize
2464 **/
2465static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2466{
2467 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2468 if (!adapter->tx_ring)
2469 goto err;
2470
2471 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2472 if (!adapter->rx_ring)
2473 goto err;
2474
2475 return 0;
2476err:
2477 e_err("Unable to allocate memory for queues\n");
2478 kfree(adapter->rx_ring);
2479 kfree(adapter->tx_ring);
2480 return -ENOMEM;
2481}
2482
bc7f75fa
AK
2483/**
2484 * e1000_clean - NAPI Rx polling callback
ad68076e 2485 * @napi: struct associated with this polling callback
489815ce 2486 * @budget: number of packets the driver is allowed to process this poll
bc7f75fa
AK
2487 **/
2488static int e1000_clean(struct napi_struct *napi, int budget)
2489{
2490 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
4662e82b 2491 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 2492 struct net_device *poll_dev = adapter->netdev;
679e8a0f 2493 int tx_cleaned = 1, work_done = 0;
bc7f75fa 2494
4cf1653a 2495 adapter = netdev_priv(poll_dev);
bc7f75fa 2496
4662e82b
BA
2497 if (adapter->msix_entries &&
2498 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2499 goto clean_rx;
2500
92af3e95 2501 tx_cleaned = e1000_clean_tx_irq(adapter);
bc7f75fa 2502
4662e82b 2503clean_rx:
bc7f75fa 2504 adapter->clean_rx(adapter, &work_done, budget);
d2c7ddd6 2505
12d04a3c 2506 if (!tx_cleaned)
d2c7ddd6 2507 work_done = budget;
bc7f75fa 2508
53e52c72
DM
2509 /* If budget not fully consumed, exit the polling mode */
2510 if (work_done < budget) {
bc7f75fa
AK
2511 if (adapter->itr_setting & 3)
2512 e1000_set_itr(adapter);
288379f0 2513 napi_complete(napi);
a3c69fef
JB
2514 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2515 if (adapter->msix_entries)
2516 ew32(IMS, adapter->rx_ring->ims_val);
2517 else
2518 e1000_irq_enable(adapter);
2519 }
bc7f75fa
AK
2520 }
2521
2522 return work_done;
2523}
2524
8e586137 2525static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
bc7f75fa
AK
2526{
2527 struct e1000_adapter *adapter = netdev_priv(netdev);
2528 struct e1000_hw *hw = &adapter->hw;
2529 u32 vfta, index;
2530
2531 /* don't update vlan cookie if already programmed */
2532 if ((adapter->hw.mng_cookie.status &
2533 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2534 (vid == adapter->mng_vlan_id))
8e586137 2535 return 0;
caaddaf8 2536
bc7f75fa 2537 /* add VID to filter table */
caaddaf8
BA
2538 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2539 index = (vid >> 5) & 0x7F;
2540 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2541 vfta |= (1 << (vid & 0x1F));
2542 hw->mac.ops.write_vfta(hw, index, vfta);
2543 }
86d70e53
JK
2544
2545 set_bit(vid, adapter->active_vlans);
8e586137
JP
2546
2547 return 0;
bc7f75fa
AK
2548}
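/*
 * Illustrative sketch, not part of the driver: the VLAN filter table is
 * 128 32-bit registers covering all 4096 VIDs, so the upper seven bits
 * of the VID pick the register and the lower five pick the bit.  For
 * VID 100: index = 100 >> 5 = 3, bit = 100 & 0x1F = 4, i.e. bit 4 of
 * VFTA[3].  The helper name is ours.
 */
static inline void vfta_locate(unsigned short vid,
			       unsigned int *index, unsigned int *bit)
{
	*index = (vid >> 5) & 0x7F;	/* which 32-bit VFTA register */
	*bit = vid & 0x1F;		/* which bit within that register */
}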
2549
8e586137 2550static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
bc7f75fa
AK
2551{
2552 struct e1000_adapter *adapter = netdev_priv(netdev);
2553 struct e1000_hw *hw = &adapter->hw;
2554 u32 vfta, index;
2555
bc7f75fa
AK
2556 if ((adapter->hw.mng_cookie.status &
2557 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2558 (vid == adapter->mng_vlan_id)) {
2559 /* release control to f/w */
31dbe5b4 2560 e1000e_release_hw_control(adapter);
8e586137 2561 return 0;
bc7f75fa
AK
2562 }
2563
2564 /* remove VID from filter table */
caaddaf8
BA
2565 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2566 index = (vid >> 5) & 0x7F;
2567 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2568 vfta &= ~(1 << (vid & 0x1F));
2569 hw->mac.ops.write_vfta(hw, index, vfta);
2570 }
86d70e53
JK
2571
2572 clear_bit(vid, adapter->active_vlans);
8e586137
JP
2573
2574 return 0;
bc7f75fa
AK
2575}
2576
86d70e53
JK
2577/**
2578 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2579 * @adapter: board private structure to initialize
2580 **/
2581static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
bc7f75fa
AK
2582{
2583 struct net_device *netdev = adapter->netdev;
86d70e53
JK
2584 struct e1000_hw *hw = &adapter->hw;
2585 u32 rctl;
bc7f75fa 2586
86d70e53
JK
2587 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2588 /* disable VLAN receive filtering */
2589 rctl = er32(RCTL);
2590 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2591 ew32(RCTL, rctl);
2592
2593 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2594 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2595 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
bc7f75fa 2596 }
bc7f75fa
AK
2597 }
2598}
2599
86d70e53
JK
2600/**
2601 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2602 * @adapter: board private structure to initialize
2603 **/
2604static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2605{
2606 struct e1000_hw *hw = &adapter->hw;
2607 u32 rctl;
2608
2609 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2610 /* enable VLAN receive filtering */
2611 rctl = er32(RCTL);
2612 rctl |= E1000_RCTL_VFE;
2613 rctl &= ~E1000_RCTL_CFIEN;
2614 ew32(RCTL, rctl);
2615 }
2616}
bc7f75fa 2617
86d70e53
JK
2618/**
2619 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2620 * @adapter: board private structure to initialize
2621 **/
2622static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
bc7f75fa 2623{
bc7f75fa 2624 struct e1000_hw *hw = &adapter->hw;
86d70e53 2625 u32 ctrl;
bc7f75fa 2626
86d70e53
JK
2627 /* disable VLAN tag insert/strip */
2628 ctrl = er32(CTRL);
2629 ctrl &= ~E1000_CTRL_VME;
2630 ew32(CTRL, ctrl);
2631}
bc7f75fa 2632
86d70e53
JK
2633/**
2634 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2635 * @adapter: board private structure to initialize
2636 **/
2637static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2638{
2639 struct e1000_hw *hw = &adapter->hw;
2640 u32 ctrl;
bc7f75fa 2641
86d70e53
JK
2642 /* enable VLAN tag insert/strip */
2643 ctrl = er32(CTRL);
2644 ctrl |= E1000_CTRL_VME;
2645 ew32(CTRL, ctrl);
2646}
bc7f75fa 2647
86d70e53
JK
2648static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2649{
2650 struct net_device *netdev = adapter->netdev;
2651 u16 vid = adapter->hw.mng_cookie.vlan_id;
2652 u16 old_vid = adapter->mng_vlan_id;
2653
2654 if (adapter->hw.mng_cookie.status &
2655 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2656 e1000_vlan_rx_add_vid(netdev, vid);
2657 adapter->mng_vlan_id = vid;
bc7f75fa
AK
2658 }
2659
86d70e53
JK
2660 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2661 e1000_vlan_rx_kill_vid(netdev, old_vid);
bc7f75fa
AK
2662}
2663
2664static void e1000_restore_vlan(struct e1000_adapter *adapter)
2665{
2666 u16 vid;
2667
86d70e53 2668 e1000_vlan_rx_add_vid(adapter->netdev, 0);
bc7f75fa 2669
86d70e53 2670 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
bc7f75fa 2671 e1000_vlan_rx_add_vid(adapter->netdev, vid);
bc7f75fa
AK
2672}
2673
cd791618 2674static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
bc7f75fa
AK
2675{
2676 struct e1000_hw *hw = &adapter->hw;
cd791618 2677 u32 manc, manc2h, mdef, i, j;
bc7f75fa
AK
2678
2679 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2680 return;
2681
2682 manc = er32(MANC);
2683
ad68076e
BA
2684 /*
2685 * enable receiving management packets to the host. this will probably
bc7f75fa 2686 * generate destination unreachable messages from the host OS, but
ad68076e
BA
2687 * the packets will be handled on SMBUS
2688 */
bc7f75fa
AK
2689 manc |= E1000_MANC_EN_MNG2HOST;
2690 manc2h = er32(MANC2H);
cd791618
BA
2691
2692 switch (hw->mac.type) {
2693 default:
2694 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2695 break;
2696 case e1000_82574:
2697 case e1000_82583:
2698 /*
2699 * Check if IPMI pass-through decision filter already exists;
2700 * if so, enable it.
2701 */
2702 for (i = 0, j = 0; i < 8; i++) {
2703 mdef = er32(MDEF(i));
2704
2705 /* Ignore filters with anything other than IPMI ports */
3b21b508 2706 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
cd791618
BA
2707 continue;
2708
2709 /* Enable this decision filter in MANC2H */
2710 if (mdef)
2711 manc2h |= (1 << i);
2712
2713 j |= mdef;
2714 }
2715
2716 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2717 break;
2718
2719 /* Create new decision filter in an empty filter */
2720 for (i = 0, j = 0; i < 8; i++)
2721 if (er32(MDEF(i)) == 0) {
2722 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2723 E1000_MDEF_PORT_664));
2724 manc2h |= (1 << i);
2725 j++;
2726 break;
2727 }
2728
2729 if (!j)
2730 e_warn("Unable to create IPMI pass-through filter\n");
2731 break;
2732 }
2733
bc7f75fa
AK
2734 ew32(MANC2H, manc2h);
2735 ew32(MANC, manc);
2736}
2737
2738/**
af667a29 2739 * e1000_configure_tx - Configure Transmit Unit after Reset
bc7f75fa
AK
2740 * @adapter: board private structure
2741 *
2742 * Configure the Tx unit of the MAC after a reset.
2743 **/
2744static void e1000_configure_tx(struct e1000_adapter *adapter)
2745{
2746 struct e1000_hw *hw = &adapter->hw;
2747 struct e1000_ring *tx_ring = adapter->tx_ring;
2748 u64 tdba;
2749 u32 tdlen, tctl, tipg, tarc;
2750 u32 ipgr1, ipgr2;
2751
2752 /* Setup the HW Tx Head and Tail descriptor pointers */
2753 tdba = tx_ring->dma;
2754 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
284901a9 2755 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
bc7f75fa
AK
2756 ew32(TDBAH, (tdba >> 32));
2757 ew32(TDLEN, tdlen);
2758 ew32(TDH, 0);
2759 ew32(TDT, 0);
2760 tx_ring->head = E1000_TDH;
2761 tx_ring->tail = E1000_TDT;
2762
2763 /* Set the default values for the Tx Inter Packet Gap timer */
2764 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2765 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2766 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2767
2768 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2769 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2770
2771 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2772 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2773 ew32(TIPG, tipg);
2774
2775 /* Set the Tx Interrupt Delay register */
2776 ew32(TIDV, adapter->tx_int_delay);
ad68076e 2777 /* Tx irq moderation */
bc7f75fa
AK
2778 ew32(TADV, adapter->tx_abs_int_delay);
2779
3a3b7586
JB
2780 if (adapter->flags2 & FLAG2_DMA_BURST) {
2781 u32 txdctl = er32(TXDCTL(0));
2782 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2783 E1000_TXDCTL_WTHRESH);
2784 /*
2785 * set up some performance related parameters to encourage the
2786 * hardware to use the bus more efficiently in bursts, depends
2787 * on the tx_int_delay to be enabled,
2788 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
2789 * hthresh = 1 ==> prefetch when one or more available
2790 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2791 * BEWARE: this seems to work but should be considered first if
af667a29 2792 * there are Tx hangs or other Tx related bugs
3a3b7586
JB
2793 */
2794 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2795 ew32(TXDCTL(0), txdctl);
2796 /* erratum work around: set txdctl the same for both queues */
2797 ew32(TXDCTL(1), txdctl);
2798 }
2799
bc7f75fa
AK
2800 /* Program the Transmit Control Register */
2801 tctl = er32(TCTL);
2802 tctl &= ~E1000_TCTL_CT;
2803 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2804 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2805
2806 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
e9ec2c0f 2807 tarc = er32(TARC(0));
ad68076e
BA
2808 /*
2809 * set the speed mode bit, we'll clear it if we're not at
2810 * gigabit link later
2811 */
bc7f75fa
AK
2812#define SPEED_MODE_BIT (1 << 21)
2813 tarc |= SPEED_MODE_BIT;
e9ec2c0f 2814 ew32(TARC(0), tarc);
bc7f75fa
AK
2815 }
2816
2817 /* errata: program both queues to unweighted RR */
2818 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
e9ec2c0f 2819 tarc = er32(TARC(0));
bc7f75fa 2820 tarc |= 1;
e9ec2c0f
JK
2821 ew32(TARC(0), tarc);
2822 tarc = er32(TARC(1));
bc7f75fa 2823 tarc |= 1;
e9ec2c0f 2824 ew32(TARC(1), tarc);
bc7f75fa
AK
2825 }
2826
bc7f75fa
AK
2827 /* Setup Transmit Descriptor Settings for eop descriptor */
2828 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2829
2830 /* only set IDE if we are delaying interrupts using the timers */
2831 if (adapter->tx_int_delay)
2832 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2833
2834 /* enable Report Status bit */
2835 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2836
2837 ew32(TCTL, tctl);
2838
edfea6e6 2839 e1000e_config_collision_dist(hw);
bc7f75fa
AK
2840}
2841
2842/**
2843 * e1000_setup_rctl - configure the receive control registers
2844 * @adapter: Board private structure
2845 **/
2846#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2847 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2848static void e1000_setup_rctl(struct e1000_adapter *adapter)
2849{
2850 struct e1000_hw *hw = &adapter->hw;
2851 u32 rctl, rfctl;
bc7f75fa
AK
2852 u32 pages = 0;
2853
a1ce6473
BA
2854 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2855 if (hw->mac.type == e1000_pch2lan) {
2856 s32 ret_val;
2857
2858 if (adapter->netdev->mtu > ETH_DATA_LEN)
2859 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2860 else
2861 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
dd93f95e
BA
2862
2863 if (ret_val)
2864 e_dbg("failed to enable jumbo frame workaround mode\n");
a1ce6473
BA
2865 }
2866
bc7f75fa
AK
2867 /* Program MC offset vector base */
2868 rctl = er32(RCTL);
2869 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2870 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2871 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2872 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2873
2874 /* Do not Store bad packets */
2875 rctl &= ~E1000_RCTL_SBP;
2876
2877 /* Enable Long Packet receive */
2878 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2879 rctl &= ~E1000_RCTL_LPE;
2880 else
2881 rctl |= E1000_RCTL_LPE;
2882
eb7c3adb
JK
2883 /* Some systems expect that the CRC is included in SMBUS traffic. The
2884 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2885 * host memory when this is enabled
2886 */
2887 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2888 rctl |= E1000_RCTL_SECRC;
5918bd88 2889
a4f58f54
BA
2890 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2891 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2892 u16 phy_data;
2893
2894 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2895 phy_data &= 0xfff8;
2896 phy_data |= (1 << 2);
2897 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2898
2899 e1e_rphy(hw, 22, &phy_data);
2900 phy_data &= 0x0fff;
2901 phy_data |= (1 << 14);
2902 e1e_wphy(hw, 0x10, 0x2823);
2903 e1e_wphy(hw, 0x11, 0x0003);
2904 e1e_wphy(hw, 22, phy_data);
2905 }
2906
bc7f75fa
AK
2907 /* Setup buffer sizes */
2908 rctl &= ~E1000_RCTL_SZ_4096;
2909 rctl |= E1000_RCTL_BSEX;
2910 switch (adapter->rx_buffer_len) {
bc7f75fa
AK
2911 case 2048:
2912 default:
2913 rctl |= E1000_RCTL_SZ_2048;
2914 rctl &= ~E1000_RCTL_BSEX;
2915 break;
2916 case 4096:
2917 rctl |= E1000_RCTL_SZ_4096;
2918 break;
2919 case 8192:
2920 rctl |= E1000_RCTL_SZ_8192;
2921 break;
2922 case 16384:
2923 rctl |= E1000_RCTL_SZ_16384;
2924 break;
2925 }
2926
5f450212
BA
2927 /* Enable Extended Status in all Receive Descriptors */
2928 rfctl = er32(RFCTL);
2929 rfctl |= E1000_RFCTL_EXTEN;
2930
bc7f75fa
AK
2931 /*
2932 * 82571 and greater support packet-split where the protocol
2933 * header is placed in skb->data and the packet data is
2934 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2935 * In the case of a non-split, skb->data is linearly filled,
2936 * followed by the page buffers. Therefore, skb->data is
2937 * sized to hold the largest protocol header.
2938 *
2939 * allocations using alloc_page take too long for regular MTU
2940 * so only enable packet split for jumbo frames
2941 *
2942 * Using pages when the page size is greater than 16k wastes
2943 * a lot of memory, since we allocate 3 pages at all times
2944 * per packet.
2945 */
bc7f75fa 2946 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
dbcb9fec 2947 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
97ac8cae 2948 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
bc7f75fa 2949 adapter->rx_ps_pages = pages;
97ac8cae
BA
2950 else
2951 adapter->rx_ps_pages = 0;
bc7f75fa
AK
2952
2953 if (adapter->rx_ps_pages) {
90da0669
BA
2954 u32 psrctl = 0;
2955
ad68076e
BA
2956 /*
2957 * disable packet split support for IPv6 extension headers,
2958 * because some malformed IPv6 headers can hang the Rx
2959 */
bc7f75fa
AK
2960 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2961 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2962
140a7480
AK
2963 /* Enable Packet split descriptors */
2964 rctl |= E1000_RCTL_DTYP_PS;
bc7f75fa
AK
2965
2966 psrctl |= adapter->rx_ps_bsize0 >>
2967 E1000_PSRCTL_BSIZE0_SHIFT;
2968
2969 switch (adapter->rx_ps_pages) {
2970 case 3:
2971 psrctl |= PAGE_SIZE <<
2972 E1000_PSRCTL_BSIZE3_SHIFT;
2973 case 2:
2974 psrctl |= PAGE_SIZE <<
2975 E1000_PSRCTL_BSIZE2_SHIFT;
2976 case 1:
2977 psrctl |= PAGE_SIZE >>
2978 E1000_PSRCTL_BSIZE1_SHIFT;
2979 break;
2980 }
2981
2982 ew32(PSRCTL, psrctl);
2983 }
2984
5f450212 2985 ew32(RFCTL, rfctl);
bc7f75fa 2986 ew32(RCTL, rctl);
318a94d6
JK
2987 /* just started the receive unit, no need to restart */
2988 adapter->flags &= ~FLAG_RX_RESTART_NOW;
bc7f75fa
AK
2989}
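/*
 * Worked example, illustration only: with 4 KB pages, PAGE_USE_COUNT()
 * above gives (9000 >> 12) + 1 = 3 pages for a 9000-byte MTU, so jumbo
 * MTUs turn on packet split (rx_ps_pages = 3) on parts without ERT once
 * LPE is set; a 1500-byte MTU needs only one page but leaves LPE clear,
 * so packet split stays off for standard frames.
 */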
2990
2991/**
2992 * e1000_configure_rx - Configure Receive Unit after Reset
2993 * @adapter: board private structure
2994 *
2995 * Configure the Rx unit of the MAC after a reset.
2996 **/
2997static void e1000_configure_rx(struct e1000_adapter *adapter)
2998{
2999 struct e1000_hw *hw = &adapter->hw;
3000 struct e1000_ring *rx_ring = adapter->rx_ring;
3001 u64 rdba;
3002 u32 rdlen, rctl, rxcsum, ctrl_ext;
3003
3004 if (adapter->rx_ps_pages) {
3005 /* this is a 32 byte descriptor */
3006 rdlen = rx_ring->count *
af667a29 3007 sizeof(union e1000_rx_desc_packet_split);
bc7f75fa
AK
3008 adapter->clean_rx = e1000_clean_rx_irq_ps;
3009 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
97ac8cae 3010 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
5f450212 3011 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
97ac8cae
BA
3012 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3013 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
bc7f75fa 3014 } else {
5f450212 3015 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
bc7f75fa
AK
3016 adapter->clean_rx = e1000_clean_rx_irq;
3017 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3018 }
3019
3020 /* disable receives while setting up the descriptors */
3021 rctl = er32(RCTL);
7f99ae63
BA
3022 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3023 ew32(RCTL, rctl & ~E1000_RCTL_EN);
bc7f75fa 3024 e1e_flush();
1bba4386 3025 usleep_range(10000, 20000);
bc7f75fa 3026
3a3b7586
JB
3027 if (adapter->flags2 & FLAG2_DMA_BURST) {
3028 /*
3029 * set the writeback threshold (only takes effect if the RDTR
3030 * is set). set GRAN=1 and write back up to 0x4 worth, and
af667a29 3031 * enable prefetching of 0x20 Rx descriptors
3a3b7586
JB
3032 * granularity = 01
3033 * wthresh = 04,
3034 * hthresh = 04,
3035 * pthresh = 0x20
3036 */
3037 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3038 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3039
3040 /*
3041 * override the delay timers for enabling bursting, only if
3042 * the value was not set by the user via module options
3043 */
3044 if (adapter->rx_int_delay == DEFAULT_RDTR)
3045 adapter->rx_int_delay = BURST_RDTR;
3046 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3047 adapter->rx_abs_int_delay = BURST_RADV;
3048 }
3049
bc7f75fa
AK
3050 /* set the Receive Delay Timer Register */
3051 ew32(RDTR, adapter->rx_int_delay);
3052
3053 /* irq moderation */
3054 ew32(RADV, adapter->rx_abs_int_delay);
828bac87 3055 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
ad68076e 3056 ew32(ITR, 1000000000 / (adapter->itr * 256));
bc7f75fa
AK
3057
3058 ctrl_ext = er32(CTRL_EXT);
bc7f75fa
AK
3059 /* Auto-Mask interrupts upon ICR access */
3060 ctrl_ext |= E1000_CTRL_EXT_IAME;
3061 ew32(IAM, 0xffffffff);
3062 ew32(CTRL_EXT, ctrl_ext);
3063 e1e_flush();
3064
ad68076e
BA
3065 /*
3066 * Setup the HW Rx Head and Tail Descriptor Pointers and
3067 * the Base and Length of the Rx Descriptor Ring
3068 */
bc7f75fa 3069 rdba = rx_ring->dma;
284901a9 3070 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
bc7f75fa
AK
3071 ew32(RDBAH, (rdba >> 32));
3072 ew32(RDLEN, rdlen);
3073 ew32(RDH, 0);
3074 ew32(RDT, 0);
3075 rx_ring->head = E1000_RDH;
3076 rx_ring->tail = E1000_RDT;
3077
3078 /* Enable Receive Checksum Offload for TCP and UDP */
3079 rxcsum = er32(RXCSUM);
dc221294 3080 if (adapter->netdev->features & NETIF_F_RXCSUM) {
bc7f75fa
AK
3081 rxcsum |= E1000_RXCSUM_TUOFL;
3082
ad68076e
BA
3083 /*
3084 * IPv4 payload checksum for UDP fragments must be
3085 * used in conjunction with packet-split.
3086 */
bc7f75fa
AK
3087 if (adapter->rx_ps_pages)
3088 rxcsum |= E1000_RXCSUM_IPPCSE;
3089 } else {
3090 rxcsum &= ~E1000_RXCSUM_TUOFL;
3091 /* no need to clear IPPCSE as it defaults to 0 */
3092 }
3093 ew32(RXCSUM, rxcsum);
3094
ad68076e
BA
3095 /*
3096 * Enable early receives on supported devices, only takes effect when
bc7f75fa 3097 * packet size is equal or larger than the specified value (in 8 byte
ad68076e
BA
3098 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
3099 */
828bac87
BA
3100 if ((adapter->flags & FLAG_HAS_ERT) ||
3101 (adapter->hw.mac.type == e1000_pch2lan)) {
53ec5498
BA
3102 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3103 u32 rxdctl = er32(RXDCTL(0));
3104 ew32(RXDCTL(0), rxdctl | 0x3);
828bac87
BA
3105 if (adapter->flags & FLAG_HAS_ERT)
3106 ew32(ERT, E1000_ERT_2048 | (1 << 13));
53ec5498
BA
3107 /*
3108 * With jumbo frames and early-receive enabled,
3109 * excessive C-state transition latencies result in
3110 * dropped transactions.
3111 */
af667a29 3112 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
53ec5498 3113 } else {
af667a29
BA
3114 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3115 PM_QOS_DEFAULT_VALUE);
53ec5498 3116 }
97ac8cae 3117 }
bc7f75fa
AK
3118
3119 /* Enable Receives */
3120 ew32(RCTL, rctl);
3121}
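/*
 * Illustrative sketch, not part of the driver: the descriptor ring base
 * is a 64-bit DMA address programmed as two 32-bit halves (RDBAL/RDBAH,
 * and likewise TDBAL/TDBAH for Tx).  The helper name is ours.
 */
static inline void ring_base_split(unsigned long long dma,
				   unsigned int *lo, unsigned int *hi)
{
	*lo = (unsigned int)(dma & 0xffffffff);	/* -> RDBAL */
	*hi = (unsigned int)(dma >> 32);	/* -> RDBAH */
}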
3122
3123/**
ef9b965a
JB
3124 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3125 * @netdev: network interface device structure
bc7f75fa 3126 *
ef9b965a
JB
3127 * Writes multicast address list to the MTA hash table.
3128 * Returns: -ENOMEM on failure
3129 * 0 on no addresses written
3130 * X on writing X addresses to MTA
3131 */
3132static int e1000e_write_mc_addr_list(struct net_device *netdev)
3133{
3134 struct e1000_adapter *adapter = netdev_priv(netdev);
3135 struct e1000_hw *hw = &adapter->hw;
3136 struct netdev_hw_addr *ha;
3137 u8 *mta_list;
3138 int i;
3139
3140 if (netdev_mc_empty(netdev)) {
3141 /* nothing to program, so clear mc list */
3142 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3143 return 0;
3144 }
3145
3146 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3147 if (!mta_list)
3148 return -ENOMEM;
3149
3150 /* update_mc_addr_list expects a packed array of only addresses. */
3151 i = 0;
3152 netdev_for_each_mc_addr(ha, netdev)
3153 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3154
3155 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3156 kfree(mta_list);
3157
3158 return netdev_mc_count(netdev);
3159}
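/*
 * Worked example, illustration only: the loop above flattens the
 * multicast list into one packed byte array, each 6-byte (ETH_ALEN)
 * address directly after the previous one, so the k-th address lives at
 * mta_list + k * 6; three addresses need an 18-byte buffer, which is
 * exactly what the kzalloc() above sizes.
 */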
3160
3161/**
3162 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3163 * @netdev: network interface device structure
bc7f75fa 3164 *
ef9b965a
JB
3165 * Writes unicast address list to the RAR table.
3166 * Returns: -ENOMEM on failure/insufficient address space
3167 * 0 on no addresses written
3168 * X on writing X addresses to the RAR table
bc7f75fa 3169 **/
ef9b965a 3170static int e1000e_write_uc_addr_list(struct net_device *netdev)
bc7f75fa 3171{
ef9b965a
JB
3172 struct e1000_adapter *adapter = netdev_priv(netdev);
3173 struct e1000_hw *hw = &adapter->hw;
3174 unsigned int rar_entries = hw->mac.rar_entry_count;
3175 int count = 0;
3176
3177 /* save a rar entry for our hardware address */
3178 rar_entries--;
3179
3180 /* save a rar entry for the LAA workaround */
3181 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3182 rar_entries--;
3183
3184 /* return ENOMEM indicating insufficient memory for addresses */
3185 if (netdev_uc_count(netdev) > rar_entries)
3186 return -ENOMEM;
3187
3188 if (!netdev_uc_empty(netdev) && rar_entries) {
3189 struct netdev_hw_addr *ha;
3190
3191 /*
3192 * write the addresses in reverse order to avoid write
3193 * combining
3194 */
3195 netdev_for_each_uc_addr(ha, netdev) {
3196 if (!rar_entries)
3197 break;
3198 e1000e_rar_set(hw, ha->addr, rar_entries--);
3199 count++;
3200 }
3201 }
3202
3203 /* zero out the remaining RAR entries not used above */
3204 for (; rar_entries > 0; rar_entries--) {
3205 ew32(RAH(rar_entries), 0);
3206 ew32(RAL(rar_entries), 0);
3207 }
3208 e1e_flush();
3209
3210 return count;
bc7f75fa
AK
3211}
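/*
 * Worked example, illustration only: with a hypothetical
 * rar_entry_count of 15, one RAR slot is reserved for the permanent MAC
 * address and, when FLAG_RESET_OVERWRITES_LAA is set, one more for the
 * LAA workaround, leaving 13 slots for secondary unicast addresses.  A
 * longer list makes this function return -ENOMEM, and the caller in
 * e1000e_set_rx_mode() then falls back to unicast promiscuous mode.
 */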
3212
3213/**
ef9b965a 3214 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
bc7f75fa
AK
3215 * @netdev: network interface device structure
3216 *
ef9b965a
JB
3217 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3218 * address list or the network interface flags are updated. This routine is
3219 * responsible for configuring the hardware for proper unicast, multicast,
bc7f75fa
AK
3220 * promiscuous mode, and all-multi behavior.
3221 **/
ef9b965a 3222static void e1000e_set_rx_mode(struct net_device *netdev)
bc7f75fa
AK
3223{
3224 struct e1000_adapter *adapter = netdev_priv(netdev);
3225 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 3226 u32 rctl;
bc7f75fa
AK
3227
3228 /* Check for Promiscuous and All Multicast modes */
bc7f75fa
AK
3229 rctl = er32(RCTL);
3230
ef9b965a
JB
3231 /* clear the affected bits */
3232 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3233
bc7f75fa
AK
3234 if (netdev->flags & IFF_PROMISC) {
3235 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
86d70e53
JK
3236 /* Do not hardware filter VLANs in promisc mode */
3237 e1000e_vlan_filter_disable(adapter);
bc7f75fa 3238 } else {
ef9b965a 3239 int count;
746b9f02
PM
3240 if (netdev->flags & IFF_ALLMULTI) {
3241 rctl |= E1000_RCTL_MPE;
746b9f02 3242 } else {
ef9b965a
JB
3243 /*
3244 * Write addresses to the MTA, if the attempt fails
3245 * then we should just turn on promiscuous mode so
3246 * that we can at least receive multicast traffic
3247 */
3248 count = e1000e_write_mc_addr_list(netdev);
3249 if (count < 0)
3250 rctl |= E1000_RCTL_MPE;
746b9f02 3251 }
86d70e53 3252 e1000e_vlan_filter_enable(adapter);
bc7f75fa 3253 /*
ef9b965a
JB
3254 * Write addresses to available RAR registers, if there is not
3255 * sufficient space to store all the addresses then enable
3256 * unicast promiscuous mode
bc7f75fa 3257 */
ef9b965a
JB
3258 count = e1000e_write_uc_addr_list(netdev);
3259 if (count < 0)
3260 rctl |= E1000_RCTL_UPE;
bc7f75fa 3261 }
86d70e53 3262
ef9b965a
JB
3263 ew32(RCTL, rctl);
3264
86d70e53
JK
3265 if (netdev->features & NETIF_F_HW_VLAN_RX)
3266 e1000e_vlan_strip_enable(adapter);
3267 else
3268 e1000e_vlan_strip_disable(adapter);
bc7f75fa
AK
3269}
3270
3271/**
ad68076e 3272 * e1000_configure - configure the hardware for Rx and Tx
bc7f75fa
AK
3273 * @adapter: private board structure
3274 **/
3275static void e1000_configure(struct e1000_adapter *adapter)
3276{
ef9b965a 3277 e1000e_set_rx_mode(adapter->netdev);
bc7f75fa
AK
3278
3279 e1000_restore_vlan(adapter);
cd791618 3280 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
3281
3282 e1000_configure_tx(adapter);
3283 e1000_setup_rctl(adapter);
3284 e1000_configure_rx(adapter);
c2fed996
JK
3285 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
3286 GFP_KERNEL);
bc7f75fa
AK
3287}
3288
3289/**
3290 * e1000e_power_up_phy - restore link in case the phy was powered down
3291 * @adapter: address of board private structure
3292 *
3293 * The phy may be powered down to save power and turn off link when the
3294 * driver is unloaded and wake on lan is not enabled (among others)
3295 * *** this routine MUST be followed by a call to e1000e_reset ***
3296 **/
3297void e1000e_power_up_phy(struct e1000_adapter *adapter)
3298{
17f208de
BA
3299 if (adapter->hw.phy.ops.power_up)
3300 adapter->hw.phy.ops.power_up(&adapter->hw);
bc7f75fa
AK
3301
3302 adapter->hw.mac.ops.setup_link(&adapter->hw);
3303}
3304
3305/**
3306 * e1000_power_down_phy - Power down the PHY
3307 *
17f208de
BA
3308 * Power down the PHY so no link is implied when interface is down.
3309 * The PHY cannot be powered down if management or WoL is active.
bc7f75fa
AK
3310 */
3311static void e1000_power_down_phy(struct e1000_adapter *adapter)
3312{
bc7f75fa 3313 /* WoL is enabled */
23b66e2b 3314 if (adapter->wol)
bc7f75fa
AK
3315 return;
3316
17f208de
BA
3317 if (adapter->hw.phy.ops.power_down)
3318 adapter->hw.phy.ops.power_down(&adapter->hw);
bc7f75fa
AK
3319}
3320
3321/**
3322 * e1000e_reset - bring the hardware into a known good state
3323 *
3324 * This function boots the hardware and enables some settings that
3325 * require a configuration cycle of the hardware - those cannot be
3326 * set/changed during runtime. After reset the device needs to be
ad68076e 3327 * properly configured for Rx, Tx etc.
bc7f75fa
AK
3328 */
3329void e1000e_reset(struct e1000_adapter *adapter)
3330{
3331 struct e1000_mac_info *mac = &adapter->hw.mac;
318a94d6 3332 struct e1000_fc_info *fc = &adapter->hw.fc;
bc7f75fa
AK
3333 struct e1000_hw *hw = &adapter->hw;
3334 u32 tx_space, min_tx_space, min_rx_space;
318a94d6 3335 u32 pba = adapter->pba;
bc7f75fa
AK
3336 u16 hwm;
3337
ad68076e 3338 /* reset Packet Buffer Allocation to default */
318a94d6 3339 ew32(PBA, pba);
df762464 3340
318a94d6 3341 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
ad68076e
BA
3342 /*
3343 * To maintain wire speed transmits, the Tx FIFO should be
bc7f75fa
AK
3344 * large enough to accommodate two full transmit packets,
3345 * rounded up to the next 1KB and expressed in KB. Likewise,
3346 * the Rx FIFO should be large enough to accommodate at least
3347 * one full receive packet and is similarly rounded up and
ad68076e
BA
3348 * expressed in KB.
3349 */
df762464 3350 pba = er32(PBA);
bc7f75fa 3351 /* upper 16 bits has Tx packet buffer allocation size in KB */
df762464 3352 tx_space = pba >> 16;
bc7f75fa 3353 /* lower 16 bits has Rx packet buffer allocation size in KB */
df762464 3354 pba &= 0xffff;
ad68076e 3355 /*
af667a29 3356 * the Tx fifo also stores 16 bytes of information about the Tx
ad68076e 3357 * but don't include ethernet FCS because hardware appends it
318a94d6
JK
3358 */
3359 min_tx_space = (adapter->max_frame_size +
bc7f75fa
AK
3360 sizeof(struct e1000_tx_desc) -
3361 ETH_FCS_LEN) * 2;
3362 min_tx_space = ALIGN(min_tx_space, 1024);
3363 min_tx_space >>= 10;
3364 /* software strips receive CRC, so leave room for it */
318a94d6 3365 min_rx_space = adapter->max_frame_size;
bc7f75fa
AK
3366 min_rx_space = ALIGN(min_rx_space, 1024);
3367 min_rx_space >>= 10;
3368
ad68076e
BA
3369 /*
3370 * If current Tx allocation is less than the min Tx FIFO size,
bc7f75fa 3371 * and the min Tx FIFO size is less than the current Rx FIFO
ad68076e
BA
3372 * allocation, take space away from current Rx allocation
3373 */
df762464
AK
3374 if ((tx_space < min_tx_space) &&
3375 ((min_tx_space - tx_space) < pba)) {
3376 pba -= min_tx_space - tx_space;
bc7f75fa 3377
ad68076e 3378 /*
af667a29 3379 * if short on Rx space, Rx wins and must trump Tx
ad68076e
BA
3380 * adjustment or use Early Receive if available
3381 */
df762464 3382 if ((pba < min_rx_space) &&
bc7f75fa
AK
3383 (!(adapter->flags & FLAG_HAS_ERT)))
3384 /* ERT enabled in e1000_configure_rx */
df762464 3385 pba = min_rx_space;
bc7f75fa 3386 }
df762464
AK
3387
3388 ew32(PBA, pba);
bc7f75fa
AK
3389 }
3390
ad68076e
BA
3391 /*
3392 * flow control settings
3393 *
38eb394e 3394 * The high water mark must be low enough to fit one full frame
bc7f75fa
AK
3395 * (or the size used for early receive) above it in the Rx FIFO.
 3396	 * Set it to the lowest of:
3397 * - 90% of the Rx FIFO size, and
3398 * - the full Rx FIFO size minus the early receive size (for parts
3399 * with ERT support assuming ERT set to E1000_ERT_2048), or
38eb394e 3400 * - the full Rx FIFO size minus one full frame
ad68076e 3401 */
d3738bb8
BA
3402 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3403 fc->pause_time = 0xFFFF;
3404 else
3405 fc->pause_time = E1000_FC_PAUSE_TIME;
3406 fc->send_xon = 1;
3407 fc->current_mode = fc->requested_mode;
3408
3409 switch (hw->mac.type) {
3410 default:
3411 if ((adapter->flags & FLAG_HAS_ERT) &&
3412 (adapter->netdev->mtu > ETH_DATA_LEN))
3413 hwm = min(((pba << 10) * 9 / 10),
3414 ((pba << 10) - (E1000_ERT_2048 << 3)));
3415 else
3416 hwm = min(((pba << 10) * 9 / 10),
3417 ((pba << 10) - adapter->max_frame_size));
3418
3419 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3420 fc->low_water = fc->high_water - 8;
3421 break;
3422 case e1000_pchlan:
38eb394e
BA
3423 /*
3424 * Workaround PCH LOM adapter hangs with certain network
3425 * loads. If hangs persist, try disabling Tx flow control.
3426 */
3427 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3428 fc->high_water = 0x3500;
3429 fc->low_water = 0x1500;
3430 } else {
3431 fc->high_water = 0x5000;
3432 fc->low_water = 0x3000;
3433 }
a305595b 3434 fc->refresh_time = 0x1000;
d3738bb8
BA
3435 break;
3436 case e1000_pch2lan:
3437 fc->high_water = 0x05C20;
3438 fc->low_water = 0x05048;
3439 fc->pause_time = 0x0650;
3440 fc->refresh_time = 0x0400;
828bac87
BA
3441 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3442 pba = 14;
3443 ew32(PBA, pba);
3444 }
d3738bb8 3445 break;
38eb394e 3446 }
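	/*
	 * Illustrative walk-through of the default high/low water mark
	 * calculation above (example numbers only, not tied to any
	 * specific part): with pba = 20 KB and max_frame_size = 1518,
	 *   pba << 10               = 20480 bytes of Rx packet buffer
	 *   90% of the buffer       = 18432 bytes
	 *   buffer minus one frame  = 18962 bytes
	 *   hwm = min(18432, 18962) = 18432
	 * Masking with E1000_FCRTH_RTH keeps the 8-byte granularity, so
	 * fc->high_water = 18432 and fc->low_water = 18424.
	 */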
bc7f75fa 3447
828bac87
BA
3448 /*
3449 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 3450	 * fit in the receive buffer and early-receive is not supported.
3451 */
3452 if (adapter->itr_setting & 0x3) {
3453 if (((adapter->max_frame_size * 2) > (pba << 10)) &&
3454 !(adapter->flags & FLAG_HAS_ERT)) {
3455 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3456 dev_info(&adapter->pdev->dev,
3457 "Interrupt Throttle Rate turned off\n");
3458 adapter->flags2 |= FLAG2_DISABLE_AIM;
3459 ew32(ITR, 0);
3460 }
3461 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3462 dev_info(&adapter->pdev->dev,
3463 "Interrupt Throttle Rate turned on\n");
3464 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3465 adapter->itr = 20000;
3466 ew32(ITR, 1000000000 / (adapter->itr * 256));
3467 }
3468 }
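	/*
	 * Illustrative arithmetic for the ITR write above (assumed example
	 * value): with adapter->itr = 20000 interrupts/sec,
	 *   1000000000 / (20000 * 256) = 195
	 * The ITR register holds the interval in 256 ns units, which the
	 * 256 factor accounts for: 195 * 256 ns is roughly 50 us between
	 * interrupts, i.e. about 20000 interrupts per second.
	 */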
3469
bc7f75fa
AK
3470 /* Allow time for pending master requests to run */
3471 mac->ops.reset_hw(hw);
97ac8cae
BA
3472
3473 /*
3474 * For parts with AMT enabled, let the firmware know
3475 * that the network interface is in control
3476 */
c43bc57e 3477 if (adapter->flags & FLAG_HAS_AMT)
31dbe5b4 3478 e1000e_get_hw_control(adapter);
97ac8cae 3479
bc7f75fa
AK
3480 ew32(WUC, 0);
3481
3482 if (mac->ops.init_hw(hw))
44defeb3 3483 e_err("Hardware Error\n");
bc7f75fa
AK
3484
3485 e1000_update_mng_vlan(adapter);
3486
3487 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3488 ew32(VET, ETH_P_8021Q);
3489
3490 e1000e_reset_adaptive(hw);
31dbe5b4
BA
3491
3492 if (!netif_running(adapter->netdev) &&
3493 !test_bit(__E1000_TESTING, &adapter->state)) {
3494 e1000_power_down_phy(adapter);
3495 return;
3496 }
3497
bc7f75fa
AK
3498 e1000_get_phy_info(hw);
3499
918d7197
BA
3500 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3501 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
bc7f75fa 3502 u16 phy_data = 0;
ad68076e
BA
3503 /*
 3504		 * speed up time to link by disabling smart power down; ignore
bc7f75fa 3505 * the return value of this function because there is nothing
ad68076e
BA
3506 * different we would do if it failed
3507 */
bc7f75fa
AK
3508 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3509 phy_data &= ~IGP02E1000_PM_SPD;
3510 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3511 }
bc7f75fa
AK
3512}
3513
3514int e1000e_up(struct e1000_adapter *adapter)
3515{
3516 struct e1000_hw *hw = &adapter->hw;
3517
3518 /* hardware has been reset, we need to reload some things */
3519 e1000_configure(adapter);
3520
3521 clear_bit(__E1000_DOWN, &adapter->state);
3522
4662e82b
BA
3523 if (adapter->msix_entries)
3524 e1000_configure_msix(adapter);
bc7f75fa
AK
3525 e1000_irq_enable(adapter);
3526
400484fa 3527 netif_start_queue(adapter->netdev);
4cb9be7a 3528
bc7f75fa 3529 /* fire a link change interrupt to start the watchdog */
52a9b231
BA
3530 if (adapter->msix_entries)
3531 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3532 else
3533 ew32(ICS, E1000_ICS_LSC);
3534
bc7f75fa
AK
3535 return 0;
3536}
3537
713b3c9e
JB
3538static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3539{
3540 struct e1000_hw *hw = &adapter->hw;
3541
3542 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3543 return;
3544
3545 /* flush pending descriptor writebacks to memory */
3546 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3547 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3548
3549 /* execute the writes immediately */
3550 e1e_flush();
3551}
3552
67fd4fcb
JK
3553static void e1000e_update_stats(struct e1000_adapter *adapter);
3554
bc7f75fa
AK
3555void e1000e_down(struct e1000_adapter *adapter)
3556{
3557 struct net_device *netdev = adapter->netdev;
3558 struct e1000_hw *hw = &adapter->hw;
3559 u32 tctl, rctl;
3560
ad68076e
BA
3561 /*
3562 * signal that we're down so the interrupt handler does not
3563 * reschedule our watchdog timer
3564 */
bc7f75fa
AK
3565 set_bit(__E1000_DOWN, &adapter->state);
3566
3567 /* disable receives in the hardware */
3568 rctl = er32(RCTL);
7f99ae63
BA
3569 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3570 ew32(RCTL, rctl & ~E1000_RCTL_EN);
bc7f75fa
AK
3571 /* flush and sleep below */
3572
4cb9be7a 3573 netif_stop_queue(netdev);
bc7f75fa
AK
3574
3575 /* disable transmits in the hardware */
3576 tctl = er32(TCTL);
3577 tctl &= ~E1000_TCTL_EN;
3578 ew32(TCTL, tctl);
7f99ae63 3579
bc7f75fa
AK
3580 /* flush both disables and wait for them to finish */
3581 e1e_flush();
1bba4386 3582 usleep_range(10000, 20000);
bc7f75fa 3583
bc7f75fa
AK
3584 e1000_irq_disable(adapter);
3585
3586 del_timer_sync(&adapter->watchdog_timer);
3587 del_timer_sync(&adapter->phy_info_timer);
3588
bc7f75fa 3589 netif_carrier_off(netdev);
67fd4fcb
JK
3590
3591 spin_lock(&adapter->stats64_lock);
3592 e1000e_update_stats(adapter);
3593 spin_unlock(&adapter->stats64_lock);
3594
400484fa
BA
3595 e1000e_flush_descriptors(adapter);
3596 e1000_clean_tx_ring(adapter);
3597 e1000_clean_rx_ring(adapter);
3598
bc7f75fa
AK
3599 adapter->link_speed = 0;
3600 adapter->link_duplex = 0;
3601
52cc3086
JK
3602 if (!pci_channel_offline(adapter->pdev))
3603 e1000e_reset(adapter);
713b3c9e 3604
bc7f75fa
AK
3605 /*
3606 * TODO: for power management, we could drop the link and
3607 * pci_disable_device here.
3608 */
3609}
3610
3611void e1000e_reinit_locked(struct e1000_adapter *adapter)
3612{
3613 might_sleep();
3614 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
1bba4386 3615 usleep_range(1000, 2000);
bc7f75fa
AK
3616 e1000e_down(adapter);
3617 e1000e_up(adapter);
3618 clear_bit(__E1000_RESETTING, &adapter->state);
3619}
3620
3621/**
3622 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3623 * @adapter: board private structure to initialize
3624 *
3625 * e1000_sw_init initializes the Adapter private data structure.
3626 * Fields are initialized based on PCI device information and
3627 * OS network device settings (MTU size).
3628 **/
3629static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3630{
bc7f75fa
AK
3631 struct net_device *netdev = adapter->netdev;
3632
3633 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3634 adapter->rx_ps_bsize0 = 128;
318a94d6
JK
3635 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3636 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
bc7f75fa 3637
67fd4fcb
JK
3638 spin_lock_init(&adapter->stats64_lock);
3639
4662e82b 3640 e1000e_set_interrupt_capability(adapter);
bc7f75fa 3641
4662e82b
BA
3642 if (e1000_alloc_queues(adapter))
3643 return -ENOMEM;
bc7f75fa 3644
bc7f75fa 3645 /* Explicitly disable IRQ since the NIC can be in any state. */
bc7f75fa
AK
3646 e1000_irq_disable(adapter);
3647
bc7f75fa
AK
3648 set_bit(__E1000_DOWN, &adapter->state);
3649 return 0;
bc7f75fa
AK
3650}
3651
f8d59f78
BA
3652/**
3653 * e1000_intr_msi_test - Interrupt Handler
3654 * @irq: interrupt number
3655 * @data: pointer to a network interface device structure
3656 **/
3657static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3658{
3659 struct net_device *netdev = data;
3660 struct e1000_adapter *adapter = netdev_priv(netdev);
3661 struct e1000_hw *hw = &adapter->hw;
3662 u32 icr = er32(ICR);
3663
3bb99fe2 3664 e_dbg("icr is %08X\n", icr);
f8d59f78
BA
3665 if (icr & E1000_ICR_RXSEQ) {
3666 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3667 wmb();
3668 }
3669
3670 return IRQ_HANDLED;
3671}
3672
3673/**
3674 * e1000_test_msi_interrupt - Returns 0 for successful test
3675 * @adapter: board private struct
3676 *
3677 * code flow taken from tg3.c
3678 **/
3679static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3680{
3681 struct net_device *netdev = adapter->netdev;
3682 struct e1000_hw *hw = &adapter->hw;
3683 int err;
3684
 3685	/* poll_enable hasn't been called yet, so we don't need to disable it */
3686 /* clear any pending events */
3687 er32(ICR);
3688
3689 /* free the real vector and request a test handler */
3690 e1000_free_irq(adapter);
4662e82b 3691 e1000e_reset_interrupt_capability(adapter);
f8d59f78
BA
3692
 3693	/* Assume that the test fails; if it succeeds, the test
 3694	 * MSI irq handler will unset this flag */
3695 adapter->flags |= FLAG_MSI_TEST_FAILED;
3696
3697 err = pci_enable_msi(adapter->pdev);
3698 if (err)
3699 goto msi_test_failed;
3700
a0607fd3 3701 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
f8d59f78
BA
3702 netdev->name, netdev);
3703 if (err) {
3704 pci_disable_msi(adapter->pdev);
3705 goto msi_test_failed;
3706 }
3707
3708 wmb();
3709
3710 e1000_irq_enable(adapter);
3711
3712 /* fire an unusual interrupt on the test handler */
3713 ew32(ICS, E1000_ICS_RXSEQ);
3714 e1e_flush();
3715 msleep(50);
3716
3717 e1000_irq_disable(adapter);
3718
3719 rmb();
3720
3721 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4662e82b 3722 adapter->int_mode = E1000E_INT_MODE_LEGACY;
068e8a30
JD
3723 e_info("MSI interrupt test failed, using legacy interrupt.\n");
3724 } else
3725 e_dbg("MSI interrupt test succeeded!\n");
f8d59f78
BA
3726
3727 free_irq(adapter->pdev->irq, netdev);
3728 pci_disable_msi(adapter->pdev);
3729
f8d59f78 3730msi_test_failed:
4662e82b 3731 e1000e_set_interrupt_capability(adapter);
068e8a30 3732 return e1000_request_irq(adapter);
f8d59f78
BA
3733}
3734
3735/**
3736 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3737 * @adapter: board private struct
3738 *
3739 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3740 **/
3741static int e1000_test_msi(struct e1000_adapter *adapter)
3742{
3743 int err;
3744 u16 pci_cmd;
3745
3746 if (!(adapter->flags & FLAG_MSI_ENABLED))
3747 return 0;
3748
3749 /* disable SERR in case the MSI write causes a master abort */
3750 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
36f2407f
DN
3751 if (pci_cmd & PCI_COMMAND_SERR)
3752 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3753 pci_cmd & ~PCI_COMMAND_SERR);
f8d59f78
BA
3754
3755 err = e1000_test_msi_interrupt(adapter);
3756
36f2407f
DN
3757 /* re-enable SERR */
3758 if (pci_cmd & PCI_COMMAND_SERR) {
3759 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3760 pci_cmd |= PCI_COMMAND_SERR;
3761 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3762 }
f8d59f78 3763
f8d59f78
BA
3764 return err;
3765}
3766
bc7f75fa
AK
3767/**
3768 * e1000_open - Called when a network interface is made active
3769 * @netdev: network interface device structure
3770 *
3771 * Returns 0 on success, negative value on failure
3772 *
3773 * The open entry point is called when a network interface is made
3774 * active by the system (IFF_UP). At this point all resources needed
3775 * for transmit and receive operations are allocated, the interrupt
3776 * handler is registered with the OS, the watchdog timer is started,
3777 * and the stack is notified that the interface is ready.
3778 **/
3779static int e1000_open(struct net_device *netdev)
3780{
3781 struct e1000_adapter *adapter = netdev_priv(netdev);
3782 struct e1000_hw *hw = &adapter->hw;
23606cf5 3783 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
3784 int err;
3785
3786 /* disallow open during test */
3787 if (test_bit(__E1000_TESTING, &adapter->state))
3788 return -EBUSY;
3789
23606cf5
RW
3790 pm_runtime_get_sync(&pdev->dev);
3791
9c563d20
JB
3792 netif_carrier_off(netdev);
3793
bc7f75fa
AK
3794 /* allocate transmit descriptors */
3795 err = e1000e_setup_tx_resources(adapter);
3796 if (err)
3797 goto err_setup_tx;
3798
3799 /* allocate receive descriptors */
3800 err = e1000e_setup_rx_resources(adapter);
3801 if (err)
3802 goto err_setup_rx;
3803
11b08be8
BA
3804 /*
3805 * If AMT is enabled, let the firmware know that the network
3806 * interface is now open and reset the part to a known state.
3807 */
3808 if (adapter->flags & FLAG_HAS_AMT) {
31dbe5b4 3809 e1000e_get_hw_control(adapter);
11b08be8
BA
3810 e1000e_reset(adapter);
3811 }
3812
bc7f75fa
AK
3813 e1000e_power_up_phy(adapter);
3814
3815 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3816 if ((adapter->hw.mng_cookie.status &
3817 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3818 e1000_update_mng_vlan(adapter);
3819
c128ec29 3820	/* DMA latency requirement to work around early-receive/jumbo issue */
828bac87
BA
3821 if ((adapter->flags & FLAG_HAS_ERT) ||
3822 (adapter->hw.mac.type == e1000_pch2lan))
6ba74014
LT
3823 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3824 PM_QOS_CPU_DMA_LATENCY,
3825 PM_QOS_DEFAULT_VALUE);
c128ec29 3826
ad68076e
BA
3827 /*
3828 * before we allocate an interrupt, we must be ready to handle it.
bc7f75fa
AK
3829 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 3830	 * as soon as we call pci_request_irq, so we have to set up our
ad68076e
BA
3831 * clean_rx handler before we do so.
3832 */
bc7f75fa
AK
3833 e1000_configure(adapter);
3834
3835 err = e1000_request_irq(adapter);
3836 if (err)
3837 goto err_req_irq;
3838
f8d59f78
BA
3839 /*
3840 * Work around PCIe errata with MSI interrupts causing some chipsets to
3841 * ignore e1000e MSI messages, which means we need to test our MSI
3842 * interrupt now
3843 */
4662e82b 3844 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
f8d59f78
BA
3845 err = e1000_test_msi(adapter);
3846 if (err) {
3847 e_err("Interrupt allocation failed\n");
3848 goto err_req_irq;
3849 }
3850 }
3851
bc7f75fa
AK
3852 /* From here on the code is the same as e1000e_up() */
3853 clear_bit(__E1000_DOWN, &adapter->state);
3854
3855 napi_enable(&adapter->napi);
3856
3857 e1000_irq_enable(adapter);
3858
09357b00 3859 adapter->tx_hang_recheck = false;
4cb9be7a 3860 netif_start_queue(netdev);
d55b53ff 3861
23606cf5
RW
3862 adapter->idle_check = true;
3863 pm_runtime_put(&pdev->dev);
3864
bc7f75fa 3865 /* fire a link status change interrupt to start the watchdog */
52a9b231
BA
3866 if (adapter->msix_entries)
3867 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3868 else
3869 ew32(ICS, E1000_ICS_LSC);
bc7f75fa
AK
3870
3871 return 0;
3872
3873err_req_irq:
31dbe5b4 3874 e1000e_release_hw_control(adapter);
bc7f75fa
AK
3875 e1000_power_down_phy(adapter);
3876 e1000e_free_rx_resources(adapter);
3877err_setup_rx:
3878 e1000e_free_tx_resources(adapter);
3879err_setup_tx:
3880 e1000e_reset(adapter);
23606cf5 3881 pm_runtime_put_sync(&pdev->dev);
bc7f75fa
AK
3882
3883 return err;
3884}
3885
3886/**
3887 * e1000_close - Disables a network interface
3888 * @netdev: network interface device structure
3889 *
3890 * Returns 0, this is not allowed to fail
3891 *
3892 * The close entry point is called when an interface is de-activated
3893 * by the OS. The hardware is still under the drivers control, but
3894 * needs to be disabled. A global MAC reset is issued to stop the
3895 * hardware, and all transmit and receive resources are freed.
3896 **/
3897static int e1000_close(struct net_device *netdev)
3898{
3899 struct e1000_adapter *adapter = netdev_priv(netdev);
23606cf5 3900 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
3901
3902 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
23606cf5
RW
3903
3904 pm_runtime_get_sync(&pdev->dev);
3905
5f4a780d
BA
3906 napi_disable(&adapter->napi);
3907
23606cf5
RW
3908 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3909 e1000e_down(adapter);
3910 e1000_free_irq(adapter);
3911 }
bc7f75fa 3912 e1000_power_down_phy(adapter);
bc7f75fa
AK
3913
3914 e1000e_free_tx_resources(adapter);
3915 e1000e_free_rx_resources(adapter);
3916
ad68076e
BA
3917 /*
3918 * kill manageability vlan ID if supported, but not if a vlan with
3919 * the same ID is registered on the host OS (let 8021q kill it)
3920 */
86d70e53
JK
3921 if (adapter->hw.mng_cookie.status &
3922 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
bc7f75fa
AK
3923 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3924
ad68076e
BA
3925 /*
3926 * If AMT is enabled, let the firmware know that the network
3927 * interface is now closed
3928 */
31dbe5b4
BA
3929 if ((adapter->flags & FLAG_HAS_AMT) &&
3930 !test_bit(__E1000_TESTING, &adapter->state))
3931 e1000e_release_hw_control(adapter);
bc7f75fa 3932
828bac87
BA
3933 if ((adapter->flags & FLAG_HAS_ERT) ||
3934 (adapter->hw.mac.type == e1000_pch2lan))
6ba74014 3935 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
c128ec29 3936
23606cf5
RW
3937 pm_runtime_put_sync(&pdev->dev);
3938
bc7f75fa
AK
3939 return 0;
3940}
3941/**
3942 * e1000_set_mac - Change the Ethernet Address of the NIC
3943 * @netdev: network interface device structure
3944 * @p: pointer to an address structure
3945 *
3946 * Returns 0 on success, negative on failure
3947 **/
3948static int e1000_set_mac(struct net_device *netdev, void *p)
3949{
3950 struct e1000_adapter *adapter = netdev_priv(netdev);
3951 struct sockaddr *addr = p;
3952
3953 if (!is_valid_ether_addr(addr->sa_data))
3954 return -EADDRNOTAVAIL;
3955
3956 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3957 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3958
3959 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3960
3961 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3962 /* activate the work around */
3963 e1000e_set_laa_state_82571(&adapter->hw, 1);
3964
ad68076e
BA
3965 /*
 3966		 * Hold a copy of the LAA in RAR[14]. This is done so that
bc7f75fa
AK
3967 * between the time RAR[0] gets clobbered and the time it
3968 * gets fixed (in e1000_watchdog), the actual LAA is in one
3969 * of the RARs and no incoming packets directed to this port
3970 * are dropped. Eventually the LAA will be in RAR[0] and
ad68076e
BA
 3971		 * RAR[14].
3972 */
bc7f75fa
AK
3973 e1000e_rar_set(&adapter->hw,
3974 adapter->hw.mac.addr,
3975 adapter->hw.mac.rar_entry_count - 1);
3976 }
3977
3978 return 0;
3979}
3980
a8f88ff5
JB
3981/**
3982 * e1000e_update_phy_task - work thread to update phy
3983 * @work: pointer to our work struct
3984 *
3985 * this worker thread exists because we must acquire a
 3986 * semaphore to read the PHY, and we could msleep while
 3987 * waiting for it, but we can't msleep in a timer.
3988 **/
3989static void e1000e_update_phy_task(struct work_struct *work)
3990{
3991 struct e1000_adapter *adapter = container_of(work,
3992 struct e1000_adapter, update_phy_task);
615b32af
JB
3993
3994 if (test_bit(__E1000_DOWN, &adapter->state))
3995 return;
3996
a8f88ff5
JB
3997 e1000_get_phy_info(&adapter->hw);
3998}
3999
ad68076e
BA
4000/*
4001 * Need to wait a few seconds after link up to get diagnostic information from
4002 * the phy
4003 */
bc7f75fa
AK
4004static void e1000_update_phy_info(unsigned long data)
4005{
4006 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
615b32af
JB
4007
4008 if (test_bit(__E1000_DOWN, &adapter->state))
4009 return;
4010
a8f88ff5 4011 schedule_work(&adapter->update_phy_task);
bc7f75fa
AK
4012}
4013
8c7bbb92
BA
4014/**
4015 * e1000e_update_phy_stats - Update the PHY statistics counters
4016 * @adapter: board private structure
2b6b168d
BA
4017 *
 4018 * Read/clear the upper 16-bit PHY registers and read/accumulate the lower ones
8c7bbb92
BA
4019 **/
4020static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4021{
4022 struct e1000_hw *hw = &adapter->hw;
4023 s32 ret_val;
4024 u16 phy_data;
4025
4026 ret_val = hw->phy.ops.acquire(hw);
4027 if (ret_val)
4028 return;
4029
8c7bbb92
BA
4030 /*
4031 * A page set is expensive so check if already on desired page.
4032 * If not, set to the page with the PHY status registers.
4033 */
2b6b168d 4034 hw->phy.addr = 1;
8c7bbb92
BA
4035 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4036 &phy_data);
4037 if (ret_val)
4038 goto release;
2b6b168d
BA
4039 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4040 ret_val = hw->phy.ops.set_page(hw,
4041 HV_STATS_PAGE << IGP_PAGE_SHIFT);
8c7bbb92
BA
4042 if (ret_val)
4043 goto release;
4044 }
4045
8c7bbb92 4046 /* Single Collision Count */
2b6b168d
BA
4047 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4048 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
8c7bbb92
BA
4049 if (!ret_val)
4050 adapter->stats.scc += phy_data;
4051
4052 /* Excessive Collision Count */
2b6b168d
BA
4053 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4054 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
8c7bbb92
BA
4055 if (!ret_val)
4056 adapter->stats.ecol += phy_data;
4057
4058 /* Multiple Collision Count */
2b6b168d
BA
4059 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4060 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
8c7bbb92
BA
4061 if (!ret_val)
4062 adapter->stats.mcc += phy_data;
4063
4064 /* Late Collision Count */
2b6b168d
BA
4065 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4066 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
8c7bbb92
BA
4067 if (!ret_val)
4068 adapter->stats.latecol += phy_data;
4069
4070 /* Collision Count - also used for adaptive IFS */
2b6b168d
BA
4071 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4072 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
8c7bbb92
BA
4073 if (!ret_val)
4074 hw->mac.collision_delta = phy_data;
4075
4076 /* Defer Count */
2b6b168d
BA
4077 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4078 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
8c7bbb92
BA
4079 if (!ret_val)
4080 adapter->stats.dc += phy_data;
4081
4082 /* Transmit with no CRS */
2b6b168d
BA
4083 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4084 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
8c7bbb92
BA
4085 if (!ret_val)
4086 adapter->stats.tncrs += phy_data;
4087
4088release:
4089 hw->phy.ops.release(hw);
4090}
4091
bc7f75fa
AK
4092/**
4093 * e1000e_update_stats - Update the board statistics counters
4094 * @adapter: board private structure
4095 **/
67fd4fcb 4096static void e1000e_update_stats(struct e1000_adapter *adapter)
bc7f75fa 4097{
7274c20f 4098 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
4099 struct e1000_hw *hw = &adapter->hw;
4100 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
4101
4102 /*
4103 * Prevent stats update while adapter is being reset, or if the pci
4104 * connection is down.
4105 */
4106 if (adapter->link_speed == 0)
4107 return;
4108 if (pci_channel_offline(pdev))
4109 return;
4110
bc7f75fa
AK
4111 adapter->stats.crcerrs += er32(CRCERRS);
4112 adapter->stats.gprc += er32(GPRC);
7c25769f
BA
4113 adapter->stats.gorc += er32(GORCL);
4114 er32(GORCH); /* Clear gorc */
bc7f75fa
AK
4115 adapter->stats.bprc += er32(BPRC);
4116 adapter->stats.mprc += er32(MPRC);
4117 adapter->stats.roc += er32(ROC);
4118
bc7f75fa 4119 adapter->stats.mpc += er32(MPC);
8c7bbb92
BA
4120
4121 /* Half-duplex statistics */
4122 if (adapter->link_duplex == HALF_DUPLEX) {
4123 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4124 e1000e_update_phy_stats(adapter);
4125 } else {
4126 adapter->stats.scc += er32(SCC);
4127 adapter->stats.ecol += er32(ECOL);
4128 adapter->stats.mcc += er32(MCC);
4129 adapter->stats.latecol += er32(LATECOL);
4130 adapter->stats.dc += er32(DC);
4131
4132 hw->mac.collision_delta = er32(COLC);
4133
4134 if ((hw->mac.type != e1000_82574) &&
4135 (hw->mac.type != e1000_82583))
4136 adapter->stats.tncrs += er32(TNCRS);
4137 }
4138 adapter->stats.colc += hw->mac.collision_delta;
a4f58f54 4139 }
8c7bbb92 4140
bc7f75fa
AK
4141 adapter->stats.xonrxc += er32(XONRXC);
4142 adapter->stats.xontxc += er32(XONTXC);
4143 adapter->stats.xoffrxc += er32(XOFFRXC);
4144 adapter->stats.xofftxc += er32(XOFFTXC);
bc7f75fa 4145 adapter->stats.gptc += er32(GPTC);
7c25769f
BA
4146 adapter->stats.gotc += er32(GOTCL);
4147 er32(GOTCH); /* Clear gotc */
bc7f75fa
AK
4148 adapter->stats.rnbc += er32(RNBC);
4149 adapter->stats.ruc += er32(RUC);
bc7f75fa
AK
4150
4151 adapter->stats.mptc += er32(MPTC);
4152 adapter->stats.bptc += er32(BPTC);
4153
4154 /* used for adaptive IFS */
4155
4156 hw->mac.tx_packet_delta = er32(TPT);
4157 adapter->stats.tpt += hw->mac.tx_packet_delta;
bc7f75fa
AK
4158
4159 adapter->stats.algnerrc += er32(ALGNERRC);
4160 adapter->stats.rxerrc += er32(RXERRC);
bc7f75fa
AK
4161 adapter->stats.cexterr += er32(CEXTERR);
4162 adapter->stats.tsctc += er32(TSCTC);
4163 adapter->stats.tsctfc += er32(TSCTFC);
4164
bc7f75fa 4165 /* Fill out the OS statistics structure */
7274c20f
AK
4166 netdev->stats.multicast = adapter->stats.mprc;
4167 netdev->stats.collisions = adapter->stats.colc;
bc7f75fa
AK
4168
4169 /* Rx Errors */
4170
ad68076e
BA
4171 /*
4172 * RLEC on some newer hardware can be incorrect so build
4173 * our own version based on RUC and ROC
4174 */
7274c20f 4175 netdev->stats.rx_errors = adapter->stats.rxerrc +
bc7f75fa
AK
4176 adapter->stats.crcerrs + adapter->stats.algnerrc +
4177 adapter->stats.ruc + adapter->stats.roc +
4178 adapter->stats.cexterr;
7274c20f 4179 netdev->stats.rx_length_errors = adapter->stats.ruc +
bc7f75fa 4180 adapter->stats.roc;
7274c20f
AK
4181 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4182 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4183 netdev->stats.rx_missed_errors = adapter->stats.mpc;
bc7f75fa
AK
4184
4185 /* Tx Errors */
7274c20f 4186 netdev->stats.tx_errors = adapter->stats.ecol +
bc7f75fa 4187 adapter->stats.latecol;
7274c20f
AK
4188 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4189 netdev->stats.tx_window_errors = adapter->stats.latecol;
4190 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
bc7f75fa
AK
4191
4192 /* Tx Dropped needs to be maintained elsewhere */
4193
bc7f75fa
AK
4194 /* Management Stats */
4195 adapter->stats.mgptc += er32(MGTPTC);
4196 adapter->stats.mgprc += er32(MGTPRC);
4197 adapter->stats.mgpdc += er32(MGTPDC);
bc7f75fa
AK
4198}
4199
7c25769f
BA
4200/**
4201 * e1000_phy_read_status - Update the PHY register status snapshot
4202 * @adapter: board private structure
4203 **/
4204static void e1000_phy_read_status(struct e1000_adapter *adapter)
4205{
4206 struct e1000_hw *hw = &adapter->hw;
4207 struct e1000_phy_regs *phy = &adapter->phy_regs;
7c25769f
BA
4208
4209 if ((er32(STATUS) & E1000_STATUS_LU) &&
4210 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
90da0669
BA
4211 int ret_val;
4212
7c25769f
BA
4213 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4214 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4215 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4216 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4217 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4218 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4219 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4220 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4221 if (ret_val)
44defeb3 4222 e_warn("Error reading PHY register\n");
7c25769f
BA
4223 } else {
4224 /*
 4225		 * Do not read PHY registers if link is not up.
4226 * Set values to typical power-on defaults
4227 */
4228 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4229 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4230 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4231 BMSR_ERCAP);
4232 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4233 ADVERTISE_ALL | ADVERTISE_CSMA);
4234 phy->lpa = 0;
4235 phy->expansion = EXPANSION_ENABLENPAGE;
4236 phy->ctrl1000 = ADVERTISE_1000FULL;
4237 phy->stat1000 = 0;
4238 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4239 }
7c25769f
BA
4240}
4241
bc7f75fa
AK
4242static void e1000_print_link_info(struct e1000_adapter *adapter)
4243{
bc7f75fa
AK
4244 struct e1000_hw *hw = &adapter->hw;
4245 u32 ctrl = er32(CTRL);
4246
8f12fe86 4247 /* Link status message must follow this format for user tools */
ef456f85
JK
4248 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4249 adapter->netdev->name,
4250 adapter->link_speed,
4251 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4252 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4253 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4254 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
bc7f75fa
AK
4255}
4256
0c6bdb30 4257static bool e1000e_has_link(struct e1000_adapter *adapter)
318a94d6
JK
4258{
4259 struct e1000_hw *hw = &adapter->hw;
3db1cd5c 4260 bool link_active = false;
318a94d6
JK
4261 s32 ret_val = 0;
4262
4263 /*
4264 * get_link_status is set on LSC (link status) interrupt or
4265 * Rx sequence error interrupt. get_link_status will stay
4266 * false until the check_for_link establishes link
4267 * for copper adapters ONLY
4268 */
4269 switch (hw->phy.media_type) {
4270 case e1000_media_type_copper:
4271 if (hw->mac.get_link_status) {
4272 ret_val = hw->mac.ops.check_for_link(hw);
4273 link_active = !hw->mac.get_link_status;
4274 } else {
3db1cd5c 4275 link_active = true;
318a94d6
JK
4276 }
4277 break;
4278 case e1000_media_type_fiber:
4279 ret_val = hw->mac.ops.check_for_link(hw);
4280 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4281 break;
4282 case e1000_media_type_internal_serdes:
4283 ret_val = hw->mac.ops.check_for_link(hw);
4284 link_active = adapter->hw.mac.serdes_has_link;
4285 break;
4286 default:
4287 case e1000_media_type_unknown:
4288 break;
4289 }
4290
4291 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4292 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4293 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
44defeb3 4294 e_info("Gigabit has been disabled, downgrading speed\n");
318a94d6
JK
4295 }
4296
4297 return link_active;
4298}
4299
4300static void e1000e_enable_receives(struct e1000_adapter *adapter)
4301{
4302 /* make sure the receive unit is started */
4303 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4304 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4305 struct e1000_hw *hw = &adapter->hw;
4306 u32 rctl = er32(RCTL);
4307 ew32(RCTL, rctl | E1000_RCTL_EN);
4308 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4309 }
4310}
4311
ff10e13c
CW
4312static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4313{
4314 struct e1000_hw *hw = &adapter->hw;
4315
4316 /*
 4317	 * With 82574 controllers, the PHY needs to be checked periodically
 4318	 * for a hung state and reset if two calls return true
4319 */
4320 if (e1000_check_phy_82574(hw))
4321 adapter->phy_hang_count++;
4322 else
4323 adapter->phy_hang_count = 0;
4324
4325 if (adapter->phy_hang_count > 1) {
4326 adapter->phy_hang_count = 0;
4327 schedule_work(&adapter->reset_task);
4328 }
4329}
4330
bc7f75fa
AK
4331/**
4332 * e1000_watchdog - Timer Call-back
4333 * @data: pointer to adapter cast into an unsigned long
4334 **/
4335static void e1000_watchdog(unsigned long data)
4336{
4337 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4338
4339 /* Do the rest outside of interrupt context */
4340 schedule_work(&adapter->watchdog_task);
4341
4342 /* TODO: make this use queue_delayed_work() */
4343}
4344
4345static void e1000_watchdog_task(struct work_struct *work)
4346{
4347 struct e1000_adapter *adapter = container_of(work,
4348 struct e1000_adapter, watchdog_task);
bc7f75fa
AK
4349 struct net_device *netdev = adapter->netdev;
4350 struct e1000_mac_info *mac = &adapter->hw.mac;
75eb0fad 4351 struct e1000_phy_info *phy = &adapter->hw.phy;
bc7f75fa
AK
4352 struct e1000_ring *tx_ring = adapter->tx_ring;
4353 struct e1000_hw *hw = &adapter->hw;
4354 u32 link, tctl;
bc7f75fa 4355
615b32af
JB
4356 if (test_bit(__E1000_DOWN, &adapter->state))
4357 return;
4358
b405e8df 4359 link = e1000e_has_link(adapter);
318a94d6 4360 if ((netif_carrier_ok(netdev)) && link) {
23606cf5
RW
4361 /* Cancel scheduled suspend requests. */
4362 pm_runtime_resume(netdev->dev.parent);
4363
318a94d6 4364 e1000e_enable_receives(adapter);
bc7f75fa 4365 goto link_up;
bc7f75fa
AK
4366 }
4367
4368 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4369 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4370 e1000_update_mng_vlan(adapter);
4371
bc7f75fa
AK
4372 if (link) {
4373 if (!netif_carrier_ok(netdev)) {
3db1cd5c 4374 bool txb2b = true;
23606cf5
RW
4375
4376 /* Cancel scheduled suspend requests. */
4377 pm_runtime_resume(netdev->dev.parent);
4378
318a94d6 4379 /* update snapshot of PHY registers on LSC */
7c25769f 4380 e1000_phy_read_status(adapter);
bc7f75fa
AK
4381 mac->ops.get_link_up_info(&adapter->hw,
4382 &adapter->link_speed,
4383 &adapter->link_duplex);
4384 e1000_print_link_info(adapter);
f4187b56
BA
4385 /*
4386 * On supported PHYs, check for duplex mismatch only
4387 * if link has autonegotiated at 10/100 half
4388 */
4389 if ((hw->phy.type == e1000_phy_igp_3 ||
4390 hw->phy.type == e1000_phy_bm) &&
 4391			    hw->mac.autoneg &&
4392 (adapter->link_speed == SPEED_10 ||
4393 adapter->link_speed == SPEED_100) &&
4394 (adapter->link_duplex == HALF_DUPLEX)) {
4395 u16 autoneg_exp;
4396
4397 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4398
4399 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
ef456f85 4400 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
f4187b56
BA
4401 }
4402
f49c57e1 4403 /* adjust timeout factor according to speed/duplex */
bc7f75fa
AK
4404 adapter->tx_timeout_factor = 1;
4405 switch (adapter->link_speed) {
4406 case SPEED_10:
3db1cd5c 4407 txb2b = false;
10f1b492 4408 adapter->tx_timeout_factor = 16;
bc7f75fa
AK
4409 break;
4410 case SPEED_100:
3db1cd5c 4411 txb2b = false;
4c86e0b9 4412 adapter->tx_timeout_factor = 10;
bc7f75fa
AK
4413 break;
4414 }
4415
ad68076e
BA
4416 /*
4417 * workaround: re-program speed mode bit after
4418 * link-up event
4419 */
bc7f75fa
AK
4420 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4421 !txb2b) {
4422 u32 tarc0;
e9ec2c0f 4423 tarc0 = er32(TARC(0));
bc7f75fa 4424 tarc0 &= ~SPEED_MODE_BIT;
e9ec2c0f 4425 ew32(TARC(0), tarc0);
bc7f75fa
AK
4426 }
4427
ad68076e
BA
4428 /*
4429 * disable TSO for pcie and 10/100 speeds, to avoid
4430 * some hardware issues
4431 */
bc7f75fa
AK
4432 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4433 switch (adapter->link_speed) {
4434 case SPEED_10:
4435 case SPEED_100:
44defeb3 4436 e_info("10/100 speed: disabling TSO\n");
bc7f75fa
AK
4437 netdev->features &= ~NETIF_F_TSO;
4438 netdev->features &= ~NETIF_F_TSO6;
4439 break;
4440 case SPEED_1000:
4441 netdev->features |= NETIF_F_TSO;
4442 netdev->features |= NETIF_F_TSO6;
4443 break;
4444 default:
4445 /* oops */
4446 break;
4447 }
4448 }
4449
ad68076e
BA
4450 /*
4451 * enable transmits in the hardware, need to do this
4452 * after setting TARC(0)
4453 */
bc7f75fa
AK
4454 tctl = er32(TCTL);
4455 tctl |= E1000_TCTL_EN;
4456 ew32(TCTL, tctl);
4457
75eb0fad
BA
4458 /*
4459 * Perform any post-link-up configuration before
4460 * reporting link up.
4461 */
4462 if (phy->ops.cfg_on_link_up)
4463 phy->ops.cfg_on_link_up(hw);
4464
bc7f75fa 4465 netif_carrier_on(netdev);
bc7f75fa
AK
4466
4467 if (!test_bit(__E1000_DOWN, &adapter->state))
4468 mod_timer(&adapter->phy_info_timer,
4469 round_jiffies(jiffies + 2 * HZ));
bc7f75fa
AK
4470 }
4471 } else {
4472 if (netif_carrier_ok(netdev)) {
4473 adapter->link_speed = 0;
4474 adapter->link_duplex = 0;
8f12fe86
BA
4475 /* Link status message must follow this format */
4476 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4477 adapter->netdev->name);
bc7f75fa 4478 netif_carrier_off(netdev);
bc7f75fa
AK
4479 if (!test_bit(__E1000_DOWN, &adapter->state))
4480 mod_timer(&adapter->phy_info_timer,
4481 round_jiffies(jiffies + 2 * HZ));
4482
4483 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4484 schedule_work(&adapter->reset_task);
23606cf5
RW
4485 else
4486 pm_schedule_suspend(netdev->dev.parent,
4487 LINK_TIMEOUT);
bc7f75fa
AK
4488 }
4489 }
4490
4491link_up:
67fd4fcb 4492 spin_lock(&adapter->stats64_lock);
bc7f75fa
AK
4493 e1000e_update_stats(adapter);
4494
4495 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4496 adapter->tpt_old = adapter->stats.tpt;
4497 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4498 adapter->colc_old = adapter->stats.colc;
4499
7c25769f
BA
4500 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4501 adapter->gorc_old = adapter->stats.gorc;
4502 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4503 adapter->gotc_old = adapter->stats.gotc;
2084b114 4504 spin_unlock(&adapter->stats64_lock);
bc7f75fa
AK
4505
4506 e1000e_update_adaptive(&adapter->hw);
4507
90da0669
BA
4508 if (!netif_carrier_ok(netdev) &&
4509 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4510 /*
4511 * We've lost link, so the controller stops DMA,
4512 * but we've got queued Tx work that's never going
4513 * to get done, so reset controller to flush Tx.
4514 * (Do the reset outside of interrupt context).
4515 */
90da0669
BA
4516 schedule_work(&adapter->reset_task);
4517 /* return immediately since reset is imminent */
4518 return;
bc7f75fa
AK
4519 }
4520
eab2abf5
JB
4521 /* Simple mode for Interrupt Throttle Rate (ITR) */
4522 if (adapter->itr_setting == 4) {
4523 /*
4524 * Symmetric Tx/Rx gets a reduced ITR=2000;
4525 * Total asymmetrical Tx or Rx gets ITR=8000;
4526 * everyone else is between 2000-8000.
4527 */
4528 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4529 u32 dif = (adapter->gotc > adapter->gorc ?
4530 adapter->gotc - adapter->gorc :
4531 adapter->gorc - adapter->gotc) / 10000;
4532 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4533
4534 ew32(ITR, 1000000000 / (itr * 256));
4535 }
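	/*
	 * Illustrative numbers for the simple-mode ITR formula above
	 * (assumed byte counts for one watchdog interval):
	 *   symmetric traffic (gotc == gorc) gives dif = 0, so itr = 2000;
	 *   one-sided traffic (e.g. gorc == 0) gives dif == goc, so
	 *     itr = 6000 + 2000 = 8000;
	 *   mixed traffic, e.g. gotc = 30 MB and gorc = 10 MB:
	 *     goc = 4000, dif = 2000, itr = 2000 * 6000 / 4000 + 2000 = 5000.
	 * The final ew32(ITR, ...) write then converts interrupts/sec into
	 * the register's 256 ns interval units, as elsewhere in the driver.
	 */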
4536
ad68076e 4537 /* Cause software interrupt to ensure Rx ring is cleaned */
4662e82b
BA
4538 if (adapter->msix_entries)
4539 ew32(ICS, adapter->rx_ring->ims_val);
4540 else
4541 ew32(ICS, E1000_ICS_RXDMT0);
bc7f75fa 4542
713b3c9e
JB
4543 /* flush pending descriptors to memory before detecting Tx hang */
4544 e1000e_flush_descriptors(adapter);
4545
bc7f75fa 4546 /* Force detection of hung controller every watchdog period */
3db1cd5c 4547 adapter->detect_tx_hung = true;
bc7f75fa 4548
ad68076e
BA
4549 /*
4550 * With 82571 controllers, LAA may be overwritten due to controller
4551 * reset from the other port. Set the appropriate LAA in RAR[0]
4552 */
bc7f75fa
AK
4553 if (e1000e_get_laa_state_82571(hw))
4554 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4555
ff10e13c
CW
4556 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4557 e1000e_check_82574_phy_workaround(adapter);
4558
bc7f75fa
AK
4559 /* Reset the timer */
4560 if (!test_bit(__E1000_DOWN, &adapter->state))
4561 mod_timer(&adapter->watchdog_timer,
4562 round_jiffies(jiffies + 2 * HZ));
4563}
4564
4565#define E1000_TX_FLAGS_CSUM 0x00000001
4566#define E1000_TX_FLAGS_VLAN 0x00000002
4567#define E1000_TX_FLAGS_TSO 0x00000004
4568#define E1000_TX_FLAGS_IPV4 0x00000008
4569#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4570#define E1000_TX_FLAGS_VLAN_SHIFT 16
4571
4572static int e1000_tso(struct e1000_adapter *adapter,
4573 struct sk_buff *skb)
4574{
4575 struct e1000_ring *tx_ring = adapter->tx_ring;
4576 struct e1000_context_desc *context_desc;
4577 struct e1000_buffer *buffer_info;
4578 unsigned int i;
4579 u32 cmd_length = 0;
4580 u16 ipcse = 0, tucse, mss;
4581 u8 ipcss, ipcso, tucss, tucso, hdr_len;
bc7f75fa 4582
3d5e33c9
BA
4583 if (!skb_is_gso(skb))
4584 return 0;
bc7f75fa 4585
3d5e33c9 4586 if (skb_header_cloned(skb)) {
90da0669
BA
4587 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4588
3d5e33c9
BA
4589 if (err)
4590 return err;
bc7f75fa
AK
4591 }
4592
3d5e33c9
BA
4593 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4594 mss = skb_shinfo(skb)->gso_size;
4595 if (skb->protocol == htons(ETH_P_IP)) {
4596 struct iphdr *iph = ip_hdr(skb);
4597 iph->tot_len = 0;
4598 iph->check = 0;
4599 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4600 0, IPPROTO_TCP, 0);
4601 cmd_length = E1000_TXD_CMD_IP;
4602 ipcse = skb_transport_offset(skb) - 1;
8e1e8a47 4603 } else if (skb_is_gso_v6(skb)) {
3d5e33c9
BA
4604 ipv6_hdr(skb)->payload_len = 0;
4605 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4606 &ipv6_hdr(skb)->daddr,
4607 0, IPPROTO_TCP, 0);
4608 ipcse = 0;
4609 }
4610 ipcss = skb_network_offset(skb);
4611 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4612 tucss = skb_transport_offset(skb);
4613 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4614 tucse = 0;
4615
4616 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4617 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
4618
4619 i = tx_ring->next_to_use;
4620 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4621 buffer_info = &tx_ring->buffer_info[i];
4622
4623 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4624 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4625 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4626 context_desc->upper_setup.tcp_fields.tucss = tucss;
4627 context_desc->upper_setup.tcp_fields.tucso = tucso;
4628 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
4629 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4630 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4631 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4632
4633 buffer_info->time_stamp = jiffies;
4634 buffer_info->next_to_watch = i;
4635
4636 i++;
4637 if (i == tx_ring->count)
4638 i = 0;
4639 tx_ring->next_to_use = i;
4640
4641 return 1;
bc7f75fa
AK
4642}
4643
4644static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4645{
4646 struct e1000_ring *tx_ring = adapter->tx_ring;
4647 struct e1000_context_desc *context_desc;
4648 struct e1000_buffer *buffer_info;
4649 unsigned int i;
4650 u8 css;
af807c82 4651 u32 cmd_len = E1000_TXD_CMD_DEXT;
5f66f208 4652 __be16 protocol;
bc7f75fa 4653
af807c82
DG
4654 if (skb->ip_summed != CHECKSUM_PARTIAL)
4655 return 0;
bc7f75fa 4656
5f66f208
AJ
4657 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4658 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4659 else
4660 protocol = skb->protocol;
4661
3f518390 4662 switch (protocol) {
09640e63 4663 case cpu_to_be16(ETH_P_IP):
af807c82
DG
4664 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4665 cmd_len |= E1000_TXD_CMD_TCP;
4666 break;
09640e63 4667 case cpu_to_be16(ETH_P_IPV6):
af807c82
DG
4668 /* XXX not handling all IPV6 headers */
4669 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4670 cmd_len |= E1000_TXD_CMD_TCP;
4671 break;
4672 default:
4673 if (unlikely(net_ratelimit()))
5f66f208
AJ
4674 e_warn("checksum_partial proto=%x!\n",
4675 be16_to_cpu(protocol));
af807c82 4676 break;
bc7f75fa
AK
4677 }
4678
0d0b1672 4679 css = skb_checksum_start_offset(skb);
af807c82
DG
4680
4681 i = tx_ring->next_to_use;
4682 buffer_info = &tx_ring->buffer_info[i];
4683 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4684
4685 context_desc->lower_setup.ip_config = 0;
4686 context_desc->upper_setup.tcp_fields.tucss = css;
4687 context_desc->upper_setup.tcp_fields.tucso =
4688 css + skb->csum_offset;
4689 context_desc->upper_setup.tcp_fields.tucse = 0;
4690 context_desc->tcp_seg_setup.data = 0;
4691 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4692
4693 buffer_info->time_stamp = jiffies;
4694 buffer_info->next_to_watch = i;
4695
4696 i++;
4697 if (i == tx_ring->count)
4698 i = 0;
4699 tx_ring->next_to_use = i;
4700
4701 return 1;
bc7f75fa
AK
4702}
4703
4704#define E1000_MAX_PER_TXD 8192
4705#define E1000_MAX_TXD_PWR 12
4706
4707static int e1000_tx_map(struct e1000_adapter *adapter,
4708 struct sk_buff *skb, unsigned int first,
4709 unsigned int max_per_txd, unsigned int nr_frags,
4710 unsigned int mss)
4711{
4712 struct e1000_ring *tx_ring = adapter->tx_ring;
03b1320d 4713 struct pci_dev *pdev = adapter->pdev;
1b7719c4 4714 struct e1000_buffer *buffer_info;
8ddc951c 4715 unsigned int len = skb_headlen(skb);
03b1320d 4716 unsigned int offset = 0, size, count = 0, i;
9ed318d5 4717 unsigned int f, bytecount, segs;
bc7f75fa
AK
4718
4719 i = tx_ring->next_to_use;
4720
4721 while (len) {
1b7719c4 4722 buffer_info = &tx_ring->buffer_info[i];
bc7f75fa
AK
4723 size = min(len, max_per_txd);
4724
bc7f75fa 4725 buffer_info->length = size;
bc7f75fa 4726 buffer_info->time_stamp = jiffies;
bc7f75fa 4727 buffer_info->next_to_watch = i;
0be3f55f
NN
4728 buffer_info->dma = dma_map_single(&pdev->dev,
4729 skb->data + offset,
af667a29 4730 size, DMA_TO_DEVICE);
03b1320d 4731 buffer_info->mapped_as_page = false;
0be3f55f 4732 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
03b1320d 4733 goto dma_error;
bc7f75fa
AK
4734
4735 len -= size;
4736 offset += size;
03b1320d 4737 count++;
1b7719c4
AD
4738
4739 if (len) {
4740 i++;
4741 if (i == tx_ring->count)
4742 i = 0;
4743 }
bc7f75fa
AK
4744 }
4745
4746 for (f = 0; f < nr_frags; f++) {
9e903e08 4747 const struct skb_frag_struct *frag;
bc7f75fa
AK
4748
4749 frag = &skb_shinfo(skb)->frags[f];
9e903e08 4750 len = skb_frag_size(frag);
877749bf 4751 offset = 0;
bc7f75fa
AK
4752
4753 while (len) {
1b7719c4
AD
4754 i++;
4755 if (i == tx_ring->count)
4756 i = 0;
4757
bc7f75fa
AK
4758 buffer_info = &tx_ring->buffer_info[i];
4759 size = min(len, max_per_txd);
bc7f75fa
AK
4760
4761 buffer_info->length = size;
4762 buffer_info->time_stamp = jiffies;
bc7f75fa 4763 buffer_info->next_to_watch = i;
877749bf
IC
4764 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
4765 offset, size, DMA_TO_DEVICE);
03b1320d 4766 buffer_info->mapped_as_page = true;
0be3f55f 4767 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
03b1320d 4768 goto dma_error;
bc7f75fa
AK
4769
4770 len -= size;
4771 offset += size;
4772 count++;
bc7f75fa
AK
4773 }
4774 }
4775
af667a29 4776 segs = skb_shinfo(skb)->gso_segs ? : 1;
9ed318d5
TH
4777 /* multiply data chunks by size of headers */
4778 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4779
bc7f75fa 4780 tx_ring->buffer_info[i].skb = skb;
9ed318d5
TH
4781 tx_ring->buffer_info[i].segs = segs;
4782 tx_ring->buffer_info[i].bytecount = bytecount;
bc7f75fa
AK
4783 tx_ring->buffer_info[first].next_to_watch = i;
4784
4785 return count;
03b1320d
AD
4786
4787dma_error:
af667a29 4788 dev_err(&pdev->dev, "Tx DMA map failed\n");
03b1320d 4789 buffer_info->dma = 0;
c1fa347f 4790 if (count)
03b1320d 4791 count--;
c1fa347f
RK
4792
4793 while (count--) {
af667a29 4794 if (i == 0)
03b1320d 4795 i += tx_ring->count;
c1fa347f 4796 i--;
03b1320d 4797 buffer_info = &tx_ring->buffer_info[i];
1d51c418 4798 e1000_put_txbuf(adapter, buffer_info);
03b1320d
AD
4799 }
4800
4801 return 0;
bc7f75fa
AK
4802}
4803
4804static void e1000_tx_queue(struct e1000_adapter *adapter,
4805 int tx_flags, int count)
4806{
4807 struct e1000_ring *tx_ring = adapter->tx_ring;
4808 struct e1000_tx_desc *tx_desc = NULL;
4809 struct e1000_buffer *buffer_info;
4810 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4811 unsigned int i;
4812
4813 if (tx_flags & E1000_TX_FLAGS_TSO) {
4814 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4815 E1000_TXD_CMD_TSE;
4816 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4817
4818 if (tx_flags & E1000_TX_FLAGS_IPV4)
4819 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4820 }
4821
4822 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4823 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4824 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4825 }
4826
4827 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4828 txd_lower |= E1000_TXD_CMD_VLE;
4829 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4830 }
4831
4832 i = tx_ring->next_to_use;
4833
36b973df 4834 do {
bc7f75fa
AK
4835 buffer_info = &tx_ring->buffer_info[i];
4836 tx_desc = E1000_TX_DESC(*tx_ring, i);
4837 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4838 tx_desc->lower.data =
4839 cpu_to_le32(txd_lower | buffer_info->length);
4840 tx_desc->upper.data = cpu_to_le32(txd_upper);
4841
4842 i++;
4843 if (i == tx_ring->count)
4844 i = 0;
36b973df 4845 } while (--count > 0);
bc7f75fa
AK
4846
4847 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4848
ad68076e
BA
4849 /*
4850 * Force memory writes to complete before letting h/w
bc7f75fa
AK
4851 * know there are new descriptors to fetch. (Only
4852 * applicable for weak-ordered memory model archs,
ad68076e
BA
4853 * such as IA-64).
4854 */
bc7f75fa
AK
4855 wmb();
4856
4857 tx_ring->next_to_use = i;
c6e7f51e
BA
4858
4859 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4860 e1000e_update_tdt_wa(adapter, i);
4861 else
4862 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4863
ad68076e
BA
4864 /*
4865 * we need this if more than one processor can write to our tail
 4866	 * at a time; it synchronizes IO on IA64/Altix systems
4867 */
bc7f75fa
AK
4868 mmiowb();
4869}
4870
4871#define MINIMUM_DHCP_PACKET_SIZE 282
4872static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4873 struct sk_buff *skb)
4874{
4875 struct e1000_hw *hw = &adapter->hw;
4876 u16 length, offset;
4877
4878 if (vlan_tx_tag_present(skb)) {
8e95a202
JP
4879 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4880 (adapter->hw.mng_cookie.status &
bc7f75fa
AK
4881 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4882 return 0;
4883 }
4884
4885 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4886 return 0;
4887
4888 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4889 return 0;
4890
4891 {
4892 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4893 struct udphdr *udp;
4894
4895 if (ip->protocol != IPPROTO_UDP)
4896 return 0;
4897
4898 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4899 if (ntohs(udp->dest) != 67)
4900 return 0;
4901
4902 offset = (u8 *)udp + 8 - skb->data;
4903 length = skb->len - offset;
4904 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4905 }
4906
4907 return 0;
4908}
4909
4910static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4911{
4912 struct e1000_adapter *adapter = netdev_priv(netdev);
4913
4914 netif_stop_queue(netdev);
ad68076e
BA
4915 /*
4916 * Herbert's original patch had:
bc7f75fa 4917 * smp_mb__after_netif_stop_queue();
ad68076e
BA
4918 * but since that doesn't exist yet, just open code it.
4919 */
bc7f75fa
AK
4920 smp_mb();
4921
ad68076e
BA
4922 /*
 4923	 * We need to check again in case another CPU has just
4924 * made room available.
4925 */
bc7f75fa
AK
4926 if (e1000_desc_unused(adapter->tx_ring) < size)
4927 return -EBUSY;
4928
4929 /* A reprieve! */
4930 netif_start_queue(netdev);
4931 ++adapter->restart_queue;
4932 return 0;
4933}
4934
4935static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4936{
4937 struct e1000_adapter *adapter = netdev_priv(netdev);
4938
4939 if (e1000_desc_unused(adapter->tx_ring) >= size)
4940 return 0;
4941 return __e1000_maybe_stop_tx(netdev, size);
4942}
4943
4944#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
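/*
 * Illustrative use of TXD_USE_COUNT (example values): with
 * max_txd_pwr = E1000_MAX_TXD_PWR = 12 (4096-byte chunks), a
 * 6000-byte buffer needs (6000 >> 12) + 1 = 2 descriptors.  The macro
 * intentionally rounds up, so an exact multiple such as 4096 bytes is
 * also counted as 2; the estimate is only used below to decide
 * whether the queue must be stopped, not for the mapping itself.
 */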
3b29a56d
SH
4945static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4946 struct net_device *netdev)
bc7f75fa
AK
4947{
4948 struct e1000_adapter *adapter = netdev_priv(netdev);
4949 struct e1000_ring *tx_ring = adapter->tx_ring;
4950 unsigned int first;
4951 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4952 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4953 unsigned int tx_flags = 0;
e743d313 4954 unsigned int len = skb_headlen(skb);
4e6c709c
AK
4955 unsigned int nr_frags;
4956 unsigned int mss;
bc7f75fa
AK
4957 int count = 0;
4958 int tso;
4959 unsigned int f;
bc7f75fa
AK
4960
4961 if (test_bit(__E1000_DOWN, &adapter->state)) {
4962 dev_kfree_skb_any(skb);
4963 return NETDEV_TX_OK;
4964 }
4965
4966 if (skb->len <= 0) {
4967 dev_kfree_skb_any(skb);
4968 return NETDEV_TX_OK;
4969 }
4970
4971 mss = skb_shinfo(skb)->gso_size;
ad68076e
BA
4972 /*
4973 * The controller does a simple calculation to
bc7f75fa
AK
4974 * make sure there is enough room in the FIFO before
4975 * initiating the DMA for each buffer. The calc is:
4976 * 4 = ceil(buffer len/mss). To make sure we don't
4977 * overrun the FIFO, adjust the max buffer len if mss
ad68076e
BA
4978 * drops.
4979 */
bc7f75fa
AK
4980 if (mss) {
4981 u8 hdr_len;
4982 max_per_txd = min(mss << 2, max_per_txd);
4983 max_txd_pwr = fls(max_per_txd) - 1;
4984
ad68076e
BA
4985 /*
4986 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4987 * points to just header, pull a few bytes of payload from
4988 * frags into skb->data
4989 */
bc7f75fa 4990 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
ad68076e
BA
4991 /*
 4992		 * we do this workaround for ES2LAN, but it is unnecessary;
4993 * avoiding it could save a lot of cycles
4994 */
4e6c709c 4995 if (skb->data_len && (hdr_len == len)) {
bc7f75fa
AK
4996 unsigned int pull_size;
4997
4998 pull_size = min((unsigned int)4, skb->data_len);
4999 if (!__pskb_pull_tail(skb, pull_size)) {
44defeb3 5000 e_err("__pskb_pull_tail failed.\n");
bc7f75fa
AK
5001 dev_kfree_skb_any(skb);
5002 return NETDEV_TX_OK;
5003 }
e743d313 5004 len = skb_headlen(skb);
bc7f75fa
AK
5005 }
5006 }
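	/*
	 * Illustrative numbers for the max_per_txd adjustment above
	 * (assumed mss): with mss = 1460, mss << 2 = 5840, so
	 *   max_per_txd = min(5840, 8192) = 5840 and
	 *   max_txd_pwr = fls(5840) - 1 = 12.
	 * Capping each buffer at 4 * mss keeps the controller's internal
	 * "ceil(buffer len / mss)" FIFO check at 4 or less, and the power
	 * of two is only used by TXD_USE_COUNT() to estimate descriptors.
	 */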
5007
5008 /* reserve a descriptor for the offload context */
5009 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5010 count++;
5011 count++;
5012
5013 count += TXD_USE_COUNT(len, max_txd_pwr);
5014
5015 nr_frags = skb_shinfo(skb)->nr_frags;
5016 for (f = 0; f < nr_frags; f++)
9e903e08 5017 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
bc7f75fa
AK
5018 max_txd_pwr);
5019
5020 if (adapter->hw.mac.tx_pkt_filtering)
5021 e1000_transfer_dhcp_info(adapter, skb);
5022
ad68076e
BA
5023 /*
5024 * need: count + 2 desc gap to keep tail from touching
5025 * head, otherwise try next time
5026 */
92af3e95 5027 if (e1000_maybe_stop_tx(netdev, count + 2))
bc7f75fa 5028 return NETDEV_TX_BUSY;
bc7f75fa 5029
eab6d18d 5030 if (vlan_tx_tag_present(skb)) {
bc7f75fa
AK
5031 tx_flags |= E1000_TX_FLAGS_VLAN;
5032 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5033 }
5034
5035 first = tx_ring->next_to_use;
5036
5037 tso = e1000_tso(adapter, skb);
5038 if (tso < 0) {
5039 dev_kfree_skb_any(skb);
bc7f75fa
AK
5040 return NETDEV_TX_OK;
5041 }
5042
5043 if (tso)
5044 tx_flags |= E1000_TX_FLAGS_TSO;
5045 else if (e1000_tx_csum(adapter, skb))
5046 tx_flags |= E1000_TX_FLAGS_CSUM;
5047
ad68076e
BA
5048 /*
5049 * Old method was to assume IPv4 packet by default if TSO was enabled.
bc7f75fa 5050 * 82571 hardware supports TSO capabilities for IPv6 as well...
ad68076e
BA
 5051	 * so we must no longer assume.
5052 */
bc7f75fa
AK
5053 if (skb->protocol == htons(ETH_P_IP))
5054 tx_flags |= E1000_TX_FLAGS_IPV4;
5055
25985edc 5056	/* if count is 0 then a mapping error has occurred */
bc7f75fa 5057 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
1b7719c4 5058 if (count) {
3f0cfa3b 5059 netdev_sent_queue(netdev, skb->len);
1b7719c4 5060 e1000_tx_queue(adapter, tx_flags, count);
1b7719c4
AD
5061 /* Make sure there is space in the ring for the next send. */
5062 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
5063
5064 } else {
bc7f75fa 5065 dev_kfree_skb_any(skb);
1b7719c4
AD
5066 tx_ring->buffer_info[first].time_stamp = 0;
5067 tx_ring->next_to_use = first;
bc7f75fa
AK
5068 }
5069
bc7f75fa
AK
5070 return NETDEV_TX_OK;
5071}
5072
5073/**
5074 * e1000_tx_timeout - Respond to a Tx Hang
5075 * @netdev: network interface device structure
5076 **/
5077static void e1000_tx_timeout(struct net_device *netdev)
5078{
5079 struct e1000_adapter *adapter = netdev_priv(netdev);
5080
5081 /* Do the reset outside of interrupt context */
5082 adapter->tx_timeout_count++;
5083 schedule_work(&adapter->reset_task);
5084}
5085
5086static void e1000_reset_task(struct work_struct *work)
5087{
5088 struct e1000_adapter *adapter;
5089 adapter = container_of(work, struct e1000_adapter, reset_task);
5090
615b32af
JB
5091 /* don't run the task if already down */
5092 if (test_bit(__E1000_DOWN, &adapter->state))
5093 return;
5094
affa9dfb
CW
5095 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5096 (adapter->flags & FLAG_RX_RESTART_NOW))) {
5097 e1000e_dump(adapter);
5098 e_err("Reset adapter\n");
5099 }
bc7f75fa
AK
5100 e1000e_reinit_locked(adapter);
5101}
5102
5103/**
67fd4fcb 5104 * e1000_get_stats64 - Get System Network Statistics
bc7f75fa 5105 * @netdev: network interface device structure
67fd4fcb 5106 * @stats: rtnl_link_stats64 pointer
bc7f75fa
AK
5107 *
5108 * Returns the address of the device statistics structure.
bc7f75fa 5109 **/
67fd4fcb
JK
5110struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5111 struct rtnl_link_stats64 *stats)
bc7f75fa 5112{
67fd4fcb
JK
5113 struct e1000_adapter *adapter = netdev_priv(netdev);
5114
5115 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5116 spin_lock(&adapter->stats64_lock);
5117 e1000e_update_stats(adapter);
5118 /* Fill out the OS statistics structure */
5119 stats->rx_bytes = adapter->stats.gorc;
5120 stats->rx_packets = adapter->stats.gprc;
5121 stats->tx_bytes = adapter->stats.gotc;
5122 stats->tx_packets = adapter->stats.gptc;
5123 stats->multicast = adapter->stats.mprc;
5124 stats->collisions = adapter->stats.colc;
5125
5126 /* Rx Errors */
5127
5128 /*
5129 * RLEC on some newer hardware can be incorrect so build
5130 * our own version based on RUC and ROC
5131 */
5132 stats->rx_errors = adapter->stats.rxerrc +
5133 adapter->stats.crcerrs + adapter->stats.algnerrc +
5134 adapter->stats.ruc + adapter->stats.roc +
5135 adapter->stats.cexterr;
5136 stats->rx_length_errors = adapter->stats.ruc +
5137 adapter->stats.roc;
5138 stats->rx_crc_errors = adapter->stats.crcerrs;
5139 stats->rx_frame_errors = adapter->stats.algnerrc;
5140 stats->rx_missed_errors = adapter->stats.mpc;
5141
5142 /* Tx Errors */
5143 stats->tx_errors = adapter->stats.ecol +
5144 adapter->stats.latecol;
5145 stats->tx_aborted_errors = adapter->stats.ecol;
5146 stats->tx_window_errors = adapter->stats.latecol;
5147 stats->tx_carrier_errors = adapter->stats.tncrs;
5148
5149 /* Tx Dropped needs to be maintained elsewhere */
5150
5151 spin_unlock(&adapter->stats64_lock);
5152 return stats;
bc7f75fa
AK
5153}
5154
5155/**
5156 * e1000_change_mtu - Change the Maximum Transfer Unit
5157 * @netdev: network interface device structure
5158 * @new_mtu: new value for maximum frame size
5159 *
5160 * Returns 0 on success, negative on failure
5161 **/
5162static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5163{
5164 struct e1000_adapter *adapter = netdev_priv(netdev);
5165 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5166
2adc55c9
BA
5167 /* Jumbo frame support */
5168 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5169 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5170 e_err("Jumbo Frames not supported.\n");
bc7f75fa
AK
5171 return -EINVAL;
5172 }
5173
2adc55c9
BA
5174 /* Supported frame sizes */
5175 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5176 (max_frame > adapter->max_hw_frame_size)) {
5177 e_err("Unsupported MTU setting\n");
bc7f75fa
AK
5178 return -EINVAL;
5179 }
5180
a1ce6473
BA
5181 /* Jumbo frame workaround on 82579 requires CRC be stripped */
5182 if ((adapter->hw.mac.type == e1000_pch2lan) &&
5183 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5184 (new_mtu > ETH_DATA_LEN)) {
ef456f85 5185 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
a1ce6473
BA
5186 return -EINVAL;
5187 }
5188
6f461f6c
BA
5189 /* 82573 Errata 17 */
5190 if (((adapter->hw.mac.type == e1000_82573) ||
5191 (adapter->hw.mac.type == e1000_82574)) &&
5192 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
5193 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
5194 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
5195 }
5196
bc7f75fa 5197 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
1bba4386 5198 usleep_range(1000, 2000);
610c9928 5199 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
318a94d6 5200 adapter->max_frame_size = max_frame;
610c9928
BA
5201 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5202 netdev->mtu = new_mtu;
bc7f75fa
AK
5203 if (netif_running(netdev))
5204 e1000e_down(adapter);
5205
ad68076e
BA
5206 /*
5207 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
bc7f75fa
AK
5208	 * means we reserve 2 more; this pushes us to allocate from the next
5209 * larger slab size.
ad68076e 5210 * i.e. RXBUFFER_2048 --> size-4096 slab
97ac8cae
BA
5211 * However with the new *_jumbo_rx* routines, jumbo receives will use
5212 * fragmented skbs
ad68076e 5213 */
bc7f75fa 5214
9926146b 5215 if (max_frame <= 2048)
bc7f75fa
AK
5216 adapter->rx_buffer_len = 2048;
5217 else
5218 adapter->rx_buffer_len = 4096;
5219
5220 /* adjust allocation if LPE protects us, and we aren't using SBP */
5221 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5222 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5223 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
ad68076e 5224 + ETH_FCS_LEN;
bc7f75fa 5225
bc7f75fa
AK
5226 if (netif_running(netdev))
5227 e1000e_up(adapter);
5228 else
5229 e1000e_reset(adapter);
5230
5231 clear_bit(__E1000_RESETTING, &adapter->state);
5232
5233 return 0;
5234}
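/*
 * Illustrative helper (not driver code): the Rx buffer length that
 * e1000_change_mtu() above ends up choosing for a given MTU, mirroring the
 * slab-size comment.  Assumes the standard ETH_HLEN (14), ETH_FCS_LEN (4)
 * and VLAN_HLEN (4) values from the headers this file already includes.
 */
static unsigned int example_rx_buffer_len(int mtu)
{
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;
	unsigned int len = (max_frame <= 2048) ? 2048 : 4096;

	/* standard (possibly VLAN-tagged) frames fit exactly in 1522 bytes */
	if (max_frame == ETH_FRAME_LEN + ETH_FCS_LEN ||
	    max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
		len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;

	return len;	/* e.g. MTU 1500 -> 1522, MTU 9000 -> 4096 */
}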
5235
5236static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5237 int cmd)
5238{
5239 struct e1000_adapter *adapter = netdev_priv(netdev);
5240 struct mii_ioctl_data *data = if_mii(ifr);
bc7f75fa 5241
318a94d6 5242 if (adapter->hw.phy.media_type != e1000_media_type_copper)
bc7f75fa
AK
5243 return -EOPNOTSUPP;
5244
5245 switch (cmd) {
5246 case SIOCGMIIPHY:
5247 data->phy_id = adapter->hw.phy.addr;
5248 break;
5249 case SIOCGMIIREG:
b16a002e
BA
5250 e1000_phy_read_status(adapter);
5251
7c25769f
BA
5252 switch (data->reg_num & 0x1F) {
5253 case MII_BMCR:
5254 data->val_out = adapter->phy_regs.bmcr;
5255 break;
5256 case MII_BMSR:
5257 data->val_out = adapter->phy_regs.bmsr;
5258 break;
5259 case MII_PHYSID1:
5260 data->val_out = (adapter->hw.phy.id >> 16);
5261 break;
5262 case MII_PHYSID2:
5263 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5264 break;
5265 case MII_ADVERTISE:
5266 data->val_out = adapter->phy_regs.advertise;
5267 break;
5268 case MII_LPA:
5269 data->val_out = adapter->phy_regs.lpa;
5270 break;
5271 case MII_EXPANSION:
5272 data->val_out = adapter->phy_regs.expansion;
5273 break;
5274 case MII_CTRL1000:
5275 data->val_out = adapter->phy_regs.ctrl1000;
5276 break;
5277 case MII_STAT1000:
5278 data->val_out = adapter->phy_regs.stat1000;
5279 break;
5280 case MII_ESTATUS:
5281 data->val_out = adapter->phy_regs.estatus;
5282 break;
5283 default:
bc7f75fa
AK
5284 return -EIO;
5285 }
bc7f75fa
AK
5286 break;
5287 case SIOCSMIIREG:
5288 default:
5289 return -EOPNOTSUPP;
5290 }
5291 return 0;
5292}
5293
5294static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5295{
5296 switch (cmd) {
5297 case SIOCGMIIPHY:
5298 case SIOCGMIIREG:
5299 case SIOCSMIIREG:
5300 return e1000_mii_ioctl(netdev, ifr, cmd);
5301 default:
5302 return -EOPNOTSUPP;
5303 }
5304}
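/*
 * Hedged user-space sketch (not driver code): exercising the
 * SIOCGMIIPHY/SIOCGMIIREG path that e1000_mii_ioctl() above serves from its
 * cached PHY registers.  The interface name is an example, and overlaying
 * mii_ioctl_data on the ifreq union follows the usual mii-tool convention,
 * which is an assumption about the C library headers in use.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int example_read_bmsr(const char *ifname)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {		/* fills mii->phy_id */
		close(fd);
		return -1;
	}
	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {		/* driver fills val_out */
		close(fd);
		return -1;
	}
	printf("%s: BMSR=0x%04x link %s\n", ifname, mii->val_out,
	       (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
	close(fd);
	return 0;
}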
5305
a4f58f54
BA
5306static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5307{
5308 struct e1000_hw *hw = &adapter->hw;
5309 u32 i, mac_reg;
2b6b168d 5310 u16 phy_reg, wuc_enable;
a4f58f54
BA
5311 int retval = 0;
5312
5313 /* copy MAC RARs to PHY RARs */
d3738bb8 5314 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
a4f58f54 5315
2b6b168d
BA
5316 retval = hw->phy.ops.acquire(hw);
5317 if (retval) {
5318 e_err("Could not acquire PHY\n");
5319 return retval;
5320 }
5321
5322 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5323 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5324 if (retval)
5325 goto out;
5326
5327 /* copy MAC MTA to PHY MTA - only needed for pchlan */
a4f58f54
BA
5328 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5329 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
2b6b168d
BA
5330 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5331 (u16)(mac_reg & 0xFFFF));
5332 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5333 (u16)((mac_reg >> 16) & 0xFFFF));
a4f58f54
BA
5334 }
5335
5336 /* configure PHY Rx Control register */
2b6b168d 5337 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
a4f58f54
BA
5338 mac_reg = er32(RCTL);
5339 if (mac_reg & E1000_RCTL_UPE)
5340 phy_reg |= BM_RCTL_UPE;
5341 if (mac_reg & E1000_RCTL_MPE)
5342 phy_reg |= BM_RCTL_MPE;
5343 phy_reg &= ~(BM_RCTL_MO_MASK);
5344 if (mac_reg & E1000_RCTL_MO_3)
5345 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5346 << BM_RCTL_MO_SHIFT);
5347 if (mac_reg & E1000_RCTL_BAM)
5348 phy_reg |= BM_RCTL_BAM;
5349 if (mac_reg & E1000_RCTL_PMCF)
5350 phy_reg |= BM_RCTL_PMCF;
5351 mac_reg = er32(CTRL);
5352 if (mac_reg & E1000_CTRL_RFCE)
5353 phy_reg |= BM_RCTL_RFCE;
2b6b168d 5354 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
a4f58f54
BA
5355
5356 /* enable PHY wakeup in MAC register */
5357 ew32(WUFC, wufc);
5358 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5359
5360 /* configure and enable PHY wakeup in PHY registers */
2b6b168d
BA
5361 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5362 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
a4f58f54
BA
5363
5364 /* activate PHY wakeup */
2b6b168d
BA
5365 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5366 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
a4f58f54
BA
5367 if (retval)
5368 e_err("Could not set PHY Host Wakeup bit\n");
5369out:
94d8186a 5370 hw->phy.ops.release(hw);
a4f58f54
BA
5371
5372 return retval;
5373}
5374
23606cf5
RW
5375static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5376 bool runtime)
bc7f75fa
AK
5377{
5378 struct net_device *netdev = pci_get_drvdata(pdev);
5379 struct e1000_adapter *adapter = netdev_priv(netdev);
5380 struct e1000_hw *hw = &adapter->hw;
5381 u32 ctrl, ctrl_ext, rctl, status;
23606cf5
RW
5382 /* Runtime suspend should only enable wakeup for link changes */
5383 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
bc7f75fa
AK
5384 int retval = 0;
5385
5386 netif_device_detach(netdev);
5387
5388 if (netif_running(netdev)) {
5389 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5390 e1000e_down(adapter);
5391 e1000_free_irq(adapter);
5392 }
4662e82b 5393 e1000e_reset_interrupt_capability(adapter);
bc7f75fa
AK
5394
5395 retval = pci_save_state(pdev);
5396 if (retval)
5397 return retval;
5398
5399 status = er32(STATUS);
5400 if (status & E1000_STATUS_LU)
5401 wufc &= ~E1000_WUFC_LNKC;
5402
5403 if (wufc) {
5404 e1000_setup_rctl(adapter);
ef9b965a 5405 e1000e_set_rx_mode(netdev);
bc7f75fa
AK
5406
5407 /* turn on all-multi mode if wake on multicast is enabled */
5408 if (wufc & E1000_WUFC_MC) {
5409 rctl = er32(RCTL);
5410 rctl |= E1000_RCTL_MPE;
5411 ew32(RCTL, rctl);
5412 }
5413
5414 ctrl = er32(CTRL);
5415 /* advertise wake from D3Cold */
5416 #define E1000_CTRL_ADVD3WUC 0x00100000
5417 /* phy power management enable */
5418 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
a4f58f54
BA
5419 ctrl |= E1000_CTRL_ADVD3WUC;
5420 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5421 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
bc7f75fa
AK
5422 ew32(CTRL, ctrl);
5423
318a94d6
JK
5424 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5425 adapter->hw.phy.media_type ==
5426 e1000_media_type_internal_serdes) {
bc7f75fa
AK
5427 /* keep the laser running in D3 */
5428 ctrl_ext = er32(CTRL_EXT);
93a23f48 5429 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
bc7f75fa
AK
5430 ew32(CTRL_EXT, ctrl_ext);
5431 }
5432
97ac8cae 5433 if (adapter->flags & FLAG_IS_ICH)
99730e4c 5434 e1000_suspend_workarounds_ich8lan(&adapter->hw);
97ac8cae 5435
bc7f75fa
AK
5436 /* Allow time for pending master requests to run */
5437 e1000e_disable_pcie_master(&adapter->hw);
5438
82776a4b 5439 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
a4f58f54
BA
5440 /* enable wakeup by the PHY */
5441 retval = e1000_init_phy_wakeup(adapter, wufc);
5442 if (retval)
5443 return retval;
5444 } else {
5445 /* enable wakeup by the MAC */
5446 ew32(WUFC, wufc);
5447 ew32(WUC, E1000_WUC_PME_EN);
5448 }
bc7f75fa
AK
5449 } else {
5450 ew32(WUC, 0);
5451 ew32(WUFC, 0);
bc7f75fa
AK
5452 }
5453
4f9de721
RW
5454 *enable_wake = !!wufc;
5455
bc7f75fa 5456 /* make sure adapter isn't asleep if manageability is enabled */
82776a4b
BA
5457 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5458 (hw->mac.ops.check_mng_mode(hw)))
4f9de721 5459 *enable_wake = true;
bc7f75fa
AK
5460
5461 if (adapter->hw.phy.type == e1000_phy_igp_3)
5462 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5463
ad68076e
BA
5464 /*
5465 * Release control of h/w to f/w. If f/w is AMT enabled, this
5466 * would have already happened in close and is redundant.
5467 */
31dbe5b4 5468 e1000e_release_hw_control(adapter);
bc7f75fa
AK
5469
5470 pci_disable_device(pdev);
5471
4f9de721
RW
5472 return 0;
5473}
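/*
 * Note (illustrative, not driver code): 'wufc' above is normally derived
 * from the WoL options configured through ethtool, e.g.
 *
 *	ethtool -s eth0 wol g
 *
 * which requests magic-packet wake and, assuming the usual mapping in the
 * driver's ethtool hooks, ends up as E1000_WUFC_MAG in adapter->wol; for
 * runtime suspend only E1000_WUFC_LNKC (wake on link change) is kept.
 * The interface name is an example only.
 */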
5474
5475static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5476{
5477 if (sleep && wake) {
5478 pci_prepare_to_sleep(pdev);
5479 return;
5480 }
5481
5482 pci_wake_from_d3(pdev, wake);
5483 pci_set_power_state(pdev, PCI_D3hot);
5484}
5485
5486static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5487 bool wake)
5488{
5489 struct net_device *netdev = pci_get_drvdata(pdev);
5490 struct e1000_adapter *adapter = netdev_priv(netdev);
5491
005cbdfc
AD
5492 /*
5493 * The pci-e switch on some quad port adapters will report a
5494 * correctable error when the MAC transitions from D0 to D3. To
5495 * prevent this we need to mask off the correctable errors on the
5496 * downstream port of the pci-e switch.
5497 */
5498 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5499 struct pci_dev *us_dev = pdev->bus->self;
353064de 5500 int pos = pci_pcie_cap(us_dev);
005cbdfc
AD
5501 u16 devctl;
5502
5503 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
5504 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
5505 (devctl & ~PCI_EXP_DEVCTL_CERE));
5506
4f9de721 5507 e1000_power_off(pdev, sleep, wake);
005cbdfc
AD
5508
5509 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
5510 } else {
4f9de721 5511 e1000_power_off(pdev, sleep, wake);
005cbdfc 5512 }
bc7f75fa
AK
5513}
5514
6f461f6c
BA
5515#ifdef CONFIG_PCIEASPM
5516static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5517{
9f728f53 5518 pci_disable_link_state_locked(pdev, state);
6f461f6c
BA
5519}
5520#else
5521static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
1eae4eb2
AK
5522{
5523 int pos;
6f461f6c 5524 u16 reg16;
1eae4eb2
AK
5525
5526 /*
6f461f6c
BA
5527 * Both device and parent should have the same ASPM setting.
5528 * Disable ASPM in downstream component first and then upstream.
1eae4eb2 5529 */
6f461f6c
BA
5530 pos = pci_pcie_cap(pdev);
5531 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
5532 reg16 &= ~state;
5533 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
5534
0c75ba22
AB
5535 if (!pdev->bus->self)
5536 return;
5537
6f461f6c
BA
5538 pos = pci_pcie_cap(pdev->bus->self);
5539 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5540 reg16 &= ~state;
5541 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5542}
5543#endif
78cd29d5 5544static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6f461f6c
BA
5545{
5546 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5547 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5548 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5549
5550 __e1000e_disable_aspm(pdev, state);
1eae4eb2
AK
5551}
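/*
 * Illustrative note (not driver code): clearing 'state' directly in the
 * Link Control register in the fallback above works because, at the time
 * of this driver, PCIE_LINK_STATE_L0S (0x1) and PCIE_LINK_STATE_L1 (0x2)
 * line up with the ASPM Control field in LNKCTL bits [1:0], e.g.
 *
 *	reg16 = 0x0003;			// ASPM L0s and L1 entry enabled
 *	reg16 &= ~PCIE_LINK_STATE_L1;	// -> 0x0001, L1 entry disabled
 */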
5552
aa338601 5553#ifdef CONFIG_PM
23606cf5 5554static bool e1000e_pm_ready(struct e1000_adapter *adapter)
4f9de721 5555{
23606cf5 5556 return !!adapter->tx_ring->buffer_info;
4f9de721
RW
5557}
5558
23606cf5 5559static int __e1000_resume(struct pci_dev *pdev)
bc7f75fa
AK
5560{
5561 struct net_device *netdev = pci_get_drvdata(pdev);
5562 struct e1000_adapter *adapter = netdev_priv(netdev);
5563 struct e1000_hw *hw = &adapter->hw;
78cd29d5 5564 u16 aspm_disable_flag = 0;
bc7f75fa
AK
5565 u32 err;
5566
78cd29d5
BA
5567 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5568 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5569 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5570 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5571 if (aspm_disable_flag)
5572 e1000e_disable_aspm(pdev, aspm_disable_flag);
5573
bc7f75fa
AK
5574 pci_set_power_state(pdev, PCI_D0);
5575 pci_restore_state(pdev);
28b8f04a 5576 pci_save_state(pdev);
6e4f6f6b 5577
4662e82b 5578 e1000e_set_interrupt_capability(adapter);
bc7f75fa
AK
5579 if (netif_running(netdev)) {
5580 err = e1000_request_irq(adapter);
5581 if (err)
5582 return err;
5583 }
5584
99730e4c
BA
5585 if (hw->mac.type == e1000_pch2lan)
5586 e1000_resume_workarounds_pchlan(&adapter->hw);
5587
bc7f75fa 5588 e1000e_power_up_phy(adapter);
a4f58f54
BA
5589
5590 /* report the system wakeup cause from S3/S4 */
5591 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5592 u16 phy_data;
5593
5594 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5595 if (phy_data) {
5596 e_info("PHY Wakeup cause - %s\n",
5597 phy_data & E1000_WUS_EX ? "Unicast Packet" :
5598 phy_data & E1000_WUS_MC ? "Multicast Packet" :
5599 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5600 phy_data & E1000_WUS_MAG ? "Magic Packet" :
ef456f85
JK
5601 phy_data & E1000_WUS_LNKC ?
5602 "Link Status Change" : "other");
a4f58f54
BA
5603 }
5604 e1e_wphy(&adapter->hw, BM_WUS, ~0);
5605 } else {
5606 u32 wus = er32(WUS);
5607 if (wus) {
5608 e_info("MAC Wakeup cause - %s\n",
5609 wus & E1000_WUS_EX ? "Unicast Packet" :
5610 wus & E1000_WUS_MC ? "Multicast Packet" :
5611 wus & E1000_WUS_BC ? "Broadcast Packet" :
5612 wus & E1000_WUS_MAG ? "Magic Packet" :
5613 wus & E1000_WUS_LNKC ? "Link Status Change" :
5614 "other");
5615 }
5616 ew32(WUS, ~0);
5617 }
5618
bc7f75fa 5619 e1000e_reset(adapter);
bc7f75fa 5620
cd791618 5621 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
5622
5623 if (netif_running(netdev))
5624 e1000e_up(adapter);
5625
5626 netif_device_attach(netdev);
5627
ad68076e
BA
5628 /*
5629 * If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 5630 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
5631 * under the control of the driver.
5632 */
c43bc57e 5633 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 5634 e1000e_get_hw_control(adapter);
bc7f75fa
AK
5635
5636 return 0;
5637}
23606cf5 5638
a0340162
RW
5639#ifdef CONFIG_PM_SLEEP
5640static int e1000_suspend(struct device *dev)
5641{
5642 struct pci_dev *pdev = to_pci_dev(dev);
5643 int retval;
5644 bool wake;
5645
5646 retval = __e1000_shutdown(pdev, &wake, false);
5647 if (!retval)
5648 e1000_complete_shutdown(pdev, true, wake);
5649
5650 return retval;
5651}
5652
23606cf5
RW
5653static int e1000_resume(struct device *dev)
5654{
5655 struct pci_dev *pdev = to_pci_dev(dev);
5656 struct net_device *netdev = pci_get_drvdata(pdev);
5657 struct e1000_adapter *adapter = netdev_priv(netdev);
5658
5659 if (e1000e_pm_ready(adapter))
5660 adapter->idle_check = true;
5661
5662 return __e1000_resume(pdev);
5663}
a0340162
RW
5664#endif /* CONFIG_PM_SLEEP */
5665
5666#ifdef CONFIG_PM_RUNTIME
5667static int e1000_runtime_suspend(struct device *dev)
5668{
5669 struct pci_dev *pdev = to_pci_dev(dev);
5670 struct net_device *netdev = pci_get_drvdata(pdev);
5671 struct e1000_adapter *adapter = netdev_priv(netdev);
5672
5673 if (e1000e_pm_ready(adapter)) {
5674 bool wake;
5675
5676 __e1000_shutdown(pdev, &wake, true);
5677 }
5678
5679 return 0;
5680}
5681
5682static int e1000_idle(struct device *dev)
5683{
5684 struct pci_dev *pdev = to_pci_dev(dev);
5685 struct net_device *netdev = pci_get_drvdata(pdev);
5686 struct e1000_adapter *adapter = netdev_priv(netdev);
5687
5688 if (!e1000e_pm_ready(adapter))
5689 return 0;
5690
5691 if (adapter->idle_check) {
5692 adapter->idle_check = false;
5693 if (!e1000e_has_link(adapter))
5694 pm_schedule_suspend(dev, MSEC_PER_SEC);
5695 }
5696
5697 return -EBUSY;
5698}
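/*
 * Illustrative note (not driver code): returning -EBUSY from the
 * runtime-idle callback tells the PM core not to suspend immediately;
 * instead, when no link is detected, a suspend is scheduled one second
 * out via pm_schedule_suspend() above.
 */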
23606cf5
RW
5699
5700static int e1000_runtime_resume(struct device *dev)
5701{
5702 struct pci_dev *pdev = to_pci_dev(dev);
5703 struct net_device *netdev = pci_get_drvdata(pdev);
5704 struct e1000_adapter *adapter = netdev_priv(netdev);
5705
5706 if (!e1000e_pm_ready(adapter))
5707 return 0;
5708
5709 adapter->idle_check = !dev->power.runtime_auto;
5710 return __e1000_resume(pdev);
5711}
a0340162 5712#endif /* CONFIG_PM_RUNTIME */
aa338601 5713#endif /* CONFIG_PM */
bc7f75fa
AK
5714
5715static void e1000_shutdown(struct pci_dev *pdev)
5716{
4f9de721
RW
5717 bool wake = false;
5718
23606cf5 5719 __e1000_shutdown(pdev, &wake, false);
4f9de721
RW
5720
5721 if (system_state == SYSTEM_POWER_OFF)
5722 e1000_complete_shutdown(pdev, false, wake);
bc7f75fa
AK
5723}
5724
5725#ifdef CONFIG_NET_POLL_CONTROLLER
147b2c8c
DD
5726
5727static irqreturn_t e1000_intr_msix(int irq, void *data)
5728{
5729 struct net_device *netdev = data;
5730 struct e1000_adapter *adapter = netdev_priv(netdev);
147b2c8c
DD
5731
5732 if (adapter->msix_entries) {
90da0669
BA
5733 int vector, msix_irq;
5734
147b2c8c
DD
5735 vector = 0;
5736 msix_irq = adapter->msix_entries[vector].vector;
5737 disable_irq(msix_irq);
5738 e1000_intr_msix_rx(msix_irq, netdev);
5739 enable_irq(msix_irq);
5740
5741 vector++;
5742 msix_irq = adapter->msix_entries[vector].vector;
5743 disable_irq(msix_irq);
5744 e1000_intr_msix_tx(msix_irq, netdev);
5745 enable_irq(msix_irq);
5746
5747 vector++;
5748 msix_irq = adapter->msix_entries[vector].vector;
5749 disable_irq(msix_irq);
5750 e1000_msix_other(msix_irq, netdev);
5751 enable_irq(msix_irq);
5752 }
5753
5754 return IRQ_HANDLED;
5755}
5756
bc7f75fa
AK
5757/*
5758 * Polling 'interrupt' - used by things like netconsole to send skbs
5759 * without having to re-enable interrupts. It's not called while
5760 * the interrupt routine is executing.
5761 */
5762static void e1000_netpoll(struct net_device *netdev)
5763{
5764 struct e1000_adapter *adapter = netdev_priv(netdev);
5765
147b2c8c
DD
5766 switch (adapter->int_mode) {
5767 case E1000E_INT_MODE_MSIX:
5768 e1000_intr_msix(adapter->pdev->irq, netdev);
5769 break;
5770 case E1000E_INT_MODE_MSI:
5771 disable_irq(adapter->pdev->irq);
5772 e1000_intr_msi(adapter->pdev->irq, netdev);
5773 enable_irq(adapter->pdev->irq);
5774 break;
5775 default: /* E1000E_INT_MODE_LEGACY */
5776 disable_irq(adapter->pdev->irq);
5777 e1000_intr(adapter->pdev->irq, netdev);
5778 enable_irq(adapter->pdev->irq);
5779 break;
5780 }
bc7f75fa
AK
5781}
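/*
 * Illustrative note (not driver code): ndo_poll_controller is exercised by
 * netconsole; a typical invocation (syntax per
 * Documentation/networking/netconsole.txt, addresses are examples only):
 *
 *	modprobe netconsole \
 *	    netconsole=4444@10.0.0.1/eth1,9353@10.0.0.2/12:34:56:78:9a:bc
 */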
5782#endif
5783
5784/**
5785 * e1000_io_error_detected - called when PCI error is detected
5786 * @pdev: Pointer to PCI device
5787 * @state: The current pci connection state
5788 *
5789 * This function is called after a PCI bus error affecting
5790 * this device has been detected.
5791 */
5792static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5793 pci_channel_state_t state)
5794{
5795 struct net_device *netdev = pci_get_drvdata(pdev);
5796 struct e1000_adapter *adapter = netdev_priv(netdev);
5797
5798 netif_device_detach(netdev);
5799
c93b5a76
MM
5800 if (state == pci_channel_io_perm_failure)
5801 return PCI_ERS_RESULT_DISCONNECT;
5802
bc7f75fa
AK
5803 if (netif_running(netdev))
5804 e1000e_down(adapter);
5805 pci_disable_device(pdev);
5806
5807	/* Request a slot reset. */
5808 return PCI_ERS_RESULT_NEED_RESET;
5809}
5810
5811/**
5812 * e1000_io_slot_reset - called after the pci bus has been reset.
5813 * @pdev: Pointer to PCI device
5814 *
5815 * Restart the card from scratch, as if from a cold-boot. Implementation
5816 * resembles the first-half of the e1000_resume routine.
5817 */
5818static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5819{
5820 struct net_device *netdev = pci_get_drvdata(pdev);
5821 struct e1000_adapter *adapter = netdev_priv(netdev);
5822 struct e1000_hw *hw = &adapter->hw;
78cd29d5 5823 u16 aspm_disable_flag = 0;
6e4f6f6b 5824 int err;
111b9dc5 5825 pci_ers_result_t result;
bc7f75fa 5826
78cd29d5
BA
5827 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5828 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6f461f6c 5829 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
78cd29d5
BA
5830 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5831 if (aspm_disable_flag)
5832 e1000e_disable_aspm(pdev, aspm_disable_flag);
5833
f0f422e5 5834 err = pci_enable_device_mem(pdev);
6e4f6f6b 5835 if (err) {
bc7f75fa
AK
5836 dev_err(&pdev->dev,
5837 "Cannot re-enable PCI device after reset.\n");
111b9dc5
JB
5838 result = PCI_ERS_RESULT_DISCONNECT;
5839 } else {
5840 pci_set_master(pdev);
23606cf5 5841 pdev->state_saved = true;
111b9dc5 5842 pci_restore_state(pdev);
bc7f75fa 5843
111b9dc5
JB
5844 pci_enable_wake(pdev, PCI_D3hot, 0);
5845 pci_enable_wake(pdev, PCI_D3cold, 0);
bc7f75fa 5846
111b9dc5
JB
5847 e1000e_reset(adapter);
5848 ew32(WUS, ~0);
5849 result = PCI_ERS_RESULT_RECOVERED;
5850 }
bc7f75fa 5851
111b9dc5
JB
5852 pci_cleanup_aer_uncorrect_error_status(pdev);
5853
5854 return result;
bc7f75fa
AK
5855}
5856
5857/**
5858 * e1000_io_resume - called when traffic can start flowing again.
5859 * @pdev: Pointer to PCI device
5860 *
5861 * This callback is called when the error recovery driver tells us that
5862 * it's OK to resume normal operation. Implementation resembles the
5863 * second-half of the e1000_resume routine.
5864 */
5865static void e1000_io_resume(struct pci_dev *pdev)
5866{
5867 struct net_device *netdev = pci_get_drvdata(pdev);
5868 struct e1000_adapter *adapter = netdev_priv(netdev);
5869
cd791618 5870 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
5871
5872 if (netif_running(netdev)) {
5873 if (e1000e_up(adapter)) {
5874 dev_err(&pdev->dev,
5875 "can't bring device back up after reset\n");
5876 return;
5877 }
5878 }
5879
5880 netif_device_attach(netdev);
5881
ad68076e
BA
5882 /*
5883 * If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 5884 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
5885 * under the control of the driver.
5886 */
c43bc57e 5887 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 5888 e1000e_get_hw_control(adapter);
bc7f75fa
AK
5889
5890}
5891
5892static void e1000_print_device_info(struct e1000_adapter *adapter)
5893{
5894 struct e1000_hw *hw = &adapter->hw;
5895 struct net_device *netdev = adapter->netdev;
073287c0
BA
5896 u32 ret_val;
5897 u8 pba_str[E1000_PBANUM_LENGTH];
bc7f75fa
AK
5898
5899 /* print bus type/speed/width info */
a5cc7642 5900 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
44defeb3
JK
5901 /* bus width */
5902 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5903 "Width x1"),
5904 /* MAC address */
7c510e4b 5905 netdev->dev_addr);
44defeb3
JK
5906 e_info("Intel(R) PRO/%s Network Connection\n",
5907 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
073287c0
BA
5908 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5909 E1000_PBANUM_LENGTH);
5910 if (ret_val)
e0dc4f12 5911 strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
073287c0
BA
5912 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5913 hw->mac.type, hw->phy.type, pba_str);
bc7f75fa
AK
5914}
5915
10aa4c04
AK
5916static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5917{
5918 struct e1000_hw *hw = &adapter->hw;
5919 int ret_val;
5920 u16 buf = 0;
5921
5922 if (hw->mac.type != e1000_82573)
5923 return;
5924
5925 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
e243455d 5926 if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
10aa4c04 5927 /* Deep Smart Power Down (DSPD) */
6c2a9efa
FP
5928 dev_warn(&adapter->pdev->dev,
5929 "Warning: detected DSPD enabled in EEPROM\n");
10aa4c04 5930 }
10aa4c04
AK
5931}
5932
c8f44aff
MM
5933static int e1000_set_features(struct net_device *netdev,
5934 netdev_features_t features)
dc221294
BA
5935{
5936 struct e1000_adapter *adapter = netdev_priv(netdev);
c8f44aff 5937 netdev_features_t changed = features ^ netdev->features;
dc221294
BA
5938
5939 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
5940 adapter->flags |= FLAG_TSO_FORCE;
5941
5942 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
5943 NETIF_F_RXCSUM)))
5944 return 0;
5945
5946 if (netif_running(netdev))
5947 e1000e_reinit_locked(adapter);
5948 else
5949 e1000e_reset(adapter);
5950
5951 return 0;
5952}
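/*
 * Illustrative note (not driver code): 'changed' above is the XOR of the
 * requested and current feature masks, i.e. only the bits being toggled:
 *
 *	old     = NETIF_F_RXCSUM | NETIF_F_TSO;
 *	request = NETIF_F_TSO;
 *	changed = old ^ request;   // == NETIF_F_RXCSUM -> reinit/reset needed
 */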
5953
651c2466
SH
5954static const struct net_device_ops e1000e_netdev_ops = {
5955 .ndo_open = e1000_open,
5956 .ndo_stop = e1000_close,
00829823 5957 .ndo_start_xmit = e1000_xmit_frame,
67fd4fcb 5958 .ndo_get_stats64 = e1000e_get_stats64,
ef9b965a 5959 .ndo_set_rx_mode = e1000e_set_rx_mode,
651c2466
SH
5960 .ndo_set_mac_address = e1000_set_mac,
5961 .ndo_change_mtu = e1000_change_mtu,
5962 .ndo_do_ioctl = e1000_ioctl,
5963 .ndo_tx_timeout = e1000_tx_timeout,
5964 .ndo_validate_addr = eth_validate_addr,
5965
651c2466
SH
5966 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
5967 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
5968#ifdef CONFIG_NET_POLL_CONTROLLER
5969 .ndo_poll_controller = e1000_netpoll,
5970#endif
dc221294 5971 .ndo_set_features = e1000_set_features,
651c2466
SH
5972};
5973
bc7f75fa
AK
5974/**
5975 * e1000_probe - Device Initialization Routine
5976 * @pdev: PCI device information struct
5977 * @ent: entry in e1000_pci_tbl
5978 *
5979 * Returns 0 on success, negative on failure
5980 *
5981 * e1000_probe initializes an adapter identified by a pci_dev structure.
5982 * The OS initialization, configuring of the adapter private structure,
5983 * and a hardware reset occur.
5984 **/
5985static int __devinit e1000_probe(struct pci_dev *pdev,
5986 const struct pci_device_id *ent)
5987{
5988 struct net_device *netdev;
5989 struct e1000_adapter *adapter;
5990 struct e1000_hw *hw;
5991 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
f47e81fc
BB
5992 resource_size_t mmio_start, mmio_len;
5993 resource_size_t flash_start, flash_len;
bc7f75fa
AK
5994
5995 static int cards_found;
78cd29d5 5996 u16 aspm_disable_flag = 0;
bc7f75fa
AK
5997 int i, err, pci_using_dac;
5998 u16 eeprom_data = 0;
5999 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6000
78cd29d5
BA
6001 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6002 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6f461f6c 6003 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
78cd29d5
BA
6004 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6005 if (aspm_disable_flag)
6006 e1000e_disable_aspm(pdev, aspm_disable_flag);
6e4f6f6b 6007
f0f422e5 6008 err = pci_enable_device_mem(pdev);
bc7f75fa
AK
6009 if (err)
6010 return err;
6011
6012 pci_using_dac = 0;
0be3f55f 6013 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
bc7f75fa 6014 if (!err) {
0be3f55f 6015 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
bc7f75fa
AK
6016 if (!err)
6017 pci_using_dac = 1;
6018 } else {
0be3f55f 6019 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
bc7f75fa 6020 if (err) {
0be3f55f
NN
6021 err = dma_set_coherent_mask(&pdev->dev,
6022 DMA_BIT_MASK(32));
bc7f75fa 6023 if (err) {
ef456f85 6024 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
bc7f75fa
AK
6025 goto err_dma;
6026 }
6027 }
6028 }
6029
e8de1481 6030 err = pci_request_selected_regions_exclusive(pdev,
f0f422e5
BA
6031 pci_select_bars(pdev, IORESOURCE_MEM),
6032 e1000e_driver_name);
bc7f75fa
AK
6033 if (err)
6034 goto err_pci_reg;
6035
68eac460 6036 /* AER (Advanced Error Reporting) hooks */
19d5afd4 6037 pci_enable_pcie_error_reporting(pdev);
68eac460 6038
bc7f75fa 6039 pci_set_master(pdev);
438b365a
BA
6040 /* PCI config space info */
6041 err = pci_save_state(pdev);
6042 if (err)
6043 goto err_alloc_etherdev;
bc7f75fa
AK
6044
6045 err = -ENOMEM;
6046 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6047 if (!netdev)
6048 goto err_alloc_etherdev;
6049
bc7f75fa
AK
6050 SET_NETDEV_DEV(netdev, &pdev->dev);
6051
f85e4dfa
TH
6052 netdev->irq = pdev->irq;
6053
bc7f75fa
AK
6054 pci_set_drvdata(pdev, netdev);
6055 adapter = netdev_priv(netdev);
6056 hw = &adapter->hw;
6057 adapter->netdev = netdev;
6058 adapter->pdev = pdev;
6059 adapter->ei = ei;
6060 adapter->pba = ei->pba;
6061 adapter->flags = ei->flags;
eb7c3adb 6062 adapter->flags2 = ei->flags2;
bc7f75fa
AK
6063 adapter->hw.adapter = adapter;
6064 adapter->hw.mac.type = ei->mac;
2adc55c9 6065 adapter->max_hw_frame_size = ei->max_hw_frame_size;
bc7f75fa
AK
6066 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
6067
6068 mmio_start = pci_resource_start(pdev, 0);
6069 mmio_len = pci_resource_len(pdev, 0);
6070
6071 err = -EIO;
6072 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6073 if (!adapter->hw.hw_addr)
6074 goto err_ioremap;
6075
6076 if ((adapter->flags & FLAG_HAS_FLASH) &&
6077 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6078 flash_start = pci_resource_start(pdev, 1);
6079 flash_len = pci_resource_len(pdev, 1);
6080 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6081 if (!adapter->hw.flash_address)
6082 goto err_flashmap;
6083 }
6084
6085 /* construct the net_device struct */
651c2466 6086 netdev->netdev_ops = &e1000e_netdev_ops;
bc7f75fa 6087 e1000e_set_ethtool_ops(netdev);
bc7f75fa
AK
6088 netdev->watchdog_timeo = 5 * HZ;
6089 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
bc7f75fa
AK
6090 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
6091
6092 netdev->mem_start = mmio_start;
6093 netdev->mem_end = mmio_start + mmio_len;
6094
6095 adapter->bd_number = cards_found++;
6096
4662e82b
BA
6097 e1000e_check_options(adapter);
6098
bc7f75fa
AK
6099 /* setup adapter struct */
6100 err = e1000_sw_init(adapter);
6101 if (err)
6102 goto err_sw_init;
6103
bc7f75fa
AK
6104 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6105 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6106 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6107
69e3fd8c 6108 err = ei->get_variants(adapter);
bc7f75fa
AK
6109 if (err)
6110 goto err_hw_init;
6111
4a770358
BA
6112 if ((adapter->flags & FLAG_IS_ICH) &&
6113 (adapter->flags & FLAG_READ_ONLY_NVM))
6114 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6115
bc7f75fa
AK
6116 hw->mac.ops.get_bus_info(&adapter->hw);
6117
318a94d6 6118 adapter->hw.phy.autoneg_wait_to_complete = 0;
bc7f75fa
AK
6119
6120 /* Copper options */
318a94d6 6121 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
bc7f75fa
AK
6122 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6123 adapter->hw.phy.disable_polarity_correction = 0;
6124 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6125 }
6126
6127 if (e1000_check_reset_block(&adapter->hw))
44defeb3 6128 e_info("PHY reset is blocked due to SOL/IDER session.\n");
bc7f75fa 6129
dc221294
BA
6130 /* Set initial default active device features */
6131 netdev->features = (NETIF_F_SG |
6132 NETIF_F_HW_VLAN_RX |
6133 NETIF_F_HW_VLAN_TX |
6134 NETIF_F_TSO |
6135 NETIF_F_TSO6 |
6136 NETIF_F_RXCSUM |
6137 NETIF_F_HW_CSUM);
6138
6139 /* Set user-changeable features (subset of all device features) */
6140 netdev->hw_features = netdev->features;
bc7f75fa
AK
6141
6142 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6143 netdev->features |= NETIF_F_HW_VLAN_FILTER;
6144
dc221294
BA
6145 netdev->vlan_features |= (NETIF_F_SG |
6146 NETIF_F_TSO |
6147 NETIF_F_TSO6 |
6148 NETIF_F_HW_CSUM);
a5136e23 6149
ef9b965a
JB
6150 netdev->priv_flags |= IFF_UNICAST_FLT;
6151
7b872a55 6152 if (pci_using_dac) {
bc7f75fa 6153 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
6154 netdev->vlan_features |= NETIF_F_HIGHDMA;
6155 }
bc7f75fa 6156
bc7f75fa
AK
6157 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6158 adapter->flags |= FLAG_MNG_PT_ENABLED;
6159
ad68076e
BA
6160 /*
6161 * before reading the NVM, reset the controller to
6162 * put the device in a known good starting state
6163 */
bc7f75fa
AK
6164 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6165
6166 /*
6167 * systems with ASPM and others may see the checksum fail on the first
6168 * attempt. Let's give it a few tries
6169 */
6170 for (i = 0;; i++) {
6171 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6172 break;
6173 if (i == 2) {
44defeb3 6174 e_err("The NVM Checksum Is Not Valid\n");
bc7f75fa
AK
6175 err = -EIO;
6176 goto err_eeprom;
6177 }
6178 }
6179
10aa4c04
AK
6180 e1000_eeprom_checks(adapter);
6181
608f8a0d 6182 /* copy the MAC address */
bc7f75fa 6183 if (e1000e_read_mac_addr(&adapter->hw))
44defeb3 6184 e_err("NVM Read Error while reading MAC address\n");
bc7f75fa
AK
6185
6186 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6187 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6188
6189 if (!is_valid_ether_addr(netdev->perm_addr)) {
7c510e4b 6190 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
bc7f75fa
AK
6191 err = -EIO;
6192 goto err_eeprom;
6193 }
6194
6195 init_timer(&adapter->watchdog_timer);
c061b18d 6196 adapter->watchdog_timer.function = e1000_watchdog;
bc7f75fa
AK
6197 adapter->watchdog_timer.data = (unsigned long) adapter;
6198
6199 init_timer(&adapter->phy_info_timer);
c061b18d 6200 adapter->phy_info_timer.function = e1000_update_phy_info;
bc7f75fa
AK
6201 adapter->phy_info_timer.data = (unsigned long) adapter;
6202
6203 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6204 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
a8f88ff5
JB
6205 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6206 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
41cec6f1 6207 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
bc7f75fa 6208
bc7f75fa
AK
6209 /* Initialize link parameters. User can change them with ethtool */
6210 adapter->hw.mac.autoneg = 1;
3db1cd5c 6211 adapter->fc_autoneg = true;
5c48ef3e
BA
6212 adapter->hw.fc.requested_mode = e1000_fc_default;
6213 adapter->hw.fc.current_mode = e1000_fc_default;
bc7f75fa
AK
6214 adapter->hw.phy.autoneg_advertised = 0x2f;
6215
6216 /* ring size defaults */
6217 adapter->rx_ring->count = 256;
6218 adapter->tx_ring->count = 256;
6219
6220 /*
6221 * Initial Wake on LAN setting - If APM wake is enabled in
6222 * the EEPROM, enable the ACPI Magic Packet filter
6223 */
6224 if (adapter->flags & FLAG_APME_IN_WUC) {
6225 /* APME bit in EEPROM is mapped to WUC.APME */
6226 eeprom_data = er32(WUC);
6227 eeprom_apme_mask = E1000_WUC_APME;
4def99bb
BA
6228 if ((hw->mac.type > e1000_ich10lan) &&
6229 (eeprom_data & E1000_WUC_PHY_WAKE))
a4f58f54 6230 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
bc7f75fa
AK
6231 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6232 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6233 (adapter->hw.bus.func == 1))
6234 e1000_read_nvm(&adapter->hw,
6235 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
6236 else
6237 e1000_read_nvm(&adapter->hw,
6238 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
6239 }
6240
6241 /* fetch WoL from EEPROM */
6242 if (eeprom_data & eeprom_apme_mask)
6243 adapter->eeprom_wol |= E1000_WUFC_MAG;
6244
6245 /*
6246 * now that we have the eeprom settings, apply the special cases
6247 * where the eeprom may be wrong or the board simply won't support
6248 * wake on lan on a particular port
6249 */
6250 if (!(adapter->flags & FLAG_HAS_WOL))
6251 adapter->eeprom_wol = 0;
6252
6253 /* initialize the wol settings based on the eeprom settings */
6254 adapter->wol = adapter->eeprom_wol;
6ff68026 6255 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
bc7f75fa 6256
84527590
BA
6257 /* save off EEPROM version number */
6258 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6259
bc7f75fa
AK
6260 /* reset the hardware with the new settings */
6261 e1000e_reset(adapter);
6262
ad68076e
BA
6263 /*
6264 * If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 6265 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
6266 * under the control of the driver.
6267 */
c43bc57e 6268 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6269 e1000e_get_hw_control(adapter);
bc7f75fa 6270
e0dc4f12 6271 strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
bc7f75fa
AK
6272 err = register_netdev(netdev);
6273 if (err)
6274 goto err_register;
6275
9c563d20
JB
6276 /* carrier off reporting is important to ethtool even BEFORE open */
6277 netif_carrier_off(netdev);
6278
bc7f75fa
AK
6279 e1000_print_device_info(adapter);
6280
f3ec4f87
AS
6281 if (pci_dev_run_wake(pdev))
6282 pm_runtime_put_noidle(&pdev->dev);
23606cf5 6283
bc7f75fa
AK
6284 return 0;
6285
6286err_register:
c43bc57e 6287 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6288 e1000e_release_hw_control(adapter);
bc7f75fa
AK
6289err_eeprom:
6290 if (!e1000_check_reset_block(&adapter->hw))
6291 e1000_phy_hw_reset(&adapter->hw);
c43bc57e 6292err_hw_init:
bc7f75fa
AK
6293 kfree(adapter->tx_ring);
6294 kfree(adapter->rx_ring);
6295err_sw_init:
c43bc57e
JB
6296 if (adapter->hw.flash_address)
6297 iounmap(adapter->hw.flash_address);
e82f54ba 6298 e1000e_reset_interrupt_capability(adapter);
c43bc57e 6299err_flashmap:
bc7f75fa
AK
6300 iounmap(adapter->hw.hw_addr);
6301err_ioremap:
6302 free_netdev(netdev);
6303err_alloc_etherdev:
f0f422e5
BA
6304 pci_release_selected_regions(pdev,
6305 pci_select_bars(pdev, IORESOURCE_MEM));
bc7f75fa
AK
6306err_pci_reg:
6307err_dma:
6308 pci_disable_device(pdev);
6309 return err;
6310}
6311
6312/**
6313 * e1000_remove - Device Removal Routine
6314 * @pdev: PCI device information struct
6315 *
6316 * e1000_remove is called by the PCI subsystem to alert the driver
6317 * that it should release a PCI device. This could be caused by a
6318 * Hot-Plug event, or because the driver is going to be removed from
6319 * memory.
6320 **/
6321static void __devexit e1000_remove(struct pci_dev *pdev)
6322{
6323 struct net_device *netdev = pci_get_drvdata(pdev);
6324 struct e1000_adapter *adapter = netdev_priv(netdev);
23606cf5
RW
6325 bool down = test_bit(__E1000_DOWN, &adapter->state);
6326
ad68076e 6327 /*
23f333a2
TH
6328 * The timers may be rescheduled, so explicitly disable them
6329 * from being rescheduled.
ad68076e 6330 */
23606cf5
RW
6331 if (!down)
6332 set_bit(__E1000_DOWN, &adapter->state);
bc7f75fa
AK
6333 del_timer_sync(&adapter->watchdog_timer);
6334 del_timer_sync(&adapter->phy_info_timer);
6335
41cec6f1
BA
6336 cancel_work_sync(&adapter->reset_task);
6337 cancel_work_sync(&adapter->watchdog_task);
6338 cancel_work_sync(&adapter->downshift_task);
6339 cancel_work_sync(&adapter->update_phy_task);
6340 cancel_work_sync(&adapter->print_hang_task);
bc7f75fa 6341
17f208de
BA
6342 if (!(netdev->flags & IFF_UP))
6343 e1000_power_down_phy(adapter);
6344
23606cf5
RW
6345 /* Don't lie to e1000_close() down the road. */
6346 if (!down)
6347 clear_bit(__E1000_DOWN, &adapter->state);
17f208de
BA
6348 unregister_netdev(netdev);
6349
f3ec4f87
AS
6350 if (pci_dev_run_wake(pdev))
6351 pm_runtime_get_noresume(&pdev->dev);
23606cf5 6352
ad68076e
BA
6353 /*
6354 * Release control of h/w to f/w. If f/w is AMT enabled, this
6355 * would have already happened in close and is redundant.
6356 */
31dbe5b4 6357 e1000e_release_hw_control(adapter);
bc7f75fa 6358
4662e82b 6359 e1000e_reset_interrupt_capability(adapter);
bc7f75fa
AK
6360 kfree(adapter->tx_ring);
6361 kfree(adapter->rx_ring);
6362
6363 iounmap(adapter->hw.hw_addr);
6364 if (adapter->hw.flash_address)
6365 iounmap(adapter->hw.flash_address);
f0f422e5
BA
6366 pci_release_selected_regions(pdev,
6367 pci_select_bars(pdev, IORESOURCE_MEM));
bc7f75fa
AK
6368
6369 free_netdev(netdev);
6370
111b9dc5 6371 /* AER disable */
19d5afd4 6372 pci_disable_pcie_error_reporting(pdev);
111b9dc5 6373
bc7f75fa
AK
6374 pci_disable_device(pdev);
6375}
6376
6377/* PCI Error Recovery (ERS) */
6378static struct pci_error_handlers e1000_err_handler = {
6379 .error_detected = e1000_io_error_detected,
6380 .slot_reset = e1000_io_slot_reset,
6381 .resume = e1000_io_resume,
6382};
6383
a3aa1884 6384static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
bc7f75fa
AK
6385 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6386 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6387 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6388 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
6389 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6390 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
040babf9
AK
6391 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6392 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6393 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
ad68076e 6394
bc7f75fa
AK
6395 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6396 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6397 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6398 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
ad68076e 6399
bc7f75fa
AK
6400 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6401 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6402 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
ad68076e 6403
4662e82b 6404 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
bef28b11 6405 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
8c81c9c3 6406 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
4662e82b 6407
bc7f75fa
AK
6408 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6409 board_80003es2lan },
6410 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6411 board_80003es2lan },
6412 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6413 board_80003es2lan },
6414 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6415 board_80003es2lan },
ad68076e 6416
bc7f75fa
AK
6417 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6418 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6419 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6420 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6421 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6422 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6423 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
9e135a2e 6424 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
ad68076e 6425
bc7f75fa
AK
6426 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6427 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6428 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6429 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6430 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
2f15f9d6 6431 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
97ac8cae
BA
6432 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6433 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6434 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6435
6436 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6437 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6438 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
bc7f75fa 6439
f4187b56
BA
6440 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
6441 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
10df0b91 6442 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
f4187b56 6443
a4f58f54
BA
6444 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6445 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6446 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6447 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6448
d3738bb8
BA
6449 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6450 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6451
bc7f75fa
AK
6452 { } /* terminate list */
6453};
6454MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6455
aa338601 6456#ifdef CONFIG_PM
23606cf5 6457static const struct dev_pm_ops e1000_pm_ops = {
a0340162
RW
6458 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6459 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6460 e1000_runtime_resume, e1000_idle)
23606cf5 6461};
e50208a0 6462#endif
23606cf5 6463
bc7f75fa
AK
6464/* PCI Device API Driver */
6465static struct pci_driver e1000_driver = {
6466 .name = e1000e_driver_name,
6467 .id_table = e1000_pci_tbl,
6468 .probe = e1000_probe,
6469 .remove = __devexit_p(e1000_remove),
aa338601 6470#ifdef CONFIG_PM
23606cf5 6471 .driver.pm = &e1000_pm_ops,
bc7f75fa
AK
6472#endif
6473 .shutdown = e1000_shutdown,
6474 .err_handler = &e1000_err_handler
6475};
6476
6477/**
6478 * e1000_init_module - Driver Registration Routine
6479 *
6480 * e1000_init_module is the first routine called when the driver is
6481 * loaded. All it does is register with the PCI subsystem.
6482 **/
6483static int __init e1000_init_module(void)
6484{
6485 int ret;
8544b9f7
BA
6486 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6487 e1000e_driver_version);
0d6057e4 6488 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
bc7f75fa 6489 ret = pci_register_driver(&e1000_driver);
53ec5498 6490
bc7f75fa
AK
6491 return ret;
6492}
6493module_init(e1000_init_module);
6494
6495/**
6496 * e1000_exit_module - Driver Exit Cleanup Routine
6497 *
6498 * e1000_exit_module is called just before the driver is removed
6499 * from memory.
6500 **/
6501static void __exit e1000_exit_module(void)
6502{
6503 pci_unregister_driver(&e1000_driver);
6504}
6505module_exit(e1000_exit_module);
6506
6507
6508MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
6509MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6510MODULE_LICENSE("GPL");
6511MODULE_VERSION(DRV_VERSION);
6512
6513/* e1000_main.c */