e1000e: Fix PBA calculation for jumbo frame packets
drivers/net/e1000e/netdev.c
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include "e1000.h"

#define DRV_VERSION "0.2.0"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
};

#ifdef DEBUG
/**
 * e1000e_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->adapter->netdev->name;
}
#endif

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
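
/*
 * Example of the wrap-around case above: with ring->count = 256,
 * next_to_clean = 10 and next_to_use = 250, the free slots are
 * 256 + 10 - 250 - 1 = 15.  One slot is always left unused so that
 * next_to_use == next_to_clean unambiguously means "ring empty".
 */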

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @netdev: pointer to the netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, u16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(vlan) &
					 E1000_RXD_SPC_VLAN_MASK);
	else
		netif_receive_skb(skb);

	netdev->last_rx = jiffies;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
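
/*
 * Callers pack the descriptor fields for e1000_rx_checksum() as
 *
 *	status_err = (u32)status | ((u32)errors << 24);
 *
 * so the status byte sits in the low bits and the error byte in bits
 * 31:24, matching the (u16)/(>> 24) unpacking at the top of the
 * function.
 */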

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);
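		/*
		 * i now indexes the last descriptor actually written
		 * (one behind next_to_use, with wrap); this is the
		 * value handed to the tail register below.
		 */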

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~0;
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		/* Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
 *
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
			     16 /* for skb_reserve */ -
			     NET_IP_ALIGN;
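	/*
	 * In jumbo mode the packet data lands in the page attached to
	 * each buffer (see check_page below), so the skb itself only
	 * needs a small linear area for headers pulled up later via
	 * pskb_may_pull(); hence the fixed 256-byte allocation.
	 */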

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!buffer_info->page) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, also make sure the frame isn't just CRC only */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			ndev_dbg(netdev, "%s: Receive packet consumed "
				 "multiple buffers\n", netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				memcpy(new_skb->data - NET_IP_ALIGN,
				       skb->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct net_device *netdev = adapter->netdev;

	/* detected Tx unit hang */
	ndev_err(netdev,
		 "Detected Tx Unit Hang:\n"
		 "  TDH                  <%x>\n"
		 "  TDT                  <%x>\n"
		 "  next_to_use          <%x>\n"
		 "  next_to_clean        <%x>\n"
		 "buffer_info[next_to_clean]:\n"
		 "  time_stamp           <%lx>\n"
		 "  next_to_watch        <%x>\n"
		 "  jiffies              <%lx>\n"
		 "  next_to_watch.status <%x>\n",
		 readl(adapter->hw.hw_addr + tx_ring->head),
		 readl(adapter->hw.hw_addr + tx_ring->tail),
		 tx_ring->next_to_use,
		 tx_ring->next_to_clean,
		 tx_ring->buffer_info[eop].time_stamp,
		 eop,
		 jiffies,
		 eop_desc->upper.fields.status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = 0; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* approximate bytes on the wire: for TSO,
				 * count the headers once per segment */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (cleaned && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_irq_jumbo - Send received data up the network stack; jumbo
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_page(pdev,
			       buffer_info->dma,
			       PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if ((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb),
					       vaddr, length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* remove the Ethernet CRC */
		pskb_trim(skb, skb->len - 4);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			ndev_err(netdev, "__pskb_pull_tail failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
				 "up the full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			ndev_dbg(netdev, "%s: Last part of the packet spanning"
				 " multiple descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/* this looks ugly, but it seems compiler issues make it
		   more efficient than reusing j */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/* page alloc/put takes too long and affects small packet
		 * throughput, so unsplit small packets and save the alloc/put */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/* there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			/* remove the CRC */
			l1 -= 4;
			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive */
		pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
				pci_unmap_page(pdev, buffer_info->dma,
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM, so keep up with our
	 * enable/disable accounting */
	atomic_inc(&adapter->irq_sem);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else {
		atomic_dec(&adapter->irq_sem);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	u32 rctl, icr = er32(ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write, but it does mean we should
	 * account for it ASAP. */
	atomic_inc(&adapter->irq_sem);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else {
		atomic_dec(&adapter->irq_sem);
	}

	return IRQ_HANDLED;
}

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	void (*handler) = &e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		ndev_warn(netdev,
			  "Unable to allocate MSI interrupt Error: %d\n", err);
	} else {
		adapter->flags |= FLAG_MSI_ENABLED;
		handler = &e1000_intr_msi;
		irq_flags = 0;
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		if (adapter->flags & FLAG_MSI_ENABLED)
			pci_disable_msi(adapter->pdev);
		ndev_err(netdev,
			 "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	ew32(IMC, ~0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (atomic_dec_and_test(&adapter->irq_sem)) {
		ew32(IMS, IMS_ENABLE_MASK);
		e1e_flush();
	}
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT,
		     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT,
		     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		struct e1000_hw *hw = &adapter->hw;

		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		/* don't explicitly have to mess with MANC2H since
		 * MANC has an enable disable that gates MANC2H */
		ew32(MANC, manc);
	}
}

/**
 * e1000_alloc_ring_dma - allocate coherent DMA memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
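	/*
	 * e.g. 256 descriptors * 16 bytes per legacy Tx descriptor
	 * = 4096 bytes, already one full page; other counts are
	 * rounded up so the ring always occupies whole 4K pages.
	 */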

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&adapter->tx_queue_lock);

	return 0;
err:
	vfree(tx_ring->buffer_info);
	ndev_err(adapter->netdev,
	"Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	ndev_err(adapter->netdev,
	"Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;

	e1000_clean_rx_ring(adapter);

	for (i = 0; i < rx_ring->count; i++) {
		kfree(rx_ring->buffer_info[i].ps_pages);
	}

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512)) {
			retval = low_latency;
		}
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35) {
				retval = low_latency;
			}
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
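
/*
 * A worked example of the thresholds above: 100 packets / 45000 bytes
 * in one interval while in low_latency gives bytes > 10000 and
 * packets > 35 with bytes/packets = 450, so the state advances to
 * lowest_latency (small-packet, latency-sensitive traffic).
 */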

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
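		/*
		 * The ITR register counts in 256 ns units, so the line
		 * above converts interrupts/sec into that unit: for
		 * new_itr = 20000 ints/s it programs 10^9 / (20000 * 256)
		 * = ~195, i.e. roughly 50 us between interrupts.
		 */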
	}
}

/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(poll_dev))
		goto quit_polling;

	/* e1000_clean is called per-cpu.  This lock protects
	 * tx_ring from being cleaned by multiple cpus
	 * simultaneously.  A failure obtaining the lock means
	 * tx_ring is currently being cleaned anyway. */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((!tx_cleaned && (work_done < budget)) ||
	    !netif_running(poll_dev)) {
quit_polling:
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		netif_rx_complete(poll_dev, napi);
		e1000_irq_enable(adapter);
	}

	return work_done;
}

static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
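	/*
	 * The 4096 possible VIDs map onto a 128-entry array of 32-bit
	 * words: e.g. vid 100 selects index 100 >> 5 = 3 and bit
	 * 100 & 0x1F = 4, i.e. bit 4 of VFTA[3].
	 */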
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!adapter->vlgrp)
		return;

	if (!vlan_group_get_device(adapter->vlgrp, vid)) {
		adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		if (adapter->hw.mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		}

		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !vlan_group_get_device(adapter->vlgrp, old_vid))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* enable VLAN receive filtering */
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			ew32(RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* disable VLAN filtering */
			rctl = er32(RCTL);
			rctl &= ~E1000_RCTL_VFE;
			ew32(RCTL, rctl);
			if (adapter->mng_vlan_id !=
			    (u16)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	e1000_irq_enable(adapter);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/* disable hardware interception of ARP */
	manc &= ~(E1000_MANC_ARP_EN);

	/* enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
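	/* 623 and 664 are the RMCP and secure-RMCP ports used by
	 * remote management (ASF/IPMI) traffic */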
	manc2h |= E1000_MNG2HOST_PORT_623;
	manc2h |= E1000_MNG2HOST_PORT_664;
	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}

1861/**
1862 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1863 * @adapter: board private structure
1864 *
1865 * Configure the Tx unit of the MAC after a reset.
1866 **/
1867static void e1000_configure_tx(struct e1000_adapter *adapter)
1868{
1869 struct e1000_hw *hw = &adapter->hw;
1870 struct e1000_ring *tx_ring = adapter->tx_ring;
1871 u64 tdba;
1872 u32 tdlen, tctl, tipg, tarc;
1873 u32 ipgr1, ipgr2;
1874
1875 /* Setup the HW Tx Head and Tail descriptor pointers */
1876 tdba = tx_ring->dma;
1877 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
1878 ew32(TDBAL, (tdba & DMA_32BIT_MASK));
1879 ew32(TDBAH, (tdba >> 32));
1880 ew32(TDLEN, tdlen);
1881 ew32(TDH, 0);
1882 ew32(TDT, 0);
1883 tx_ring->head = E1000_TDH;
1884 tx_ring->tail = E1000_TDT;
1885
1886 /* Set the default values for the Tx Inter Packet Gap timer */
1887 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
1888 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
1889 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
1890
1891 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
1892 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
1893
1894 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1895 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1896 ew32(TIPG, tipg);
1897
1898 /* Set the Tx Interrupt Delay register */
1899 ew32(TIDV, adapter->tx_int_delay);
1900 /* tx irq moderation */
1901 ew32(TADV, adapter->tx_abs_int_delay);
1902
1903 /* Program the Transmit Control Register */
1904 tctl = er32(TCTL);
1905 tctl &= ~E1000_TCTL_CT;
1906 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1907 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1908
1909 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
1910 tarc = er32(TARC0);
1911 /* set the speed mode bit, we'll clear it if we're not at
1912 * gigabit link later */
1913#define SPEED_MODE_BIT (1 << 21)
1914 tarc |= SPEED_MODE_BIT;
1915 ew32(TARC0, tarc);
1916 }
1917
1918 /* errata: program both queues to unweighted RR */
1919 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
1920 tarc = er32(TARC0);
1921 tarc |= 1;
1922 ew32(TARC0, tarc);
1923 tarc = er32(TARC1);
1924 tarc |= 1;
1925 ew32(TARC1, tarc);
1926 }
1927
1928 e1000e_config_collision_dist(hw);
1929
1930 /* Setup Transmit Descriptor Settings for eop descriptor */
1931 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1932
1933 /* only set IDE if we are delaying interrupts using the timers */
1934 if (adapter->tx_int_delay)
1935 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1936
1937 /* enable Report Status bit */
1938 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1939
1940 ew32(TCTL, tctl);
1941
1942 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1943}
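/*
 * Worked example (editorial; assumes the usual e1000 TIPG layout with
 * IPGR1 at bit 10 and IPGR2 at bit 20): with the copper defaults used
 * above, the register value works out to
 *
 *	tipg = 8 | (8 << 10) | (6 << 20);	// == 0x00602008
 *
 * i.e. IPGT = 8, IPGR1 = 8, IPGR2 = 6 (or 7 on 80003ES2LAN).
 */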
1944
1945/**
1946 * e1000_setup_rctl - configure the receive control registers
1947 * @adapter: Board private structure
1948 **/
1949#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1950 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1951static void e1000_setup_rctl(struct e1000_adapter *adapter)
1952{
1953 struct e1000_hw *hw = &adapter->hw;
1954 u32 rctl, rfctl;
1955 u32 psrctl = 0;
1956 u32 pages = 0;
1957
1958 /* Program MC offset vector base */
1959 rctl = er32(RCTL);
1960 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1961 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1962 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1963 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1964
1965 /* Do not Store bad packets */
1966 rctl &= ~E1000_RCTL_SBP;
1967
1968 /* Enable Long Packet receive */
1969 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1970 rctl &= ~E1000_RCTL_LPE;
1971 else
1972 rctl |= E1000_RCTL_LPE;
1973
1974 /* Setup buffer sizes */
1975 rctl &= ~E1000_RCTL_SZ_4096;
1976 rctl |= E1000_RCTL_BSEX;
1977 switch (adapter->rx_buffer_len) {
1978 case 256:
1979 rctl |= E1000_RCTL_SZ_256;
1980 rctl &= ~E1000_RCTL_BSEX;
1981 break;
1982 case 512:
1983 rctl |= E1000_RCTL_SZ_512;
1984 rctl &= ~E1000_RCTL_BSEX;
1985 break;
1986 case 1024:
1987 rctl |= E1000_RCTL_SZ_1024;
1988 rctl &= ~E1000_RCTL_BSEX;
1989 break;
1990 case 2048:
1991 default:
1992 rctl |= E1000_RCTL_SZ_2048;
1993 rctl &= ~E1000_RCTL_BSEX;
1994 break;
1995 case 4096:
1996 rctl |= E1000_RCTL_SZ_4096;
1997 break;
1998 case 8192:
1999 rctl |= E1000_RCTL_SZ_8192;
2000 break;
2001 case 16384:
2002 rctl |= E1000_RCTL_SZ_16384;
2003 break;
2004 }
2005
2006 /*
2007 * 82571 and greater support packet-split where the protocol
2008 * header is placed in skb->data and the packet data is
2009 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2010 * In the case of a non-split, skb->data is linearly filled,
2011 * followed by the page buffers. Therefore, skb->data is
2012 * sized to hold the largest protocol header.
2013 *
2014 * allocations using alloc_page take too long for regular MTU
2015 * so only enable packet split for jumbo frames
2016 *
2017 * Using pages when the page size is greater than 16k wastes
2018 * a lot of memory, since we allocate 3 pages at all times
2019 * per packet.
2020 */
2021 adapter->rx_ps_pages = 0;
2022 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2023 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2024 adapter->rx_ps_pages = pages;
2025
2026 if (adapter->rx_ps_pages) {
2027 /* Configure extra packet-split registers */
2028 rfctl = er32(RFCTL);
2029 rfctl |= E1000_RFCTL_EXTEN;
2030 /* disable packet split support for IPv6 extension headers,
2031 * because some malformed IPv6 headers can hang the RX */
2032 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2033 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2034
2035 ew32(RFCTL, rfctl);
2036
2037 /* disable the stripping of CRC because it breaks
2038 * BMC firmware connected over SMBUS */
2039 rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
2040
2041 psrctl |= adapter->rx_ps_bsize0 >>
2042 E1000_PSRCTL_BSIZE0_SHIFT;
2043
2044 switch (adapter->rx_ps_pages) {
2045 case 3:
2046 psrctl |= PAGE_SIZE <<
2047 E1000_PSRCTL_BSIZE3_SHIFT;
2048 case 2:
2049 psrctl |= PAGE_SIZE <<
2050 E1000_PSRCTL_BSIZE2_SHIFT;
2051 case 1:
2052 psrctl |= PAGE_SIZE >>
2053 E1000_PSRCTL_BSIZE1_SHIFT;
2054 break;
2055 }
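		/*
		 * Editorial note: the missing breaks above are intentional
		 * fall-throughs -- configuring N packet-split pages also
		 * programs every lower-numbered buffer size, so for
		 * rx_ps_pages == 3 the BSIZE1, BSIZE2 and BSIZE3 fields are
		 * all set (BSIZE0, the header buffer, was programmed from
		 * rx_ps_bsize0 before the switch).
		 */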
2056
2057 ew32(PSRCTL, psrctl);
2058 }
2059
2060 ew32(RCTL, rctl);
2061}
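/*
 * Worked example (editorial; assumes 4 KiB pages): for a jumbo MTU of
 * 9000, PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3, so packet split is
 * enabled (pages <= 3 with LPE set); a 16000-byte MTU would need 4 pages
 * and fall back to the non-split receive path.
 */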
2062
2063/**
2064 * e1000_configure_rx - Configure Receive Unit after Reset
2065 * @adapter: board private structure
2066 *
2067 * Configure the Rx unit of the MAC after a reset.
2068 **/
2069static void e1000_configure_rx(struct e1000_adapter *adapter)
2070{
2071 struct e1000_hw *hw = &adapter->hw;
2072 struct e1000_ring *rx_ring = adapter->rx_ring;
2073 u64 rdba;
2074 u32 rdlen, rctl, rxcsum, ctrl_ext;
2075
2076 if (adapter->rx_ps_pages) {
2077 /* this is a 32 byte descriptor */
2078 rdlen = rx_ring->count *
2079 sizeof(union e1000_rx_desc_packet_split);
2080 adapter->clean_rx = e1000_clean_rx_irq_ps;
2081 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2082 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
2083 rdlen = rx_ring->count *
2084 sizeof(struct e1000_rx_desc);
2085 adapter->clean_rx = e1000_clean_rx_irq_jumbo;
2086 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
2087 } else {
2088 rdlen = rx_ring->count *
2089 sizeof(struct e1000_rx_desc);
2090 adapter->clean_rx = e1000_clean_rx_irq;
2091 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2092 }
2093
2094 /* disable receives while setting up the descriptors */
2095 rctl = er32(RCTL);
2096 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2097 e1e_flush();
2098 msleep(10);
2099
2100 /* set the Receive Delay Timer Register */
2101 ew32(RDTR, adapter->rx_int_delay);
2102
2103 /* irq moderation */
2104 ew32(RADV, adapter->rx_abs_int_delay);
2105 if (adapter->itr_setting != 0)
2106 ew32(ITR,
2107 1000000000 / (adapter->itr * 256));
2108
2109 ctrl_ext = er32(CTRL_EXT);
2110 /* Reset delay timers after every interrupt */
2111 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2112 /* Auto-Mask interrupts upon ICR access */
2113 ctrl_ext |= E1000_CTRL_EXT_IAME;
2114 ew32(IAM, 0xffffffff);
2115 ew32(CTRL_EXT, ctrl_ext);
2116 e1e_flush();
2117
2118 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2119 * the Base and Length of the Rx Descriptor Ring */
2120 rdba = rx_ring->dma;
2121 ew32(RDBAL, (rdba & DMA_32BIT_MASK));
2122 ew32(RDBAH, (rdba >> 32));
2123 ew32(RDLEN, rdlen);
2124 ew32(RDH, 0);
2125 ew32(RDT, 0);
2126 rx_ring->head = E1000_RDH;
2127 rx_ring->tail = E1000_RDT;
2128
2129 /* Enable Receive Checksum Offload for TCP and UDP */
2130 rxcsum = er32(RXCSUM);
2131 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2132 rxcsum |= E1000_RXCSUM_TUOFL;
2133
2134 /* IPv4 payload checksum for UDP fragments must be
2135 * used in conjunction with packet-split. */
2136 if (adapter->rx_ps_pages)
2137 rxcsum |= E1000_RXCSUM_IPPCSE;
2138 } else {
2139 rxcsum &= ~E1000_RXCSUM_TUOFL;
2140 /* no need to clear IPPCSE as it defaults to 0 */
2141 }
2142 ew32(RXCSUM, rxcsum);
2143
2144 /* Enable early receives on supported devices, only takes effect when
2145 * packet size is equal or larger than the specified value (in 8 byte
2146 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */
2147 if ((adapter->flags & FLAG_HAS_ERT) &&
2148 (adapter->netdev->mtu > ETH_DATA_LEN))
2149 ew32(ERT, E1000_ERT_2048);
2150
2151 /* Enable Receives */
2152 ew32(RCTL, rctl);
2153}
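/*
 * Worked example (editorial): the ITR register counts in 256 ns units,
 * so the expression above converts "interrupts per second" into an
 * interval. For an itr setting of 8000:
 *
 *	1000000000 / (8000 * 256) == 488	// ~125 us between irqs
 */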
2154
2155/**
2156 * e1000_mc_addr_list_update - Update Multicast addresses
2157 * @hw: pointer to the HW structure
2158 * @mc_addr_list: array of multicast addresses to program
2159 * @mc_addr_count: number of multicast addresses to program
2160 * @rar_used_count: the first RAR register free to program
2161 * @rar_count: total number of supported Receive Address Registers
2162 *
2163 * Updates the Receive Address Registers and Multicast Table Array.
2164 * The caller must have a packed mc_addr_list of multicast addresses.
2165 * The parameter rar_count will usually be hw->mac.rar_entry_count
2166 * unless there are workarounds that change this. Currently no func pointer
2167 * exists and all implementations are handled in the generic version of this
2168 * function.
2169 **/
2170static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list,
2171 u32 mc_addr_count, u32 rar_used_count,
2172 u32 rar_count)
2173{
2174 hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
2175 rar_used_count, rar_count);
2176}
2177
2178/**
2179 * e1000_set_multi - Multicast and Promiscuous mode set
2180 * @netdev: network interface device structure
2181 *
2182 * The set_multi entry point is called whenever the multicast address
2183 * list or the network interface flags are updated. This routine is
2184 * responsible for configuring the hardware for proper multicast,
2185 * promiscuous mode, and all-multi behavior.
2186 **/
2187static void e1000_set_multi(struct net_device *netdev)
2188{
2189 struct e1000_adapter *adapter = netdev_priv(netdev);
2190 struct e1000_hw *hw = &adapter->hw;
2191 struct e1000_mac_info *mac = &hw->mac;
2192 struct dev_mc_list *mc_ptr;
2193 u8 *mta_list;
2194 u32 rctl;
2195 int i;
2196
2197 /* Check for Promiscuous and All Multicast modes */
2198
2199 rctl = er32(RCTL);
2200
2201 if (netdev->flags & IFF_PROMISC) {
2202 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2203 } else if (netdev->flags & IFF_ALLMULTI) {
2204 rctl |= E1000_RCTL_MPE;
2205 rctl &= ~E1000_RCTL_UPE;
2206 } else {
2207 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2208 }
2209
2210 ew32(RCTL, rctl);
2211
2212 if (netdev->mc_count) {
 2213 		mta_list = kmalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2214 if (!mta_list)
2215 return;
2216
2217 /* prepare a packed array of only addresses. */
2218 mc_ptr = netdev->mc_list;
2219
2220 for (i = 0; i < netdev->mc_count; i++) {
2221 if (!mc_ptr)
2222 break;
2223 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
2224 ETH_ALEN);
2225 mc_ptr = mc_ptr->next;
2226 }
2227
2228 e1000_mc_addr_list_update(hw, mta_list, i, 1,
2229 mac->rar_entry_count);
2230 kfree(mta_list);
2231 } else {
2232 /*
2233 * if we're called from probe, we might not have
2234 * anything to do here, so clear out the list
2235 */
2236 e1000_mc_addr_list_update(hw, NULL, 0, 1,
2237 mac->rar_entry_count);
2238 }
2239}
2240
2241/**
2242 * e1000_configure - configure the hardware for RX and TX
2243 * @adapter: private board structure
2244 **/
2245static void e1000_configure(struct e1000_adapter *adapter)
2246{
2247 e1000_set_multi(adapter->netdev);
2248
2249 e1000_restore_vlan(adapter);
2250 e1000_init_manageability(adapter);
2251
2252 e1000_configure_tx(adapter);
2253 e1000_setup_rctl(adapter);
2254 e1000_configure_rx(adapter);
2255 adapter->alloc_rx_buf(adapter,
2256 e1000_desc_unused(adapter->rx_ring));
2257}
2258
2259/**
2260 * e1000e_power_up_phy - restore link in case the phy was powered down
2261 * @adapter: address of board private structure
2262 *
2263 * The phy may be powered down to save power and turn off link when the
2264 * driver is unloaded and wake on lan is not enabled (among others)
2265 * *** this routine MUST be followed by a call to e1000e_reset ***
2266 **/
2267void e1000e_power_up_phy(struct e1000_adapter *adapter)
2268{
2269 u16 mii_reg = 0;
2270
2271 /* Just clear the power down bit to wake the phy back up */
2272 if (adapter->hw.media_type == e1000_media_type_copper) {
2273 /* according to the manual, the phy will retain its
2274 * settings across a power-down/up cycle */
2275 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
2276 mii_reg &= ~MII_CR_POWER_DOWN;
2277 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
2278 }
2279
2280 adapter->hw.mac.ops.setup_link(&adapter->hw);
2281}
2282
2283/**
2284 * e1000_power_down_phy - Power down the PHY
2285 *
 2286  * Power down the PHY so no link is implied when interface is down.
 2287  * The PHY cannot be powered down if management or WoL is active.
2288 */
2289static void e1000_power_down_phy(struct e1000_adapter *adapter)
2290{
2291 struct e1000_hw *hw = &adapter->hw;
2292 u16 mii_reg;
2293
2294 /* WoL is enabled */
 2295 	if (adapter->wol)
2296 return;
2297
2298 /* non-copper PHY? */
2299 if (adapter->hw.media_type != e1000_media_type_copper)
2300 return;
2301
2302 /* reset is blocked because of a SoL/IDER session */
2303 if (e1000e_check_mng_mode(hw) ||
2304 e1000_check_reset_block(hw))
2305 return;
2306
 2307 	/* manageability (AMT) is enabled */
2308 if (er32(MANC) & E1000_MANC_SMBUS_EN)
2309 return;
2310
2311 /* power down the PHY */
2312 e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2313 mii_reg |= MII_CR_POWER_DOWN;
2314 e1e_wphy(hw, PHY_CONTROL, mii_reg);
2315 mdelay(1);
2316}
2317
2318/**
2319 * e1000e_reset - bring the hardware into a known good state
2320 *
2321 * This function boots the hardware and enables some settings that
2322 * require a configuration cycle of the hardware - those cannot be
2323 * set/changed during runtime. After reset the device needs to be
2324 * properly configured for rx, tx etc.
2325 */
2326void e1000e_reset(struct e1000_adapter *adapter)
2327{
2328 struct e1000_mac_info *mac = &adapter->hw.mac;
2329 struct e1000_hw *hw = &adapter->hw;
2330 u32 tx_space, min_tx_space, min_rx_space;
 2331 	u32 pba;
 2332 	u16 hwm;
 2333 
2334 ew32(PBA, adapter->pba);
2335
 2336 	if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
2337 /* To maintain wire speed transmits, the Tx FIFO should be
2338 * large enough to accommodate two full transmit packets,
2339 * rounded up to the next 1KB and expressed in KB. Likewise,
2340 * the Rx FIFO should be large enough to accommodate at least
2341 * one full receive packet and is similarly rounded up and
2342 * expressed in KB. */
 2343 		pba = er32(PBA);
 2344 		/* upper 16 bits has Tx packet buffer allocation size in KB */
 2345 		tx_space = pba >> 16;
 2346 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 2347 		pba &= 0xffff;
2348 /* the tx fifo also stores 16 bytes of information about the tx
2349 * but don't include ethernet FCS because hardware appends it */
2350 min_tx_space = (mac->max_frame_size +
2351 sizeof(struct e1000_tx_desc) -
2352 ETH_FCS_LEN) * 2;
2353 min_tx_space = ALIGN(min_tx_space, 1024);
2354 min_tx_space >>= 10;
2355 /* software strips receive CRC, so leave room for it */
2356 min_rx_space = mac->max_frame_size;
2357 min_rx_space = ALIGN(min_rx_space, 1024);
2358 min_rx_space >>= 10;
2359
2360 /* If current Tx allocation is less than the min Tx FIFO size,
2361 * and the min Tx FIFO size is less than the current Rx FIFO
2362 * allocation, take space away from current Rx allocation */
2363 if ((tx_space < min_tx_space) &&
2364 ((min_tx_space - tx_space) < pba)) {
2365 pba -= min_tx_space - tx_space;
2366
2367 /* if short on rx space, rx wins and must trump tx
2368 * adjustment or use Early Receive if available */
 2369 			if ((pba < min_rx_space) &&
 2370 			    (!(adapter->flags & FLAG_HAS_ERT)))
 2371 				/* ERT enabled in e1000_configure_rx */
 2372 				pba = min_rx_space;
 2373 		}
2374
2375 ew32(PBA, pba);
2376 }
2377
2378
2379 /* flow control settings */
2380 /* The high water mark must be low enough to fit one full frame
2381 * (or the size used for early receive) above it in the Rx FIFO.
2382 * Set it to the lower of:
2383 * - 90% of the Rx FIFO size, and
2384 * - the full Rx FIFO size minus the early receive size (for parts
2385 * with ERT support assuming ERT set to E1000_ERT_2048), or
2386 * - the full Rx FIFO size minus one full frame */
2387 if (adapter->flags & FLAG_HAS_ERT)
2388 hwm = min(((adapter->pba << 10) * 9 / 10),
2389 ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
2390 else
2391 hwm = min(((adapter->pba << 10) * 9 / 10),
2392 ((adapter->pba << 10) - mac->max_frame_size));
2393
2394 mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
2395 mac->fc_low_water = mac->fc_high_water - 8;
2396
2397 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2398 mac->fc_pause_time = 0xFFFF;
2399 else
2400 mac->fc_pause_time = E1000_FC_PAUSE_TIME;
2401 mac->fc = mac->original_fc;
2402
2403 /* Allow time for pending master requests to run */
2404 mac->ops.reset_hw(hw);
2405 ew32(WUC, 0);
2406
2407 if (mac->ops.init_hw(hw))
2408 ndev_err(adapter->netdev, "Hardware Error\n");
2409
2410 e1000_update_mng_vlan(adapter);
2411
2412 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2413 ew32(VET, ETH_P_8021Q);
2414
2415 e1000e_reset_adaptive(hw);
2416 e1000_get_phy_info(hw);
2417
2418 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2419 u16 phy_data = 0;
2420 /* speed up time to link by disabling smart power down, ignore
2421 * the return value of this function because there is nothing
2422 * different we would do if it failed */
2423 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2424 phy_data &= ~IGP02E1000_PM_SPD;
2425 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2426 }
2427
2428 e1000_release_manageability(adapter);
2429}
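/*
 * Worked example (editorial; assumes a 9018-byte max frame for a 9000
 * MTU and a 16-byte struct e1000_tx_desc):
 *
 *	min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10;	// 18 KB
 *	min_rx_space = ALIGN(9018, 1024) >> 10;			//  9 KB
 *
 * If the Tx allocation read back from PBA is below 18 KB, the shortfall
 * is taken from the Rx allocation, but never below min_rx_space unless
 * the part has Early Receive (ERT) to compensate.
 */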
2430
2431int e1000e_up(struct e1000_adapter *adapter)
2432{
2433 struct e1000_hw *hw = &adapter->hw;
2434
2435 /* hardware has been reset, we need to reload some things */
2436 e1000_configure(adapter);
2437
2438 clear_bit(__E1000_DOWN, &adapter->state);
2439
2440 napi_enable(&adapter->napi);
2441 e1000_irq_enable(adapter);
2442
2443 /* fire a link change interrupt to start the watchdog */
2444 ew32(ICS, E1000_ICS_LSC);
2445 return 0;
2446}
2447
2448void e1000e_down(struct e1000_adapter *adapter)
2449{
2450 struct net_device *netdev = adapter->netdev;
2451 struct e1000_hw *hw = &adapter->hw;
2452 u32 tctl, rctl;
2453
2454 /* signal that we're down so the interrupt handler does not
2455 * reschedule our watchdog timer */
2456 set_bit(__E1000_DOWN, &adapter->state);
2457
2458 /* disable receives in the hardware */
2459 rctl = er32(RCTL);
2460 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2461 /* flush and sleep below */
2462
2463 netif_stop_queue(netdev);
2464
2465 /* disable transmits in the hardware */
2466 tctl = er32(TCTL);
2467 tctl &= ~E1000_TCTL_EN;
2468 ew32(TCTL, tctl);
2469 /* flush both disables and wait for them to finish */
2470 e1e_flush();
2471 msleep(10);
2472
2473 napi_disable(&adapter->napi);
2474 e1000_irq_disable(adapter);
2475
2476 del_timer_sync(&adapter->watchdog_timer);
2477 del_timer_sync(&adapter->phy_info_timer);
2478
2479 netdev->tx_queue_len = adapter->tx_queue_len;
2480 netif_carrier_off(netdev);
2481 adapter->link_speed = 0;
2482 adapter->link_duplex = 0;
2483
2484 e1000e_reset(adapter);
2485 e1000_clean_tx_ring(adapter);
2486 e1000_clean_rx_ring(adapter);
2487
2488 /*
2489 * TODO: for power management, we could drop the link and
2490 * pci_disable_device here.
2491 */
2492}
2493
2494void e1000e_reinit_locked(struct e1000_adapter *adapter)
2495{
2496 might_sleep();
2497 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
2498 msleep(1);
2499 e1000e_down(adapter);
2500 e1000e_up(adapter);
2501 clear_bit(__E1000_RESETTING, &adapter->state);
2502}
2503
2504/**
2505 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2506 * @adapter: board private structure to initialize
2507 *
2508 * e1000_sw_init initializes the Adapter private data structure.
2509 * Fields are initialized based on PCI device information and
2510 * OS network device settings (MTU size).
2511 **/
2512static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2513{
2514 struct e1000_hw *hw = &adapter->hw;
2515 struct net_device *netdev = adapter->netdev;
2516
2517 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2518 adapter->rx_ps_bsize0 = 128;
2519 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2520 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2521
2522 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2523 if (!adapter->tx_ring)
2524 goto err;
2525
2526 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2527 if (!adapter->rx_ring)
2528 goto err;
2529
2530 spin_lock_init(&adapter->tx_queue_lock);
2531
2532 /* Explicitly disable IRQ since the NIC can be in any state. */
2533 atomic_set(&adapter->irq_sem, 0);
2534 e1000_irq_disable(adapter);
2535
2536 spin_lock_init(&adapter->stats_lock);
2537
2538 set_bit(__E1000_DOWN, &adapter->state);
2539 return 0;
2540
2541err:
2542 ndev_err(netdev, "Unable to allocate memory for queues\n");
2543 kfree(adapter->rx_ring);
2544 kfree(adapter->tx_ring);
2545 return -ENOMEM;
2546}
2547
2548/**
2549 * e1000_open - Called when a network interface is made active
2550 * @netdev: network interface device structure
2551 *
2552 * Returns 0 on success, negative value on failure
2553 *
2554 * The open entry point is called when a network interface is made
2555 * active by the system (IFF_UP). At this point all resources needed
2556 * for transmit and receive operations are allocated, the interrupt
2557 * handler is registered with the OS, the watchdog timer is started,
2558 * and the stack is notified that the interface is ready.
2559 **/
2560static int e1000_open(struct net_device *netdev)
2561{
2562 struct e1000_adapter *adapter = netdev_priv(netdev);
2563 struct e1000_hw *hw = &adapter->hw;
2564 int err;
2565
2566 /* disallow open during test */
2567 if (test_bit(__E1000_TESTING, &adapter->state))
2568 return -EBUSY;
2569
2570 /* allocate transmit descriptors */
2571 err = e1000e_setup_tx_resources(adapter);
2572 if (err)
2573 goto err_setup_tx;
2574
2575 /* allocate receive descriptors */
2576 err = e1000e_setup_rx_resources(adapter);
2577 if (err)
2578 goto err_setup_rx;
2579
2580 e1000e_power_up_phy(adapter);
2581
2582 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2583 if ((adapter->hw.mng_cookie.status &
2584 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
2585 e1000_update_mng_vlan(adapter);
2586
2587 /* If AMT is enabled, let the firmware know that the network
2588 * interface is now open */
2589 if ((adapter->flags & FLAG_HAS_AMT) &&
2590 e1000e_check_mng_mode(&adapter->hw))
2591 e1000_get_hw_control(adapter);
2592
2593 /* before we allocate an interrupt, we must be ready to handle it.
2594 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2595 * as soon as we call pci_request_irq, so we have to setup our
2596 * clean_rx handler before we do so. */
2597 e1000_configure(adapter);
2598
2599 err = e1000_request_irq(adapter);
2600 if (err)
2601 goto err_req_irq;
2602
2603 /* From here on the code is the same as e1000e_up() */
2604 clear_bit(__E1000_DOWN, &adapter->state);
2605
2606 napi_enable(&adapter->napi);
2607
2608 e1000_irq_enable(adapter);
2609
2610 /* fire a link status change interrupt to start the watchdog */
2611 ew32(ICS, E1000_ICS_LSC);
2612
2613 return 0;
2614
2615err_req_irq:
2616 e1000_release_hw_control(adapter);
2617 e1000_power_down_phy(adapter);
2618 e1000e_free_rx_resources(adapter);
2619err_setup_rx:
2620 e1000e_free_tx_resources(adapter);
2621err_setup_tx:
2622 e1000e_reset(adapter);
2623
2624 return err;
2625}
2626
2627/**
2628 * e1000_close - Disables a network interface
2629 * @netdev: network interface device structure
2630 *
2631 * Returns 0, this is not allowed to fail
2632 *
2633 * The close entry point is called when an interface is de-activated
2634 * by the OS. The hardware is still under the drivers control, but
2635 * needs to be disabled. A global MAC reset is issued to stop the
2636 * hardware, and all transmit and receive resources are freed.
2637 **/
2638static int e1000_close(struct net_device *netdev)
2639{
2640 struct e1000_adapter *adapter = netdev_priv(netdev);
2641
2642 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
2643 e1000e_down(adapter);
2644 e1000_power_down_phy(adapter);
2645 e1000_free_irq(adapter);
2646
2647 e1000e_free_tx_resources(adapter);
2648 e1000e_free_rx_resources(adapter);
2649
2650 /* kill manageability vlan ID if supported, but not if a vlan with
2651 * the same ID is registered on the host OS (let 8021q kill it) */
2652 if ((adapter->hw.mng_cookie.status &
2653 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2654 !(adapter->vlgrp &&
2655 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2656 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2657
2658 /* If AMT is enabled, let the firmware know that the network
2659 * interface is now closed */
2660 if ((adapter->flags & FLAG_HAS_AMT) &&
2661 e1000e_check_mng_mode(&adapter->hw))
2662 e1000_release_hw_control(adapter);
2663
2664 return 0;
2665}
2666/**
2667 * e1000_set_mac - Change the Ethernet Address of the NIC
2668 * @netdev: network interface device structure
2669 * @p: pointer to an address structure
2670 *
2671 * Returns 0 on success, negative on failure
2672 **/
2673static int e1000_set_mac(struct net_device *netdev, void *p)
2674{
2675 struct e1000_adapter *adapter = netdev_priv(netdev);
2676 struct sockaddr *addr = p;
2677
2678 if (!is_valid_ether_addr(addr->sa_data))
2679 return -EADDRNOTAVAIL;
2680
2681 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2682 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2683
2684 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2685
2686 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
2687 /* activate the work around */
2688 e1000e_set_laa_state_82571(&adapter->hw, 1);
2689
 2690 		/* Hold a copy of the LAA in RAR[14]. This is done so that
2691 * between the time RAR[0] gets clobbered and the time it
2692 * gets fixed (in e1000_watchdog), the actual LAA is in one
2693 * of the RARs and no incoming packets directed to this port
2694 * are dropped. Eventually the LAA will be in RAR[0] and
2695 * RAR[14] */
2696 e1000e_rar_set(&adapter->hw,
2697 adapter->hw.mac.addr,
2698 adapter->hw.mac.rar_entry_count - 1);
2699 }
2700
2701 return 0;
2702}
2703
2704/* Need to wait a few seconds after link up to get diagnostic information from
2705 * the phy */
2706static void e1000_update_phy_info(unsigned long data)
2707{
2708 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2709 e1000_get_phy_info(&adapter->hw);
2710}
2711
2712/**
2713 * e1000e_update_stats - Update the board statistics counters
2714 * @adapter: board private structure
2715 **/
2716void e1000e_update_stats(struct e1000_adapter *adapter)
2717{
2718 struct e1000_hw *hw = &adapter->hw;
2719 struct pci_dev *pdev = adapter->pdev;
2720 unsigned long irq_flags;
2721 u16 phy_tmp;
2722
2723#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2724
2725 /*
2726 * Prevent stats update while adapter is being reset, or if the pci
2727 * connection is down.
2728 */
2729 if (adapter->link_speed == 0)
2730 return;
2731 if (pci_channel_offline(pdev))
2732 return;
2733
2734 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2735
2736 /* these counters are modified from e1000_adjust_tbi_stats,
2737 * called from the interrupt context, so they must only
2738 * be written while holding adapter->stats_lock
2739 */
2740
2741 adapter->stats.crcerrs += er32(CRCERRS);
2742 adapter->stats.gprc += er32(GPRC);
2743 adapter->stats.gorcl += er32(GORCL);
2744 adapter->stats.gorch += er32(GORCH);
2745 adapter->stats.bprc += er32(BPRC);
2746 adapter->stats.mprc += er32(MPRC);
2747 adapter->stats.roc += er32(ROC);
2748
2749 if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2750 adapter->stats.prc64 += er32(PRC64);
2751 adapter->stats.prc127 += er32(PRC127);
2752 adapter->stats.prc255 += er32(PRC255);
2753 adapter->stats.prc511 += er32(PRC511);
2754 adapter->stats.prc1023 += er32(PRC1023);
2755 adapter->stats.prc1522 += er32(PRC1522);
2756 adapter->stats.symerrs += er32(SYMERRS);
2757 adapter->stats.sec += er32(SEC);
2758 }
2759
2760 adapter->stats.mpc += er32(MPC);
2761 adapter->stats.scc += er32(SCC);
2762 adapter->stats.ecol += er32(ECOL);
2763 adapter->stats.mcc += er32(MCC);
2764 adapter->stats.latecol += er32(LATECOL);
2765 adapter->stats.dc += er32(DC);
2766 adapter->stats.rlec += er32(RLEC);
2767 adapter->stats.xonrxc += er32(XONRXC);
2768 adapter->stats.xontxc += er32(XONTXC);
2769 adapter->stats.xoffrxc += er32(XOFFRXC);
2770 adapter->stats.xofftxc += er32(XOFFTXC);
2771 adapter->stats.fcruc += er32(FCRUC);
2772 adapter->stats.gptc += er32(GPTC);
2773 adapter->stats.gotcl += er32(GOTCL);
2774 adapter->stats.gotch += er32(GOTCH);
2775 adapter->stats.rnbc += er32(RNBC);
2776 adapter->stats.ruc += er32(RUC);
2777 adapter->stats.rfc += er32(RFC);
2778 adapter->stats.rjc += er32(RJC);
2779 adapter->stats.torl += er32(TORL);
2780 adapter->stats.torh += er32(TORH);
2781 adapter->stats.totl += er32(TOTL);
2782 adapter->stats.toth += er32(TOTH);
2783 adapter->stats.tpr += er32(TPR);
2784
2785 if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2786 adapter->stats.ptc64 += er32(PTC64);
2787 adapter->stats.ptc127 += er32(PTC127);
2788 adapter->stats.ptc255 += er32(PTC255);
2789 adapter->stats.ptc511 += er32(PTC511);
2790 adapter->stats.ptc1023 += er32(PTC1023);
2791 adapter->stats.ptc1522 += er32(PTC1522);
2792 }
2793
2794 adapter->stats.mptc += er32(MPTC);
2795 adapter->stats.bptc += er32(BPTC);
2796
2797 /* used for adaptive IFS */
2798
2799 hw->mac.tx_packet_delta = er32(TPT);
2800 adapter->stats.tpt += hw->mac.tx_packet_delta;
2801 hw->mac.collision_delta = er32(COLC);
2802 adapter->stats.colc += hw->mac.collision_delta;
2803
2804 adapter->stats.algnerrc += er32(ALGNERRC);
2805 adapter->stats.rxerrc += er32(RXERRC);
2806 adapter->stats.tncrs += er32(TNCRS);
2807 adapter->stats.cexterr += er32(CEXTERR);
2808 adapter->stats.tsctc += er32(TSCTC);
2809 adapter->stats.tsctfc += er32(TSCTFC);
2810
2811 adapter->stats.iac += er32(IAC);
2812
2813 if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
2814 adapter->stats.icrxoc += er32(ICRXOC);
2815 adapter->stats.icrxptc += er32(ICRXPTC);
2816 adapter->stats.icrxatc += er32(ICRXATC);
2817 adapter->stats.ictxptc += er32(ICTXPTC);
2818 adapter->stats.ictxatc += er32(ICTXATC);
2819 adapter->stats.ictxqec += er32(ICTXQEC);
2820 adapter->stats.ictxqmtc += er32(ICTXQMTC);
2821 adapter->stats.icrxdmtc += er32(ICRXDMTC);
2822 }
2823
2824 /* Fill out the OS statistics structure */
2825 adapter->net_stats.rx_packets = adapter->stats.gprc;
2826 adapter->net_stats.tx_packets = adapter->stats.gptc;
2827 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2828 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2829 adapter->net_stats.multicast = adapter->stats.mprc;
2830 adapter->net_stats.collisions = adapter->stats.colc;
2831
2832 /* Rx Errors */
2833
2834 /* RLEC on some newer hardware can be incorrect so build
2835 * our own version based on RUC and ROC */
2836 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2837 adapter->stats.crcerrs + adapter->stats.algnerrc +
2838 adapter->stats.ruc + adapter->stats.roc +
2839 adapter->stats.cexterr;
2840 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
2841 adapter->stats.roc;
2842 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2843 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2844 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2845
2846 /* Tx Errors */
2847 adapter->net_stats.tx_errors = adapter->stats.ecol +
2848 adapter->stats.latecol;
2849 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2850 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2851 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2852
2853 /* Tx Dropped needs to be maintained elsewhere */
2854
2855 /* Phy Stats */
2856 if (hw->media_type == e1000_media_type_copper) {
2857 if ((adapter->link_speed == SPEED_1000) &&
2858 (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
2859 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2860 adapter->phy_stats.idle_errors += phy_tmp;
2861 }
2862 }
2863
2864 /* Management Stats */
2865 adapter->stats.mgptc += er32(MGTPTC);
2866 adapter->stats.mgprc += er32(MGTPRC);
2867 adapter->stats.mgpdc += er32(MGTPDC);
2868
2869 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
2870}
2871
2872static void e1000_print_link_info(struct e1000_adapter *adapter)
2873{
2874 struct net_device *netdev = adapter->netdev;
2875 struct e1000_hw *hw = &adapter->hw;
2876 u32 ctrl = er32(CTRL);
2877
2878 ndev_info(netdev,
2879 "Link is Up %d Mbps %s, Flow Control: %s\n",
2880 adapter->link_speed,
2881 (adapter->link_duplex == FULL_DUPLEX) ?
2882 "Full Duplex" : "Half Duplex",
2883 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
2884 "RX/TX" :
2885 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
 2886 	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
2887}
2888
2889/**
2890 * e1000_watchdog - Timer Call-back
2891 * @data: pointer to adapter cast into an unsigned long
2892 **/
2893static void e1000_watchdog(unsigned long data)
2894{
2895 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2896
2897 /* Do the rest outside of interrupt context */
2898 schedule_work(&adapter->watchdog_task);
2899
2900 /* TODO: make this use queue_delayed_work() */
2901}
2902
2903static void e1000_watchdog_task(struct work_struct *work)
2904{
2905 struct e1000_adapter *adapter = container_of(work,
2906 struct e1000_adapter, watchdog_task);
2907
2908 struct net_device *netdev = adapter->netdev;
2909 struct e1000_mac_info *mac = &adapter->hw.mac;
2910 struct e1000_ring *tx_ring = adapter->tx_ring;
2911 struct e1000_hw *hw = &adapter->hw;
2912 u32 link, tctl;
2913 s32 ret_val;
2914 int tx_pending = 0;
2915
2916 if ((netif_carrier_ok(netdev)) &&
2917 (er32(STATUS) & E1000_STATUS_LU))
2918 goto link_up;
2919
2920 ret_val = mac->ops.check_for_link(hw);
2921 if ((ret_val == E1000_ERR_PHY) &&
2922 (adapter->hw.phy.type == e1000_phy_igp_3) &&
2923 (er32(CTRL) &
2924 E1000_PHY_CTRL_GBE_DISABLE)) {
2925 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2926 ndev_info(netdev,
2927 "Gigabit has been disabled, downgrading speed\n");
2928 }
2929
2930 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
2931 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
2932 e1000_update_mng_vlan(adapter);
2933
2934 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2935 !(er32(TXCW) & E1000_TXCW_ANE))
2936 link = adapter->hw.mac.serdes_has_link;
2937 else
2938 link = er32(STATUS) & E1000_STATUS_LU;
2939
2940 if (link) {
2941 if (!netif_carrier_ok(netdev)) {
2942 bool txb2b = 1;
2943 mac->ops.get_link_up_info(&adapter->hw,
2944 &adapter->link_speed,
2945 &adapter->link_duplex);
2946 e1000_print_link_info(adapter);
2947 /* tweak tx_queue_len according to speed/duplex
2948 * and adjust the timeout factor */
2949 netdev->tx_queue_len = adapter->tx_queue_len;
2950 adapter->tx_timeout_factor = 1;
2951 switch (adapter->link_speed) {
2952 case SPEED_10:
2953 txb2b = 0;
2954 netdev->tx_queue_len = 10;
2955 adapter->tx_timeout_factor = 14;
2956 break;
2957 case SPEED_100:
2958 txb2b = 0;
2959 netdev->tx_queue_len = 100;
2960 /* maybe add some timeout factor ? */
2961 break;
2962 }
2963
2964 /* workaround: re-program speed mode bit after
2965 * link-up event */
2966 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
2967 !txb2b) {
2968 u32 tarc0;
2969 tarc0 = er32(TARC0);
2970 tarc0 &= ~SPEED_MODE_BIT;
2971 ew32(TARC0, tarc0);
2972 }
2973
2974 /* disable TSO for pcie and 10/100 speeds, to avoid
2975 * some hardware issues */
2976 if (!(adapter->flags & FLAG_TSO_FORCE)) {
2977 switch (adapter->link_speed) {
2978 case SPEED_10:
2979 case SPEED_100:
2980 ndev_info(netdev,
2981 "10/100 speed: disabling TSO\n");
2982 netdev->features &= ~NETIF_F_TSO;
2983 netdev->features &= ~NETIF_F_TSO6;
2984 break;
2985 case SPEED_1000:
2986 netdev->features |= NETIF_F_TSO;
2987 netdev->features |= NETIF_F_TSO6;
2988 break;
2989 default:
2990 /* oops */
2991 break;
2992 }
2993 }
2994
2995 /* enable transmits in the hardware, need to do this
2996 * after setting TARC0 */
2997 tctl = er32(TCTL);
2998 tctl |= E1000_TCTL_EN;
2999 ew32(TCTL, tctl);
3000
3001 netif_carrier_on(netdev);
3002 netif_wake_queue(netdev);
3003
3004 if (!test_bit(__E1000_DOWN, &adapter->state))
3005 mod_timer(&adapter->phy_info_timer,
3006 round_jiffies(jiffies + 2 * HZ));
3007 } else {
3008 /* make sure the receive unit is started */
3009 if (adapter->flags & FLAG_RX_NEEDS_RESTART) {
3010 u32 rctl = er32(RCTL);
3011 ew32(RCTL, rctl |
3012 E1000_RCTL_EN);
3013 }
3014 }
3015 } else {
3016 if (netif_carrier_ok(netdev)) {
3017 adapter->link_speed = 0;
3018 adapter->link_duplex = 0;
3019 ndev_info(netdev, "Link is Down\n");
3020 netif_carrier_off(netdev);
3021 netif_stop_queue(netdev);
3022 if (!test_bit(__E1000_DOWN, &adapter->state))
3023 mod_timer(&adapter->phy_info_timer,
3024 round_jiffies(jiffies + 2 * HZ));
3025
3026 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3027 schedule_work(&adapter->reset_task);
3028 }
3029 }
3030
3031link_up:
3032 e1000e_update_stats(adapter);
3033
3034 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
3035 adapter->tpt_old = adapter->stats.tpt;
3036 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
3037 adapter->colc_old = adapter->stats.colc;
3038
3039 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
3040 adapter->gorcl_old = adapter->stats.gorcl;
3041 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
3042 adapter->gotcl_old = adapter->stats.gotcl;
3043
3044 e1000e_update_adaptive(&adapter->hw);
3045
3046 if (!netif_carrier_ok(netdev)) {
3047 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
3048 tx_ring->count);
3049 if (tx_pending) {
3050 /* We've lost link, so the controller stops DMA,
3051 * but we've got queued Tx work that's never going
3052 * to get done, so reset controller to flush Tx.
3053 * (Do the reset outside of interrupt context). */
3054 adapter->tx_timeout_count++;
3055 schedule_work(&adapter->reset_task);
3056 }
3057 }
3058
3059 /* Cause software interrupt to ensure rx ring is cleaned */
3060 ew32(ICS, E1000_ICS_RXDMT0);
3061
3062 /* Force detection of hung controller every watchdog period */
3063 adapter->detect_tx_hung = 1;
3064
3065 /* With 82571 controllers, LAA may be overwritten due to controller
3066 * reset from the other port. Set the appropriate LAA in RAR[0] */
3067 if (e1000e_get_laa_state_82571(hw))
3068 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
3069
3070 /* Reset the timer */
3071 if (!test_bit(__E1000_DOWN, &adapter->state))
3072 mod_timer(&adapter->watchdog_timer,
3073 round_jiffies(jiffies + 2 * HZ));
3074}
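/*
 * Editorial note: e1000_desc_unused() reports at most count - 1 free
 * descriptors, so the "unused + 1 < count" test above is true exactly
 * when at least one descriptor is still in use. E.g. with a 256-entry
 * ring, 255 unused means empty (255 + 1 == 256), while 254 unused flags
 * pending Tx work and triggers the flush-by-reset.
 */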
3075
3076#define E1000_TX_FLAGS_CSUM 0x00000001
3077#define E1000_TX_FLAGS_VLAN 0x00000002
3078#define E1000_TX_FLAGS_TSO 0x00000004
3079#define E1000_TX_FLAGS_IPV4 0x00000008
3080#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
3081#define E1000_TX_FLAGS_VLAN_SHIFT 16
3082
3083static int e1000_tso(struct e1000_adapter *adapter,
3084 struct sk_buff *skb)
3085{
3086 struct e1000_ring *tx_ring = adapter->tx_ring;
3087 struct e1000_context_desc *context_desc;
3088 struct e1000_buffer *buffer_info;
3089 unsigned int i;
3090 u32 cmd_length = 0;
3091 u16 ipcse = 0, tucse, mss;
3092 u8 ipcss, ipcso, tucss, tucso, hdr_len;
3093 int err;
3094
3095 if (skb_is_gso(skb)) {
3096 if (skb_header_cloned(skb)) {
3097 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3098 if (err)
3099 return err;
3100 }
3101
3102 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3103 mss = skb_shinfo(skb)->gso_size;
3104 if (skb->protocol == htons(ETH_P_IP)) {
3105 struct iphdr *iph = ip_hdr(skb);
3106 iph->tot_len = 0;
3107 iph->check = 0;
3108 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3109 iph->daddr, 0,
3110 IPPROTO_TCP,
3111 0);
3112 cmd_length = E1000_TXD_CMD_IP;
3113 ipcse = skb_transport_offset(skb) - 1;
3114 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3115 ipv6_hdr(skb)->payload_len = 0;
3116 tcp_hdr(skb)->check =
3117 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3118 &ipv6_hdr(skb)->daddr,
3119 0, IPPROTO_TCP, 0);
3120 ipcse = 0;
3121 }
3122 ipcss = skb_network_offset(skb);
3123 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
3124 tucss = skb_transport_offset(skb);
3125 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3126 tucse = 0;
3127
3128 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
3129 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3130
3131 i = tx_ring->next_to_use;
3132 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3133 buffer_info = &tx_ring->buffer_info[i];
3134
3135 context_desc->lower_setup.ip_fields.ipcss = ipcss;
3136 context_desc->lower_setup.ip_fields.ipcso = ipcso;
3137 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
3138 context_desc->upper_setup.tcp_fields.tucss = tucss;
3139 context_desc->upper_setup.tcp_fields.tucso = tucso;
3140 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
3141 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3142 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
3143 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
3144
3145 buffer_info->time_stamp = jiffies;
3146 buffer_info->next_to_watch = i;
3147
3148 i++;
3149 if (i == tx_ring->count)
3150 i = 0;
3151 tx_ring->next_to_use = i;
3152
3153 return 1;
3154 }
3155
3156 return 0;
3157}
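/*
 * Worked example (editorial; assumes a plain IPv4/TCP frame with a
 * 14-byte Ethernet header and 20-byte IP header): the context descriptor
 * fields are byte offsets into the frame --
 *
 *	ipcss = 14;		// IP checksum start
 *	ipcso = 14 + 10;	// iphdr->check lives 10 bytes in
 *	ipcse = 34 - 1;		// last byte covered by the IP checksum
 *	tucss = 34;		// TCP checksum start
 *	tucso = 34 + 16;	// tcphdr->check lives 16 bytes in
 *
 * tucse = 0 means "checksum to the end of the packet".
 */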
3158
3159static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
3160{
3161 struct e1000_ring *tx_ring = adapter->tx_ring;
3162 struct e1000_context_desc *context_desc;
3163 struct e1000_buffer *buffer_info;
3164 unsigned int i;
3165 u8 css;
3166
3167 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3168 css = skb_transport_offset(skb);
3169
3170 i = tx_ring->next_to_use;
3171 buffer_info = &tx_ring->buffer_info[i];
3172 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3173
3174 context_desc->lower_setup.ip_config = 0;
3175 context_desc->upper_setup.tcp_fields.tucss = css;
3176 context_desc->upper_setup.tcp_fields.tucso =
3177 css + skb->csum_offset;
3178 context_desc->upper_setup.tcp_fields.tucse = 0;
3179 context_desc->tcp_seg_setup.data = 0;
3180 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
3181
3182 buffer_info->time_stamp = jiffies;
3183 buffer_info->next_to_watch = i;
3184
3185 i++;
3186 if (i == tx_ring->count)
3187 i = 0;
3188 tx_ring->next_to_use = i;
3189
3190 return 1;
3191 }
3192
3193 return 0;
3194}
3195
3196#define E1000_MAX_PER_TXD 8192
3197#define E1000_MAX_TXD_PWR 12
3198
3199static int e1000_tx_map(struct e1000_adapter *adapter,
3200 struct sk_buff *skb, unsigned int first,
3201 unsigned int max_per_txd, unsigned int nr_frags,
3202 unsigned int mss)
3203{
3204 struct e1000_ring *tx_ring = adapter->tx_ring;
3205 struct e1000_buffer *buffer_info;
3206 unsigned int len = skb->len - skb->data_len;
3207 unsigned int offset = 0, size, count = 0, i;
3208 unsigned int f;
3209
3210 i = tx_ring->next_to_use;
3211
3212 while (len) {
3213 buffer_info = &tx_ring->buffer_info[i];
3214 size = min(len, max_per_txd);
3215
3216 /* Workaround for premature desc write-backs
3217 * in TSO mode. Append 4-byte sentinel desc */
3218 if (mss && !nr_frags && size == len && size > 8)
3219 size -= 4;
3220
3221 buffer_info->length = size;
3222 /* set time_stamp *before* dma to help avoid a possible race */
3223 buffer_info->time_stamp = jiffies;
3224 buffer_info->dma =
3225 pci_map_single(adapter->pdev,
3226 skb->data + offset,
3227 size,
3228 PCI_DMA_TODEVICE);
3229 if (pci_dma_mapping_error(buffer_info->dma)) {
3230 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
3231 adapter->tx_dma_failed++;
3232 return -1;
3233 }
3234 buffer_info->next_to_watch = i;
3235
3236 len -= size;
3237 offset += size;
3238 count++;
3239 i++;
3240 if (i == tx_ring->count)
3241 i = 0;
3242 }
3243
3244 for (f = 0; f < nr_frags; f++) {
3245 struct skb_frag_struct *frag;
3246
3247 frag = &skb_shinfo(skb)->frags[f];
3248 len = frag->size;
3249 offset = frag->page_offset;
3250
3251 while (len) {
3252 buffer_info = &tx_ring->buffer_info[i];
3253 size = min(len, max_per_txd);
3254 /* Workaround for premature desc write-backs
3255 * in TSO mode. Append 4-byte sentinel desc */
3256 if (mss && f == (nr_frags-1) && size == len && size > 8)
3257 size -= 4;
3258
3259 buffer_info->length = size;
3260 buffer_info->time_stamp = jiffies;
3261 buffer_info->dma =
3262 pci_map_page(adapter->pdev,
3263 frag->page,
3264 offset,
3265 size,
3266 PCI_DMA_TODEVICE);
3267 if (pci_dma_mapping_error(buffer_info->dma)) {
3268 dev_err(&adapter->pdev->dev,
3269 "TX DMA page map failed\n");
3270 adapter->tx_dma_failed++;
3271 return -1;
3272 }
3273
3274 buffer_info->next_to_watch = i;
3275
3276 len -= size;
3277 offset += size;
3278 count++;
3279
3280 i++;
3281 if (i == tx_ring->count)
3282 i = 0;
3283 }
3284 }
3285
3286 if (i == 0)
3287 i = tx_ring->count - 1;
3288 else
3289 i--;
3290
3291 tx_ring->buffer_info[i].skb = skb;
3292 tx_ring->buffer_info[first].next_to_watch = i;
3293
3294 return count;
3295}
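/*
 * Editorial note: the "size -= 4" TSO workaround above does not drop
 * bytes; it leaves len == 4 after the shortened buffer, so the next loop
 * iteration maps those 4 bytes as a separate tiny descriptor -- the
 * sentinel that guards against the premature write-back erratum.
 */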
3296
3297static void e1000_tx_queue(struct e1000_adapter *adapter,
3298 int tx_flags, int count)
3299{
3300 struct e1000_ring *tx_ring = adapter->tx_ring;
3301 struct e1000_tx_desc *tx_desc = NULL;
3302 struct e1000_buffer *buffer_info;
3303 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3304 unsigned int i;
3305
3306 if (tx_flags & E1000_TX_FLAGS_TSO) {
3307 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3308 E1000_TXD_CMD_TSE;
3309 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3310
3311 if (tx_flags & E1000_TX_FLAGS_IPV4)
3312 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3313 }
3314
3315 if (tx_flags & E1000_TX_FLAGS_CSUM) {
3316 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3317 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3318 }
3319
3320 if (tx_flags & E1000_TX_FLAGS_VLAN) {
3321 txd_lower |= E1000_TXD_CMD_VLE;
3322 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3323 }
3324
3325 i = tx_ring->next_to_use;
3326
3327 while (count--) {
3328 buffer_info = &tx_ring->buffer_info[i];
3329 tx_desc = E1000_TX_DESC(*tx_ring, i);
3330 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3331 tx_desc->lower.data =
3332 cpu_to_le32(txd_lower | buffer_info->length);
3333 tx_desc->upper.data = cpu_to_le32(txd_upper);
3334
3335 i++;
3336 if (i == tx_ring->count)
3337 i = 0;
3338 }
3339
3340 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3341
3342 /* Force memory writes to complete before letting h/w
3343 * know there are new descriptors to fetch. (Only
3344 * applicable for weak-ordered memory model archs,
3345 * such as IA-64). */
3346 wmb();
3347
3348 tx_ring->next_to_use = i;
3349 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3350 /* we need this if more than one processor can write to our tail
3351 * at a time, it synchronizes IO on IA64/Altix systems */
3352 mmiowb();
3353}
3354
3355#define MINIMUM_DHCP_PACKET_SIZE 282
3356static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3357 struct sk_buff *skb)
3358{
3359 struct e1000_hw *hw = &adapter->hw;
3360 u16 length, offset;
3361
3362 if (vlan_tx_tag_present(skb)) {
3363 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
3364 && (adapter->hw.mng_cookie.status &
3365 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
3366 return 0;
3367 }
3368
3369 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
3370 return 0;
3371
3372 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
3373 return 0;
3374
3375 {
3376 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
3377 struct udphdr *udp;
3378
3379 if (ip->protocol != IPPROTO_UDP)
3380 return 0;
3381
3382 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
3383 if (ntohs(udp->dest) != 67)
3384 return 0;
3385
3386 offset = (u8 *)udp + 8 - skb->data;
3387 length = skb->len - offset;
3388 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
3389 }
3390
3391 return 0;
3392}
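/*
 * Editorial note: the magic numbers above are standard protocol offsets:
 * 14 is the Ethernet header length (ETH_HLEN), "ip->ihl << 2" converts
 * the IP header length from 32-bit words to bytes, destination port 67
 * is the BOOTP/DHCP server port, and "(u8 *)udp + 8" skips the fixed
 * 8-byte UDP header to reach the DHCP payload passed to the firmware.
 */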
3393
3394static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3395{
3396 struct e1000_adapter *adapter = netdev_priv(netdev);
3397
3398 netif_stop_queue(netdev);
3399 /* Herbert's original patch had:
3400 * smp_mb__after_netif_stop_queue();
3401 * but since that doesn't exist yet, just open code it. */
3402 smp_mb();
3403
3404 /* We need to check again in a case another CPU has just
3405 * made room available. */
3406 if (e1000_desc_unused(adapter->tx_ring) < size)
3407 return -EBUSY;
3408
3409 /* A reprieve! */
3410 netif_start_queue(netdev);
3411 ++adapter->restart_queue;
3412 return 0;
3413}
3414
3415static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
3416{
3417 struct e1000_adapter *adapter = netdev_priv(netdev);
3418
3419 if (e1000_desc_unused(adapter->tx_ring) >= size)
3420 return 0;
3421 return __e1000_maybe_stop_tx(netdev, size);
3422}
3423
 3424 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
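/*
 * Worked example (editorial): TXD_USE_COUNT() is a conservative ceiling
 * on descriptors per buffer. With X == E1000_MAX_TXD_PWR (12, i.e.
 * 4096-byte chunks):
 *
 *	TXD_USE_COUNT(10000, 12) == (10000 >> 12) + 1 == 3;
 *
 * it over-counts by one for exact multiples, which errs on the safe side
 * when reserving ring space.
 */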
3425static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3426{
3427 struct e1000_adapter *adapter = netdev_priv(netdev);
3428 struct e1000_ring *tx_ring = adapter->tx_ring;
3429 unsigned int first;
3430 unsigned int max_per_txd = E1000_MAX_PER_TXD;
3431 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3432 unsigned int tx_flags = 0;
 3433 	unsigned int len = skb->len - skb->data_len;
 3434 	unsigned long irq_flags;
 3435 	unsigned int nr_frags;
 3436 	unsigned int mss;
 3437 	int count = 0;
 3438 	int tso;
 3439 	unsigned int f;
3440
3441 if (test_bit(__E1000_DOWN, &adapter->state)) {
3442 dev_kfree_skb_any(skb);
3443 return NETDEV_TX_OK;
3444 }
3445
3446 if (skb->len <= 0) {
3447 dev_kfree_skb_any(skb);
3448 return NETDEV_TX_OK;
3449 }
3450
3451 mss = skb_shinfo(skb)->gso_size;
 3452 	/* The controller does a simple calculation to
 3453 	 * make sure there is enough room in the FIFO before
 3454 	 * initiating the DMA for each buffer; it effectively
 3455 	 * requires ceil(buffer len / mss) <= 4. To avoid
 3456 	 * overrunning the FIFO, cap the max buffer length
 3457 	 * if the mss drops. */
3458 if (mss) {
3459 u8 hdr_len;
3460 max_per_txd = min(mss << 2, max_per_txd);
3461 max_txd_pwr = fls(max_per_txd) - 1;
3462
3463 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3464 * points to just header, pull a few bytes of payload from
3465 * frags into skb->data */
3466 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 3467 		if (skb->data_len && (hdr_len == len)) {
3468 unsigned int pull_size;
3469
3470 pull_size = min((unsigned int)4, skb->data_len);
3471 if (!__pskb_pull_tail(skb, pull_size)) {
3472 ndev_err(netdev,
3473 "__pskb_pull_tail failed.\n");
3474 dev_kfree_skb_any(skb);
3475 return NETDEV_TX_OK;
3476 }
3477 len = skb->len - skb->data_len;
3478 }
3479 }
3480
3481 /* reserve a descriptor for the offload context */
3482 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3483 count++;
3484 count++;
3485
3486 count += TXD_USE_COUNT(len, max_txd_pwr);
3487
3488 nr_frags = skb_shinfo(skb)->nr_frags;
3489 for (f = 0; f < nr_frags; f++)
3490 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3491 max_txd_pwr);
3492
3493 if (adapter->hw.mac.tx_pkt_filtering)
3494 e1000_transfer_dhcp_info(adapter, skb);
3495
3496 if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
3497 /* Collision - tell upper layer to requeue */
3498 return NETDEV_TX_LOCKED;
3499
3500 /* need: count + 2 desc gap to keep tail from touching
3501 * head, otherwise try next time */
3502 if (e1000_maybe_stop_tx(netdev, count + 2)) {
3503 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3504 return NETDEV_TX_BUSY;
3505 }
3506
3507 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3508 tx_flags |= E1000_TX_FLAGS_VLAN;
3509 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3510 }
3511
3512 first = tx_ring->next_to_use;
3513
3514 tso = e1000_tso(adapter, skb);
3515 if (tso < 0) {
3516 dev_kfree_skb_any(skb);
3517 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3518 return NETDEV_TX_OK;
3519 }
3520
3521 if (tso)
3522 tx_flags |= E1000_TX_FLAGS_TSO;
3523 else if (e1000_tx_csum(adapter, skb))
3524 tx_flags |= E1000_TX_FLAGS_CSUM;
3525
 3526 	/* The old method assumed an IPv4 packet by default whenever TSO
 3527 	 * was enabled, but 82571 hardware supports TSO for IPv6 as well,
 3528 	 * so we can no longer assume; check the protocol explicitly. */
3529 if (skb->protocol == htons(ETH_P_IP))
3530 tx_flags |= E1000_TX_FLAGS_IPV4;
3531
3532 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
3533 if (count < 0) {
3534 /* handle pci_map_single() error in e1000_tx_map */
3535 dev_kfree_skb_any(skb);
3536 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
 3537 		return NETDEV_TX_OK;
3538 }
3539
3540 e1000_tx_queue(adapter, tx_flags, count);
3541
3542 netdev->trans_start = jiffies;
3543
3544 /* Make sure there is space in the ring for the next send. */
3545 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
3546
3547 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3548 return NETDEV_TX_OK;
3549}
3550
3551/**
3552 * e1000_tx_timeout - Respond to a Tx Hang
3553 * @netdev: network interface device structure
3554 **/
3555static void e1000_tx_timeout(struct net_device *netdev)
3556{
3557 struct e1000_adapter *adapter = netdev_priv(netdev);
3558
3559 /* Do the reset outside of interrupt context */
3560 adapter->tx_timeout_count++;
3561 schedule_work(&adapter->reset_task);
3562}
3563
3564static void e1000_reset_task(struct work_struct *work)
3565{
3566 struct e1000_adapter *adapter;
3567 adapter = container_of(work, struct e1000_adapter, reset_task);
3568
3569 e1000e_reinit_locked(adapter);
3570}
3571
3572/**
3573 * e1000_get_stats - Get System Network Statistics
3574 * @netdev: network interface device structure
3575 *
3576 * Returns the address of the device statistics structure.
3577 * The statistics are actually updated from the timer callback.
3578 **/
3579static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3580{
3581 struct e1000_adapter *adapter = netdev_priv(netdev);
3582
3583 /* only return the current stats */
3584 return &adapter->net_stats;
3585}
3586
3587/**
3588 * e1000_change_mtu - Change the Maximum Transfer Unit
3589 * @netdev: network interface device structure
3590 * @new_mtu: new value for maximum frame size
3591 *
3592 * Returns 0 on success, negative on failure
3593 **/
3594static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3595{
3596 struct e1000_adapter *adapter = netdev_priv(netdev);
3597 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3598
3599 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3600 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3601 ndev_err(netdev, "Invalid MTU setting\n");
3602 return -EINVAL;
3603 }
3604
3605 /* Jumbo frame size limits */
3606 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
3607 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
3608 ndev_err(netdev, "Jumbo Frames not supported.\n");
3609 return -EINVAL;
3610 }
3611 if (adapter->hw.phy.type == e1000_phy_ife) {
3612 ndev_err(netdev, "Jumbo Frames not supported.\n");
3613 return -EINVAL;
3614 }
3615 }
3616
3617#define MAX_STD_JUMBO_FRAME_SIZE 9234
3618 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3619 ndev_err(netdev, "MTU > 9216 not supported.\n");
3620 return -EINVAL;
3621 }
3622
3623 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3624 msleep(1);
3625 /* e1000e_down has a dependency on max_frame_size */
3626 adapter->hw.mac.max_frame_size = max_frame;
3627 if (netif_running(netdev))
3628 e1000e_down(adapter);
3629
3630 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3631 * means we reserve 2 more, this pushes us to allocate from the next
3632 * larger slab size.
3633 * i.e. RXBUFFER_2048 --> size-4096 slab
3634 * however with the new *_jumbo* routines, jumbo receives will use
3635 * fragmented skbs */
3636
3637 if (max_frame <= 256)
3638 adapter->rx_buffer_len = 256;
3639 else if (max_frame <= 512)
3640 adapter->rx_buffer_len = 512;
3641 else if (max_frame <= 1024)
3642 adapter->rx_buffer_len = 1024;
3643 else if (max_frame <= 2048)
3644 adapter->rx_buffer_len = 2048;
3645 else
3646 adapter->rx_buffer_len = 4096;
3647
3648 /* adjust allocation if LPE protects us, and we aren't using SBP */
3649 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3650 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
3651 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
 3652 					 + ETH_FCS_LEN;
3653
3654 ndev_info(netdev, "changing MTU from %d to %d\n",
3655 netdev->mtu, new_mtu);
3656 netdev->mtu = new_mtu;
3657
3658 if (netif_running(netdev))
3659 e1000e_up(adapter);
3660 else
3661 e1000e_reset(adapter);
3662
3663 clear_bit(__E1000_RESETTING, &adapter->state);
3664
3665 return 0;
3666}
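/*
 * Worked example (editorial): for the default MTU of 1500, max_frame =
 * 1500 + 14 + 4 = 1518. That first selects the 2048-byte bucket and is
 * then trimmed by the LPE adjustment above to exactly ETH_FRAME_LEN +
 * VLAN_HLEN + ETH_FCS_LEN (1522 bytes), since long-packet checking
 * already protects the buffer.
 */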
3667
3668static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
3669 int cmd)
3670{
3671 struct e1000_adapter *adapter = netdev_priv(netdev);
3672 struct mii_ioctl_data *data = if_mii(ifr);
3673 unsigned long irq_flags;
3674
3675 if (adapter->hw.media_type != e1000_media_type_copper)
3676 return -EOPNOTSUPP;
3677
3678 switch (cmd) {
3679 case SIOCGMIIPHY:
3680 data->phy_id = adapter->hw.phy.addr;
3681 break;
3682 case SIOCGMIIREG:
3683 if (!capable(CAP_NET_ADMIN))
3684 return -EPERM;
3685 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
3686 if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F,
3687 &data->val_out)) {
3688 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3689 return -EIO;
3690 }
3691 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3692 break;
3693 case SIOCSMIIREG:
3694 default:
3695 return -EOPNOTSUPP;
3696 }
3697 return 0;
3698}
3699
3700static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3701{
3702 switch (cmd) {
3703 case SIOCGMIIPHY:
3704 case SIOCGMIIREG:
3705 case SIOCSMIIREG:
3706 return e1000_mii_ioctl(netdev, ifr, cmd);
3707 default:
3708 return -EOPNOTSUPP;
3709 }
3710}
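/*
 * A minimal userspace sketch (not part of this driver) of the
 * SIOCGMIIPHY/SIOCGMIIREG path handled above; the interface name "eth0"
 * and the choice of MII_BMSR are assumptions for illustration only.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *				(struct mii_ioctl_data *)&ifr.ifr_data;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)
 *			return 1;
 *		mii->reg_num = MII_BMSR;
 *		if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
 *			return 1;
 *		printf("PHY %d BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
 *		close(fd);
 *		return 0;
 *	}
 */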
3711
3712static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3713{
3714 struct net_device *netdev = pci_get_drvdata(pdev);
3715 struct e1000_adapter *adapter = netdev_priv(netdev);
3716 struct e1000_hw *hw = &adapter->hw;
3717 u32 ctrl, ctrl_ext, rctl, status;
3718 u32 wufc = adapter->wol;
3719 int retval = 0;
3720
3721 netif_device_detach(netdev);
3722
3723 if (netif_running(netdev)) {
3724 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3725 e1000e_down(adapter);
3726 e1000_free_irq(adapter);
3727 }
3728
3729 retval = pci_save_state(pdev);
3730 if (retval)
3731 return retval;
3732
3733 status = er32(STATUS);
3734 if (status & E1000_STATUS_LU)
3735 wufc &= ~E1000_WUFC_LNKC;
3736
3737 if (wufc) {
3738 e1000_setup_rctl(adapter);
3739 e1000_set_multi(netdev);
3740
3741 /* turn on all-multi mode if wake on multicast is enabled */
3742 if (wufc & E1000_WUFC_MC) {
3743 rctl = er32(RCTL);
3744 rctl |= E1000_RCTL_MPE;
3745 ew32(RCTL, rctl);
3746 }
3747
3748 ctrl = er32(CTRL);
3749 /* advertise wake from D3Cold */
3750 #define E1000_CTRL_ADVD3WUC 0x00100000
3751 /* phy power management enable */
3752 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
3753 ctrl |= E1000_CTRL_ADVD3WUC |
3754 E1000_CTRL_EN_PHY_PWR_MGMT;
3755 ew32(CTRL, ctrl);
3756
3757 if (adapter->hw.media_type == e1000_media_type_fiber ||
3758 adapter->hw.media_type == e1000_media_type_internal_serdes) {
3759 /* keep the laser running in D3 */
3760 ctrl_ext = er32(CTRL_EXT);
3761 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3762 ew32(CTRL_EXT, ctrl_ext);
3763 }
3764
3765 /* Allow time for pending master requests to run */
3766 e1000e_disable_pcie_master(&adapter->hw);
3767
3768 ew32(WUC, E1000_WUC_PME_EN);
3769 ew32(WUFC, wufc);
3770 pci_enable_wake(pdev, PCI_D3hot, 1);
3771 pci_enable_wake(pdev, PCI_D3cold, 1);
3772 } else {
3773 ew32(WUC, 0);
3774 ew32(WUFC, 0);
3775 pci_enable_wake(pdev, PCI_D3hot, 0);
3776 pci_enable_wake(pdev, PCI_D3cold, 0);
3777 }
3778
3779 e1000_release_manageability(adapter);
3780
3781 /* make sure adapter isn't asleep if manageability is enabled */
3782 if (adapter->flags & FLAG_MNG_PT_ENABLED) {
3783 pci_enable_wake(pdev, PCI_D3hot, 1);
3784 pci_enable_wake(pdev, PCI_D3cold, 1);
3785 }
3786
3787 if (adapter->hw.phy.type == e1000_phy_igp_3)
3788 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
3789
3790 /* Release control of h/w to f/w. If f/w is AMT enabled, this
3791 * would have already happened in close and is redundant. */
3792 e1000_release_hw_control(adapter);
3793
3794 pci_disable_device(pdev);
3795
3796 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3797
3798 return 0;
3799}
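/*
 * A hedged usage note: the wufc filter bits programmed above are normally
 * configured from userspace, e.g. "ethtool -s eth0 wol g" to request
 * Magic Packet wake (interface name illustrative); the result lands in
 * adapter->wol before this routine runs.
 */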
3800
3801#ifdef CONFIG_PM
3802static int e1000_resume(struct pci_dev *pdev)
3803{
3804 struct net_device *netdev = pci_get_drvdata(pdev);
3805 struct e1000_adapter *adapter = netdev_priv(netdev);
3806 struct e1000_hw *hw = &adapter->hw;
3807 int err;
3808
3809 pci_set_power_state(pdev, PCI_D0);
3810 pci_restore_state(pdev);
3811 err = pci_enable_device(pdev);
3812 if (err) {
3813 dev_err(&pdev->dev,
3814 "Cannot enable PCI device from suspend\n");
3815 return err;
3816 }
3817
3818 pci_set_master(pdev);
3819
3820 pci_enable_wake(pdev, PCI_D3hot, 0);
3821 pci_enable_wake(pdev, PCI_D3cold, 0);
3822
3823 if (netif_running(netdev)) {
3824 err = e1000_request_irq(adapter);
3825 if (err)
3826 return err;
3827 }
3828
3829 e1000e_power_up_phy(adapter);
3830 e1000e_reset(adapter);
3831 ew32(WUS, ~0);
3832
3833 e1000_init_manageability(adapter);
3834
3835 if (netif_running(netdev))
3836 e1000e_up(adapter);
3837
3838 netif_device_attach(netdev);
3839
3840 /* If the controller has AMT, do not set DRV_LOAD until the interface
3841 * is up. For all other cases, let the f/w know that the h/w is now
3842 * under the control of the driver. */
3843 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
3844 e1000_get_hw_control(adapter);
3845
3846 return 0;
3847}
3848#endif
3849
3850static void e1000_shutdown(struct pci_dev *pdev)
3851{
3852 e1000_suspend(pdev, PMSG_SUSPEND);
3853}
3854
3855#ifdef CONFIG_NET_POLL_CONTROLLER
3856/*
3857 * Polling 'interrupt' - used by things like netconsole to send skbs
3858 * without having to re-enable interrupts. It's not called while
3859 * the interrupt routine is executing.
3860 */
3861static void e1000_netpoll(struct net_device *netdev)
3862{
3863 struct e1000_adapter *adapter = netdev_priv(netdev);
3864
3865 disable_irq(adapter->pdev->irq);
3866 e1000_intr(adapter->pdev->irq, netdev);
3867
3868 e1000_clean_tx_irq(adapter);
3869
3870 enable_irq(adapter->pdev->irq);
3871}
3872#endif
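/*
 * One consumer of this hook is netconsole; a hedged example of loading it,
 * with all addresses and the MAC as placeholders:
 *
 *	modprobe netconsole \
 *	    netconsole=6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55
 */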
3873
3874/**
3875 * e1000_io_error_detected - called when PCI error is detected
3876 * @pdev: Pointer to PCI device
3877 * @state: The current pci connection state
3878 *
3879 * This function is called after a PCI bus error affecting
3880 * this device has been detected.
3881 */
3882static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
3883 pci_channel_state_t state)
3884{
3885 struct net_device *netdev = pci_get_drvdata(pdev);
3886 struct e1000_adapter *adapter = netdev_priv(netdev);
3887
3888 netif_device_detach(netdev);
3889
3890 if (netif_running(netdev))
3891 e1000e_down(adapter);
3892 pci_disable_device(pdev);
3893
3894 /* Request a slot reset. */
3895 return PCI_ERS_RESULT_NEED_RESET;
3896}
3897
3898/**
3899 * e1000_io_slot_reset - called after the pci bus has been reset.
3900 * @pdev: Pointer to PCI device
3901 *
3902 * Restart the card from scratch, as if from a cold-boot. Implementation
3903 * resembles the first-half of the e1000_resume routine.
3904 */
3905static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
3906{
3907 struct net_device *netdev = pci_get_drvdata(pdev);
3908 struct e1000_adapter *adapter = netdev_priv(netdev);
3909 struct e1000_hw *hw = &adapter->hw;
3910
3911 if (pci_enable_device(pdev)) {
3912 dev_err(&pdev->dev,
3913 "Cannot re-enable PCI device after reset.\n");
3914 return PCI_ERS_RESULT_DISCONNECT;
3915 }
3916 pci_set_master(pdev);
3917
3918 pci_enable_wake(pdev, PCI_D3hot, 0);
3919 pci_enable_wake(pdev, PCI_D3cold, 0);
3920
3921 e1000e_reset(adapter);
3922 ew32(WUS, ~0);
3923
3924 return PCI_ERS_RESULT_RECOVERED;
3925}
3926
3927/**
3928 * e1000_io_resume - called when traffic can start flowing again.
3929 * @pdev: Pointer to PCI device
3930 *
3931 * This callback is called when the error recovery driver tells us that
3932 * it's OK to resume normal operation. Implementation resembles the
3933 * second-half of the e1000_resume routine.
3934 */
3935static void e1000_io_resume(struct pci_dev *pdev)
3936{
3937 struct net_device *netdev = pci_get_drvdata(pdev);
3938 struct e1000_adapter *adapter = netdev_priv(netdev);
3939
3940 e1000_init_manageability(adapter);
3941
3942 if (netif_running(netdev)) {
3943 if (e1000e_up(adapter)) {
3944 dev_err(&pdev->dev,
3945 "can't bring device back up after reset\n");
3946 return;
3947 }
3948 }
3949
3950 netif_device_attach(netdev);
3951
3952 /* If the controller has AMT, do not set DRV_LOAD until the interface
3953 * is up. For all other cases, let the f/w know that the h/w is now
3954 * under the control of the driver. */
3955 if (!(adapter->flags & FLAG_HAS_AMT) ||
3956 !e1000e_check_mng_mode(&adapter->hw))
3957 e1000_get_hw_control(adapter);
3958
3959}
3960
3961static void e1000_print_device_info(struct e1000_adapter *adapter)
3962{
3963 struct e1000_hw *hw = &adapter->hw;
3964 struct net_device *netdev = adapter->netdev;
3965 u32 part_num;
3966
3967 /* print bus type/speed/width info */
3968 ndev_info(netdev, "(PCI Express:2.5Gb/s:%s) "
3969 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3970 /* bus width */
3971 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
3972 "Width x1"),
3973 /* MAC address */
3974 netdev->dev_addr[0], netdev->dev_addr[1],
3975 netdev->dev_addr[2], netdev->dev_addr[3],
3976 netdev->dev_addr[4], netdev->dev_addr[5]);
3977 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
3978 (hw->phy.type == e1000_phy_ife)
3979 ? "10/100" : "1000");
3980 e1000e_read_part_num(hw, &part_num);
3981 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3982 hw->mac.type, hw->phy.type,
3983 (part_num >> 8), (part_num & 0xff));
3984}
3985
3986/**
3987 * e1000_probe - Device Initialization Routine
3988 * @pdev: PCI device information struct
3989 * @ent: entry in e1000_pci_tbl
3990 *
3991 * Returns 0 on success, negative on failure
3992 *
3993 * e1000_probe initializes an adapter identified by a pci_dev structure.
3994 * The OS initialization, configuring of the adapter private structure,
3995 * and a hardware reset occur.
3996 **/
3997static int __devinit e1000_probe(struct pci_dev *pdev,
3998 const struct pci_device_id *ent)
3999{
4000 struct net_device *netdev;
4001 struct e1000_adapter *adapter;
4002 struct e1000_hw *hw;
4003 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
4004 unsigned long mmio_start, mmio_len;
4005 unsigned long flash_start, flash_len;
4006
4007 static int cards_found;
4008 int i, err, pci_using_dac;
4009 u16 eeprom_data = 0;
4010 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4011
4012 err = pci_enable_device(pdev);
4013 if (err)
4014 return err;
4015
4016 pci_using_dac = 0;
4017 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
4018 if (!err) {
4019 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4020 if (!err)
4021 pci_using_dac = 1;
4022 } else {
4023 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4024 if (err) {
4025 err = pci_set_consistent_dma_mask(pdev,
4026 DMA_32BIT_MASK);
4027 if (err) {
4028 dev_err(&pdev->dev, "No usable DMA "
4029 "configuration, aborting\n");
4030 goto err_dma;
4031 }
4032 }
4033 }
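	/* a successful 64-bit mask here is what later enables NETIF_F_HIGHDMA,
	 * letting the hardware DMA directly to and from highmem pages */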
4034
4035 err = pci_request_regions(pdev, e1000e_driver_name);
4036 if (err)
4037 goto err_pci_reg;
4038
4039 pci_set_master(pdev);
4040
4041 err = -ENOMEM;
4042 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
4043 if (!netdev)
4044 goto err_alloc_etherdev;
4045
4046 SET_NETDEV_DEV(netdev, &pdev->dev);
4047
4048 pci_set_drvdata(pdev, netdev);
4049 adapter = netdev_priv(netdev);
4050 hw = &adapter->hw;
4051 adapter->netdev = netdev;
4052 adapter->pdev = pdev;
4053 adapter->ei = ei;
4054 adapter->pba = ei->pba;
4055 adapter->flags = ei->flags;
4056 adapter->hw.adapter = adapter;
4057 adapter->hw.mac.type = ei->mac;
4058 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4059
4060 mmio_start = pci_resource_start(pdev, 0);
4061 mmio_len = pci_resource_len(pdev, 0);
4062
4063 err = -EIO;
4064 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
4065 if (!adapter->hw.hw_addr)
4066 goto err_ioremap;
4067
4068 if ((adapter->flags & FLAG_HAS_FLASH) &&
4069 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
4070 flash_start = pci_resource_start(pdev, 1);
4071 flash_len = pci_resource_len(pdev, 1);
4072 adapter->hw.flash_address = ioremap(flash_start, flash_len);
4073 if (!adapter->hw.flash_address)
4074 goto err_flashmap;
4075 }
4076
4077 /* construct the net_device struct */
4078 netdev->open = &e1000_open;
4079 netdev->stop = &e1000_close;
4080 netdev->hard_start_xmit = &e1000_xmit_frame;
4081 netdev->get_stats = &e1000_get_stats;
4082 netdev->set_multicast_list = &e1000_set_multi;
4083 netdev->set_mac_address = &e1000_set_mac;
4084 netdev->change_mtu = &e1000_change_mtu;
4085 netdev->do_ioctl = &e1000_ioctl;
4086 e1000e_set_ethtool_ops(netdev);
4087 netdev->tx_timeout = &e1000_tx_timeout;
4088 netdev->watchdog_timeo = 5 * HZ;
4089 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
4090 netdev->vlan_rx_register = e1000_vlan_rx_register;
4091 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
4092 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
4093#ifdef CONFIG_NET_POLL_CONTROLLER
4094 netdev->poll_controller = e1000_netpoll;
4095#endif
4096 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
4097
4098 netdev->mem_start = mmio_start;
4099 netdev->mem_end = mmio_start + mmio_len;
4100
4101 adapter->bd_number = cards_found++;
4102
4103 /* setup adapter struct */
4104 err = e1000_sw_init(adapter);
4105 if (err)
4106 goto err_sw_init;
4107
4108 err = -EIO;
4109
4110 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
4111 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
4112 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
4113
4114 err = ei->get_invariants(adapter);
4115 if (err)
4116 goto err_hw_init;
4117
4118 hw->mac.ops.get_bus_info(&adapter->hw);
4119
4120 adapter->hw.phy.wait_for_link = 0;
4121
4122 /* Copper options */
4123 if (adapter->hw.media_type == e1000_media_type_copper) {
4124 adapter->hw.phy.mdix = AUTO_ALL_MODES;
4125 adapter->hw.phy.disable_polarity_correction = 0;
4126 adapter->hw.phy.ms_type = e1000_ms_hw_default;
4127 }
4128
4129 if (e1000_check_reset_block(&adapter->hw))
4130 ndev_info(netdev,
4131 "PHY reset is blocked due to SOL/IDER session.\n");
4132
4133 netdev->features = NETIF_F_SG |
4134 NETIF_F_HW_CSUM |
4135 NETIF_F_HW_VLAN_TX |
4136 NETIF_F_HW_VLAN_RX;
4137
4138 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
4139 netdev->features |= NETIF_F_HW_VLAN_FILTER;
4140
4141 netdev->features |= NETIF_F_TSO;
4142 netdev->features |= NETIF_F_TSO6;
4143
4144 if (pci_using_dac)
4145 netdev->features |= NETIF_F_HIGHDMA;
4146
4147 /* We should not be using LLTX anymore, but transmit is still faster
4148 * with it. */
4149 netdev->features |= NETIF_F_LLTX;
4150
4151 if (e1000e_enable_mng_pass_thru(&adapter->hw))
4152 adapter->flags |= FLAG_MNG_PT_ENABLED;
4153
4154 /* before reading the NVM, reset the controller to
4155 * put the device in a known good starting state */
4156 adapter->hw.mac.ops.reset_hw(&adapter->hw);
4157
4158 /*
4159 * systems with ASPM and others may see the checksum fail on the first
4160 * attempt. Let's give it a few tries
4161 */
4162 for (i = 0;; i++) {
4163 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
4164 break;
4165 if (i == 2) {
4166 ndev_err(netdev, "The NVM Checksum Is Not Valid\n");
4167 err = -EIO;
4168 goto err_eeprom;
4169 }
4170 }
4171
4172 /* copy the MAC address out of the NVM */
4173 if (e1000e_read_mac_addr(&adapter->hw))
4174 ndev_err(netdev, "NVM Read Error while reading MAC address\n");
4175
4176 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
4177 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4178
4179 if (!is_valid_ether_addr(netdev->perm_addr)) {
4180 ndev_err(netdev, "Invalid MAC Address: "
4181 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4182 netdev->perm_addr[0], netdev->perm_addr[1],
4183 netdev->perm_addr[2], netdev->perm_addr[3],
4184 netdev->perm_addr[4], netdev->perm_addr[5]);
4185 err = -EIO;
4186 goto err_eeprom;
4187 }
4188
4189 init_timer(&adapter->watchdog_timer);
4190 adapter->watchdog_timer.function = &e1000_watchdog;
4191 adapter->watchdog_timer.data = (unsigned long) adapter;
4192
4193 init_timer(&adapter->phy_info_timer);
4194 adapter->phy_info_timer.function = &e1000_update_phy_info;
4195 adapter->phy_info_timer.data = (unsigned long) adapter;
4196
4197 INIT_WORK(&adapter->reset_task, e1000_reset_task);
4198 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
4199
4200 e1000e_check_options(adapter);
4201
4202 /* Initialize link parameters. User can change them with ethtool */
4203 adapter->hw.mac.autoneg = 1;
4204 adapter->fc_autoneg = 1;
4205 adapter->hw.mac.original_fc = e1000_fc_default;
4206 adapter->hw.mac.fc = e1000_fc_default;
4207 adapter->hw.phy.autoneg_advertised = 0x2f;
4208
4209 /* ring size defaults */
4210 adapter->rx_ring->count = 256;
4211 adapter->tx_ring->count = 256;
4212
4213 /*
4214 * Initial Wake on LAN setting - If APM wake is enabled in
4215 * the EEPROM, enable the ACPI Magic Packet filter
4216 */
4217 if (adapter->flags & FLAG_APME_IN_WUC) {
4218 /* APME bit in EEPROM is mapped to WUC.APME */
4219 eeprom_data = er32(WUC);
4220 eeprom_apme_mask = E1000_WUC_APME;
4221 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
4222 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
4223 (adapter->hw.bus.func == 1))
4224 e1000_read_nvm(&adapter->hw,
4225 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4226 else
4227 e1000_read_nvm(&adapter->hw,
4228 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4229 }
4230
4231 /* fetch WoL from EEPROM */
4232 if (eeprom_data & eeprom_apme_mask)
4233 adapter->eeprom_wol |= E1000_WUFC_MAG;
4234
4235 /*
4236 * now that we have the eeprom settings, apply the special cases
4237 * where the eeprom may be wrong or the board simply won't support
4238 * wake on lan on a particular port
4239 */
4240 if (!(adapter->flags & FLAG_HAS_WOL))
4241 adapter->eeprom_wol = 0;
4242
4243 /* initialize the wol settings based on the eeprom settings */
4244 adapter->wol = adapter->eeprom_wol;
4245
4246 /* reset the hardware with the new settings */
4247 e1000e_reset(adapter);
4248
4249 /* If the controller has AMT, do not set DRV_LOAD until the interface
4250 * is up. For all other cases, let the f/w know that the h/w is now
4251 * under the control of the driver. */
4252 if (!(adapter->flags & FLAG_HAS_AMT) ||
4253 !e1000e_check_mng_mode(&adapter->hw))
4254 e1000_get_hw_control(adapter);
4255
4256 /* tell the stack to leave us alone until e1000_open() is called */
4257 netif_carrier_off(netdev);
4258 netif_stop_queue(netdev);
4259
4260 strcpy(netdev->name, "eth%d");
4261 err = register_netdev(netdev);
4262 if (err)
4263 goto err_register;
4264
4265 e1000_print_device_info(adapter);
4266
4267 return 0;
4268
4269err_register:
4270err_hw_init:
4271 e1000_release_hw_control(adapter);
4272err_eeprom:
4273 if (!e1000_check_reset_block(&adapter->hw))
4274 e1000_phy_hw_reset(&adapter->hw);
4275
4276 if (adapter->hw.flash_address)
4277 iounmap(adapter->hw.flash_address);
4278
4279err_flashmap:
4280 kfree(adapter->tx_ring);
4281 kfree(adapter->rx_ring);
4282err_sw_init:
4283 iounmap(adapter->hw.hw_addr);
4284err_ioremap:
4285 free_netdev(netdev);
4286err_alloc_etherdev:
4287 pci_release_regions(pdev);
4288err_pci_reg:
4289err_dma:
4290 pci_disable_device(pdev);
4291 return err;
4292}
4293
4294/**
4295 * e1000_remove - Device Removal Routine
4296 * @pdev: PCI device information struct
4297 *
4298 * e1000_remove is called by the PCI subsystem to alert the driver
4299 * that it should release a PCI device. This could be caused by a
4300 * Hot-Plug event, or because the driver is going to be removed from
4301 * memory.
4302 **/
4303static void __devexit e1000_remove(struct pci_dev *pdev)
4304{
4305 struct net_device *netdev = pci_get_drvdata(pdev);
4306 struct e1000_adapter *adapter = netdev_priv(netdev);
4307
4308 /* flush_scheduled_work() may reschedule our watchdog task, so
4309 * explicitly disable watchdog tasks from being rescheduled */
4310 set_bit(__E1000_DOWN, &adapter->state);
4311 del_timer_sync(&adapter->watchdog_timer);
4312 del_timer_sync(&adapter->phy_info_timer);
4313
4314 flush_scheduled_work();
4315
4316 e1000_release_manageability(adapter);
4317
4318 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4319 * would have already happened in close and is redundant. */
4320 e1000_release_hw_control(adapter);
4321
4322 unregister_netdev(netdev);
4323
4324 if (!e1000_check_reset_block(&adapter->hw))
4325 e1000_phy_hw_reset(&adapter->hw);
4326
4327 kfree(adapter->tx_ring);
4328 kfree(adapter->rx_ring);
4329
4330 iounmap(adapter->hw.hw_addr);
4331 if (adapter->hw.flash_address)
4332 iounmap(adapter->hw.flash_address);
4333 pci_release_regions(pdev);
4334
4335 free_netdev(netdev);
4336
4337 pci_disable_device(pdev);
4338}
4339
4340/* PCI Error Recovery (ERS) */
4341static struct pci_error_handlers e1000_err_handler = {
4342 .error_detected = e1000_io_error_detected,
4343 .slot_reset = e1000_io_slot_reset,
4344 .resume = e1000_io_resume,
4345};
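/*
 * Recovery flow implemented by the three handlers above: the PCI core
 * first calls error_detected (detach and down the interface), then
 * slot_reset once the bus has been reset (re-enable the device and run
 * e1000e_reset), and finally resume when traffic may flow again (bring
 * the interface back up and reattach it).
 */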
4346
4347static struct pci_device_id e1000_pci_tbl[] = {
4348 /*
4349 * Support for 82571/2/3, es2lan and ich8 will be phased in
4350 * stepwise.
4351
4352 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
4353 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
4354 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
4355 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
4356 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
4357 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
4358 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
4359 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
4360 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
4361 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
4362 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
4363 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
4364 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
4365 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
4366 board_80003es2lan },
4367 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
4368 board_80003es2lan },
4369 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
4370 board_80003es2lan },
4371 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
4372 board_80003es2lan },
4373 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
4374 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
4375 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
4376 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
4377 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
4378 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
4379 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
4380 */
4381
4382 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
4383 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
4384 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
4385 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
4386 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
4387
4388 { } /* terminate list */
4389};
4390MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
4391
4392/* PCI Device API Driver */
4393static struct pci_driver e1000_driver = {
4394 .name = e1000e_driver_name,
4395 .id_table = e1000_pci_tbl,
4396 .probe = e1000_probe,
4397 .remove = __devexit_p(e1000_remove),
4398#ifdef CONFIG_PM
4399 /* Power Management Hooks */
4400 .suspend = e1000_suspend,
4401 .resume = e1000_resume,
4402#endif
4403 .shutdown = e1000_shutdown,
4404 .err_handler = &e1000_err_handler
4405};
4406
4407/**
4408 * e1000_init_module - Driver Registration Routine
4409 *
4410 * e1000_init_module is the first routine called when the driver is
4411 * loaded. All it does is register with the PCI subsystem.
4412 **/
4413static int __init e1000_init_module(void)
4414{
4415 int ret;
4416 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
4417 e1000e_driver_name, e1000e_driver_version);
4418 printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n",
4419 e1000e_driver_name);
4420 ret = pci_register_driver(&e1000_driver);
4421
4422 return ret;
4423}
4424module_init(e1000_init_module);
4425
4426/**
4427 * e1000_exit_module - Driver Exit Cleanup Routine
4428 *
4429 * e1000_exit_module is called just before the driver is removed
4430 * from memory.
4431 **/
4432static void __exit e1000_exit_module(void)
4433{
4434 pci_unregister_driver(&e1000_driver);
4435}
4436module_exit(e1000_exit_module);
4437
4438
4439MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
4440MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
4441MODULE_LICENSE("GPL");
4442MODULE_VERSION(DRV_VERSION);
4443
4444/* e1000_main.c */