/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>

#include "e1000.h"

#define DRV_VERSION "0.3.3.3-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
};

#ifdef DEBUG
/**
 * e1000e_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->adapter->netdev->name;
}
#endif

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
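
/*
 * Worked example of the accounting above: with count = 256,
 * next_to_clean = 10 and next_to_use = 250, the second return yields
 * 256 + 10 - 250 - 1 = 15 unused entries.  The "- 1" keeps one
 * descriptor permanently unused so that next_to_use == next_to_clean
 * can only ever mean an empty ring, never a full one.
 */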

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the network device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(vlan));
	else
		netif_receive_skb(skb);

	netdev->last_rx = jiffies;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
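
/*
 * Note on the CHECKSUM_COMPLETE path above: the hardware reports the
 * one's complement of the payload checksum, so undoing it with ~sum
 * (after the byte-order fix-up) leaves skb->csum holding the raw sum;
 * the stack folds and verifies it from there.
 */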

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
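
/*
 * Tail-write detail, for reference: after the "i--" above, the receive
 * tail register is written with the index of the last descriptor
 * actually initialized in this pass (one behind next_to_use), and only
 * once the wmb() has made the descriptor contents visible to the
 * device.
 */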

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] =
					~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(pdev,
							  ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}
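
/*
 * Packet-split descriptor layout, as used above: buffer_addr[0] points
 * at the small header buffer (rx_ps_bsize0 bytes in the skb's linear
 * area), while buffer_addr[1..PS_PAGE_BUFFERS] point at whole
 * DMA-mapped pages for the payload; slots beyond rx_ps_pages are
 * written as all-ones (the "hw null ptr") so the hardware skips them.
 */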

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
			     16 /* for skb_reserve */ -
			     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
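
/*
 * Sizing note: in the jumbo path the skb's linear area (256 bytes less
 * the reserves above) is only used for copybreak copies and the
 * Ethernet-header pull in e1000_clean_jumbo_rx_irq(); the payload
 * itself arrives in the DMA-mapped page, so a full-MTU linear
 * allocation would be wasted memory.
 */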

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, also make sure the frame isn't just CRC only
		 */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			e_dbg("%s: Receive packet consumed multiple buffers\n",
			      netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
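
/*
 * The copybreak threshold above is a driver-wide tunable: frames
 * shorter than it are memcpy'd into a right-sized skb so the original
 * full-size receive buffer can be recycled in place, trading one small
 * copy for far less allocator and DMA-map churn under small-packet
 * load.
 */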

static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* detected Tx unit hang */
	e_err("Detected Tx Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = false;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = false; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (cleaned && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/*
		 * Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	return cleaned;
}
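
/*
 * E1000_TX_WEIGHT above bounds one cleanup pass to 64 completed
 * packets, so a continuously busy Tx ring cannot monopolize the
 * interrupt/NAPI context; anything left over is reclaimed on the next
 * invocation.
 */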

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			e_dbg("%s: Packet Split buffers didn't pick up the "
			      "full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("%s: Last part of the packet spanning multiple "
			      "descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				vaddr = kmap_atomic(ps_page->page,
						    KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				pci_dma_sync_single_for_device(pdev,
					ps_page->dma, PAGE_SIZE,
					PCI_DMA_FROMDEVICE);

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/*
				 * re-use the current skb, we only consumed
				 * the page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/*
				 * no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/*
					 * re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
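
/*
 * Chaining sketch for the jumbo path above: rx_skb_top carries a
 * partially assembled frame between descriptors; each non-EOP buffer
 * contributes its page as a fragment, and only the EOP descriptor
 * releases the finished skb to the stack via e1000_receive_skb().
 */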

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
					       PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574	0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
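
/*
 * ITR arithmetic above, for reference: itr_val is a rate in
 * interrupts/second, while the 82574 EITR registers are programmed
 * with an interval, so 1000000000 / (itr_val * 256) converts the rate
 * into a count of 256 ns units (the register granularity implied by
 * the scaling used here).
 */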

void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int numvecs, i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			numvecs = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(numvecs,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < numvecs; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      numvecs);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}
}
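
/*
 * Interrupt-mode fallback order, as implemented above:
 * MSI-X (3 vectors: Rx, Tx, other) -> MSI -> legacy INTx; each failed
 * step releases whatever it allocated before dropping down a level.
 */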

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * e1000_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}
1587 | ||
1588 | /** | |
1589 | * e1000_get_hw_control - get control of the h/w from f/w | |
1590 | * @adapter: address of board private structure | |
1591 | * | |
489815ce | 1592 | * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. |
bc7f75fa AK |
1593 | * For ASF and Pass Through versions of f/w this means that |
1594 | * the driver is loaded. For AMT version (only with 82573) | |
1595 | * of the f/w this means that the network i/f is open. | |
1596 | **/ | |
1597 | static void e1000_get_hw_control(struct e1000_adapter *adapter) | |
1598 | { | |
1599 | struct e1000_hw *hw = &adapter->hw; | |
1600 | u32 ctrl_ext; | |
1601 | u32 swsm; | |
1602 | ||
1603 | /* Let firmware know the driver has taken over */ | |
1604 | if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { | |
1605 | swsm = er32(SWSM); | |
1606 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); | |
1607 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { | |
1608 | ctrl_ext = er32(CTRL_EXT); | |
ad68076e | 1609 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
bc7f75fa AK |
1610 | } |
1611 | } | |
1612 | ||
1613 | /** | |
1614 | * e1000_release_hw_control - release control of the h/w to f/w | |
1615 | * @adapter: address of board private structure | |
1616 | * | |
489815ce | 1617 | * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. |
bc7f75fa AK |
1618 | * For ASF and Pass Through versions of f/w this means that the |
1619 | * driver is no longer loaded. For AMT version (only with 82573) | |
1620 | * of the f/w this means that the network i/f is closed. | |
1621 | * | |
1622 | **/ | |
1623 | static void e1000_release_hw_control(struct e1000_adapter *adapter) | |
1624 | { | |
1625 | struct e1000_hw *hw = &adapter->hw; | |
1626 | u32 ctrl_ext; | |
1627 | u32 swsm; | |
1628 | ||
1629 | /* Let firmware take over control of h/w */ | |
1630 | if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { | |
1631 | swsm = er32(SWSM); | |
1632 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); | |
1633 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { | |
1634 | ctrl_ext = er32(CTRL_EXT); | |
ad68076e | 1635 | ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
bc7f75fa AK |
1636 | } |
1637 | } | |
1638 | ||
bc7f75fa AK |
1639 | /** |
1640 | * e1000_alloc_ring_dma - allocate memory for a ring structure | |
1641 | **/ | |
1642 | static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, | |
1643 | struct e1000_ring *ring) | |
1644 | { | |
1645 | struct pci_dev *pdev = adapter->pdev; | |
1646 | ||
1647 | ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, | |
1648 | GFP_KERNEL); | |
1649 | if (!ring->desc) | |
1650 | return -ENOMEM; | |
1651 | ||
1652 | return 0; | |
1653 | } | |
1654 | ||
1655 | /** | |
1656 | * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) | |
1657 | * @adapter: board private structure | |
1658 | * | |
1659 | * Return 0 on success, negative on failure | |
1660 | **/ | |
1661 | int e1000e_setup_tx_resources(struct e1000_adapter *adapter) | |
1662 | { | |
1663 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
1664 | int err = -ENOMEM, size; | |
1665 | ||
1666 | size = sizeof(struct e1000_buffer) * tx_ring->count; | |
1667 | tx_ring->buffer_info = vmalloc(size); | |
1668 | if (!tx_ring->buffer_info) | |
1669 | goto err; | |
1670 | memset(tx_ring->buffer_info, 0, size); | |
1671 | ||
1672 | /* round up to nearest 4K */ | |
1673 | tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); | |
1674 | tx_ring->size = ALIGN(tx_ring->size, 4096); | |
1675 | ||
1676 | err = e1000_alloc_ring_dma(adapter, tx_ring); | |
1677 | if (err) | |
1678 | goto err; | |
1679 | ||
1680 | tx_ring->next_to_use = 0; | |
1681 | tx_ring->next_to_clean = 0; | |
1682 | spin_lock_init(&adapter->tx_queue_lock); | |
1683 | ||
1684 | return 0; | |
1685 | err: | |
1686 | vfree(tx_ring->buffer_info); | |
44defeb3 | 1687 | e_err("Unable to allocate memory for the transmit descriptor ring\n"); |
bc7f75fa AK |
1688 | return err; |
1689 | } | |
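/*
 * Worked example of the sizing above: with the driver default of 256
 * descriptors, 256 * sizeof(struct e1000_tx_desc) = 256 * 16 = 4096
 * bytes, already a 4 KiB multiple; a count of 80 (1280 bytes) would be
 * rounded up to 4096 by ALIGN(), so the coherent DMA allocation always
 * covers whole pages.
 */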
1690 | ||
1691 | /** | |
1692 | * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) | |
1693 | * @adapter: board private structure | |
1694 | * | |
1695 | * Returns 0 on success, negative on failure | |
1696 | **/ | |
1697 | int e1000e_setup_rx_resources(struct e1000_adapter *adapter) | |
1698 | { | |
1699 | struct e1000_ring *rx_ring = adapter->rx_ring; | |
47f44e40 AK |
1700 | struct e1000_buffer *buffer_info; |
1701 | int i, size, desc_len, err = -ENOMEM; | |
bc7f75fa AK |
1702 | |
1703 | size = sizeof(struct e1000_buffer) * rx_ring->count; | |
1704 | rx_ring->buffer_info = vmalloc(size); | |
1705 | if (!rx_ring->buffer_info) | |
1706 | goto err; | |
1707 | memset(rx_ring->buffer_info, 0, size); | |
1708 | ||
47f44e40 AK |
1709 | for (i = 0; i < rx_ring->count; i++) { |
1710 | buffer_info = &rx_ring->buffer_info[i]; | |
1711 | buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, | |
1712 | sizeof(struct e1000_ps_page), | |
1713 | GFP_KERNEL); | |
1714 | if (!buffer_info->ps_pages) | |
1715 | goto err_pages; | |
1716 | } | |
bc7f75fa AK |
1717 | |
1718 | desc_len = sizeof(union e1000_rx_desc_packet_split); | |
1719 | ||
1720 | /* Round up to nearest 4K */ | |
1721 | rx_ring->size = rx_ring->count * desc_len; | |
1722 | rx_ring->size = ALIGN(rx_ring->size, 4096); | |
1723 | ||
1724 | err = e1000_alloc_ring_dma(adapter, rx_ring); | |
1725 | if (err) | |
47f44e40 | 1726 | goto err_pages; |
bc7f75fa AK |
1727 | |
1728 | rx_ring->next_to_clean = 0; | |
1729 | rx_ring->next_to_use = 0; | |
1730 | rx_ring->rx_skb_top = NULL; | |
1731 | ||
1732 | return 0; | |
47f44e40 AK |
1733 | |
1734 | err_pages: | |
1735 | for (i = 0; i < rx_ring->count; i++) { | |
1736 | buffer_info = &rx_ring->buffer_info[i]; | |
1737 | kfree(buffer_info->ps_pages); | |
1738 | } | |
bc7f75fa AK |
1739 | err: |
1740 | vfree(rx_ring->buffer_info); | |
44defeb3 | 1741 | e_err("Unable to allocate memory for the receive descriptor ring\n");
bc7f75fa AK |
1742 | return err; |
1743 | } | |
1744 | ||
1745 | /** | |
1746 | * e1000_clean_tx_ring - Free Tx Buffers | |
1747 | * @adapter: board private structure | |
1748 | **/ | |
1749 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter) | |
1750 | { | |
1751 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
1752 | struct e1000_buffer *buffer_info; | |
1753 | unsigned long size; | |
1754 | unsigned int i; | |
1755 | ||
1756 | for (i = 0; i < tx_ring->count; i++) { | |
1757 | buffer_info = &tx_ring->buffer_info[i]; | |
1758 | e1000_put_txbuf(adapter, buffer_info); | |
1759 | } | |
1760 | ||
1761 | size = sizeof(struct e1000_buffer) * tx_ring->count; | |
1762 | memset(tx_ring->buffer_info, 0, size); | |
1763 | ||
1764 | memset(tx_ring->desc, 0, tx_ring->size); | |
1765 | ||
1766 | tx_ring->next_to_use = 0; | |
1767 | tx_ring->next_to_clean = 0; | |
1768 | ||
1769 | writel(0, adapter->hw.hw_addr + tx_ring->head); | |
1770 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | |
1771 | } | |
1772 | ||
1773 | /** | |
1774 | * e1000e_free_tx_resources - Free Tx Resources per Queue | |
1775 | * @adapter: board private structure | |
1776 | * | |
1777 | * Free all transmit software resources | |
1778 | **/ | |
1779 | void e1000e_free_tx_resources(struct e1000_adapter *adapter) | |
1780 | { | |
1781 | struct pci_dev *pdev = adapter->pdev; | |
1782 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
1783 | ||
1784 | e1000_clean_tx_ring(adapter); | |
1785 | ||
1786 | vfree(tx_ring->buffer_info); | |
1787 | tx_ring->buffer_info = NULL; | |
1788 | ||
1789 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, | |
1790 | tx_ring->dma); | |
1791 | tx_ring->desc = NULL; | |
1792 | } | |
1793 | ||
1794 | /** | |
1795 | * e1000e_free_rx_resources - Free Rx Resources | |
1796 | * @adapter: board private structure | |
1797 | * | |
1798 | * Free all receive software resources | |
1799 | **/ | |
1801 | void e1000e_free_rx_resources(struct e1000_adapter *adapter) | |
1802 | { | |
1803 | struct pci_dev *pdev = adapter->pdev; | |
1804 | struct e1000_ring *rx_ring = adapter->rx_ring; | |
47f44e40 | 1805 | int i; |
bc7f75fa AK |
1806 | |
1807 | e1000_clean_rx_ring(adapter); | |
1808 | ||
47f44e40 AK |
1809 | for (i = 0; i < rx_ring->count; i++) { |
1810 | kfree(rx_ring->buffer_info[i].ps_pages); | |
1811 | } | |
1812 | ||
bc7f75fa AK |
1813 | vfree(rx_ring->buffer_info); |
1814 | rx_ring->buffer_info = NULL; | |
1815 | ||
bc7f75fa AK |
1816 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
1817 | rx_ring->dma); | |
1818 | rx_ring->desc = NULL; | |
1819 | } | |
1820 | ||
1821 | /** | |
1822 | * e1000_update_itr - update the dynamic ITR value based on statistics | |
489815ce AK |
1823 | * @adapter: pointer to adapter |
1824 | * @itr_setting: current adapter->itr | |
1825 | * @packets: the number of packets during this measurement interval | |
1826 | * @bytes: the number of bytes during this measurement interval | |
1827 | * | |
bc7f75fa AK |
1828 | * Stores a new ITR value based on packets and byte |
1829 | * counts during the last interrupt. The advantage of per interrupt | |
1830 | * computation is faster updates and more accurate ITR for the current | |
1831 | * traffic pattern. Constants in this function were computed | |
1832 | * based on theoretical maximum wire speed and thresholds were set based | |
1833 | * on testing data as well as attempting to minimize response time | |
4662e82b BA |
1834 | * while increasing bulk throughput. This functionality is controlled |
1835 | * by the InterruptThrottleRate module parameter. | |
bc7f75fa AK |
1836 | **/ |
1837 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | |
1838 | u16 itr_setting, int packets, | |
1839 | int bytes) | |
1840 | { | |
1841 | unsigned int retval = itr_setting; | |
1842 | ||
1843 | if (packets == 0) | |
1844 | goto update_itr_done; | |
1845 | ||
1846 | switch (itr_setting) { | |
1847 | case lowest_latency: | |
1848 | /* handle TSO and jumbo frames */ | |
1849 | if (bytes / packets > 8000) | |
1850 | retval = bulk_latency; | |
1851 | else if ((packets < 5) && (bytes > 512)) | |
1852 | retval = low_latency; | |
1853 | break; | |
1854 | case low_latency: /* 50 usec aka 20000 ints/s */ | |
1855 | if (bytes > 10000) { | |
1856 | /* this if handles the TSO accounting */ | |
1857 | if (bytes / packets > 8000) | |
1858 | retval = bulk_latency; | |
1859 | else if ((packets < 10) || ((bytes / packets) > 1200)) | |
1860 | retval = bulk_latency; | |
1861 | else if (packets > 35) | |
1862 | retval = lowest_latency; | |
1863 | } else if (bytes / packets > 2000) { | |
1864 | retval = bulk_latency; | |
1865 | } else if (packets <= 2 && bytes < 512) { | |
1866 | retval = lowest_latency; | |
1867 | } | |
1868 | break; | |
1869 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | |
1870 | if (bytes > 25000) { | |
1871 | if (packets > 35) | |
1872 | retval = low_latency; | |
1873 | } else if (bytes < 6000) { | |
1874 | retval = low_latency; | |
1875 | } | |
1876 | break; | |
1877 | } | |
1881 | ||
1882 | update_itr_done: | |
1883 | return retval; | |
1884 | } | |
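/*
 * Worked example of the classification above: cleaning 20 packets
 * totalling 30000 bytes while in low_latency gives 1500 bytes/packet,
 * which is over the 1200 bytes/packet threshold, so the next setting is
 * bulk_latency (~4000 ints/s); 2 packets under 512 bytes total would
 * drop the ring to lowest_latency instead.
 */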
1885 | ||
1886 | static void e1000_set_itr(struct e1000_adapter *adapter) | |
1887 | { | |
1888 | struct e1000_hw *hw = &adapter->hw; | |
1889 | u16 current_itr; | |
1890 | u32 new_itr = adapter->itr; | |
1891 | ||
1892 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | |
1893 | if (adapter->link_speed != SPEED_1000) { | |
1894 | current_itr = 0; | |
1895 | new_itr = 4000; | |
1896 | goto set_itr_now; | |
1897 | } | |
1898 | ||
1899 | adapter->tx_itr = e1000_update_itr(adapter, | |
1900 | adapter->tx_itr, | |
1901 | adapter->total_tx_packets, | |
1902 | adapter->total_tx_bytes); | |
1903 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | |
1904 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | |
1905 | adapter->tx_itr = low_latency; | |
1906 | ||
1907 | adapter->rx_itr = e1000_update_itr(adapter, | |
1908 | adapter->rx_itr, | |
1909 | adapter->total_rx_packets, | |
1910 | adapter->total_rx_bytes); | |
1911 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | |
1912 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | |
1913 | adapter->rx_itr = low_latency; | |
1914 | ||
1915 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | |
1916 | ||
1917 | switch (current_itr) { | |
1918 | /* counts and packets in update_itr are dependent on these numbers */ | |
1919 | case lowest_latency: | |
1920 | new_itr = 70000; | |
1921 | break; | |
1922 | case low_latency: | |
1923 | new_itr = 20000; /* aka hwitr = ~200 */ | |
1924 | break; | |
1925 | case bulk_latency: | |
1926 | new_itr = 4000; | |
1927 | break; | |
1928 | default: | |
1929 | break; | |
1930 | } | |
1931 | ||
1932 | set_itr_now: | |
1933 | if (new_itr != adapter->itr) { | |
ad68076e BA |
1934 | /* |
1935 | * this attempts to bias the interrupt rate towards Bulk | |
bc7f75fa | 1936 | * by adding intermediate steps when interrupt rate is |
ad68076e BA |
1937 | * increasing |
1938 | */ | |
bc7f75fa AK |
1939 | new_itr = new_itr > adapter->itr ? |
1940 | min(adapter->itr + (new_itr >> 2), new_itr) : | |
1941 | new_itr; | |
1942 | adapter->itr = new_itr; | |
4662e82b BA |
1943 | adapter->rx_ring->itr_val = new_itr; |
1944 | if (adapter->msix_entries) | |
1945 | adapter->rx_ring->set_itr = 1; | |
1946 | else | |
1947 | ew32(ITR, 1000000000 / (new_itr * 256)); | |
bc7f75fa AK |
1948 | } |
1949 | } | |
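/*
 * The ITR register holds the minimum inter-interrupt gap in 256 ns
 * units, hence the 1000000000 / (new_itr * 256) conversion above; a
 * target of 20000 ints/s, for example, programs 10^9 / (20000 * 256),
 * which truncates to 195.
 */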
1950 | ||
4662e82b BA |
1951 | /** |
1952 | * e1000_alloc_queues - Allocate memory for all rings | |
1953 | * @adapter: board private structure to initialize | |
1954 | **/ | |
1955 | static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) | |
1956 | { | |
1957 | adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | |
1958 | if (!adapter->tx_ring) | |
1959 | goto err; | |
1960 | ||
1961 | adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | |
1962 | if (!adapter->rx_ring) | |
1963 | goto err; | |
1964 | ||
1965 | return 0; | |
1966 | err: | |
1967 | e_err("Unable to allocate memory for queues\n"); | |
1968 | kfree(adapter->rx_ring); | |
1969 | kfree(adapter->tx_ring); | |
1970 | return -ENOMEM; | |
1971 | } | |
1972 | ||
bc7f75fa AK |
1973 | /** |
1974 | * e1000_clean - NAPI Rx polling callback | |
ad68076e | 1975 | * @napi: struct associated with this polling callback |
489815ce | 1976 | * @budget: number of packets the driver may process in this poll
bc7f75fa AK |
1977 | **/ |
1978 | static int e1000_clean(struct napi_struct *napi, int budget) | |
1979 | { | |
1980 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | |
4662e82b | 1981 | struct e1000_hw *hw = &adapter->hw; |
bc7f75fa | 1982 | struct net_device *poll_dev = adapter->netdev; |
d2c7ddd6 | 1983 | int tx_cleaned = 0, work_done = 0; |
bc7f75fa AK |
1984 | |
1985 | /* Must NOT use netdev_priv macro here. */ | |
1986 | adapter = poll_dev->priv; | |
1987 | ||
4662e82b BA |
1988 | if (adapter->msix_entries && |
1989 | !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) | |
1990 | goto clean_rx; | |
1991 | ||
ad68076e BA |
1992 | /* |
1993 | * e1000_clean is called per-cpu. This lock protects | |
bc7f75fa AK |
1994 | * tx_ring from being cleaned by multiple cpus |
1995 | * simultaneously. A failure obtaining the lock means | |
ad68076e BA |
1996 | * tx_ring is currently being cleaned anyway. |
1997 | */ | |
bc7f75fa | 1998 | if (spin_trylock(&adapter->tx_queue_lock)) { |
d2c7ddd6 | 1999 | tx_cleaned = e1000_clean_tx_irq(adapter); |
bc7f75fa AK |
2000 | spin_unlock(&adapter->tx_queue_lock); |
2001 | } | |
2002 | ||
4662e82b | 2003 | clean_rx: |
bc7f75fa | 2004 | adapter->clean_rx(adapter, &work_done, budget); |
d2c7ddd6 DM |
2005 | |
2006 | if (tx_cleaned) | |
2007 | work_done = budget; | |
bc7f75fa | 2008 | |
53e52c72 DM |
2009 | /* If budget not fully consumed, exit the polling mode */ |
2010 | if (work_done < budget) { | |
bc7f75fa AK |
2011 | if (adapter->itr_setting & 3) |
2012 | e1000_set_itr(adapter); | |
2013 | netif_rx_complete(poll_dev, napi); | |
4662e82b BA |
2014 | if (adapter->msix_entries) |
2015 | ew32(IMS, adapter->rx_ring->ims_val); | |
2016 | else | |
2017 | e1000_irq_enable(adapter); | |
bc7f75fa AK |
2018 | } |
2019 | ||
2020 | return work_done; | |
2021 | } | |
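/*
 * Note on the budget handling above: NAPI keeps polling as long as the
 * driver reports work_done == budget, so forcing work_done = budget when
 * Tx cleanup found work schedules another poll pass instead of
 * re-enabling interrupts while transmit completions are still pending.
 */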
2022 | ||
2023 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |
2024 | { | |
2025 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2026 | struct e1000_hw *hw = &adapter->hw; | |
2027 | u32 vfta, index; | |
2028 | ||
2029 | /* don't update vlan cookie if already programmed */ | |
2030 | if ((adapter->hw.mng_cookie.status & | |
2031 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | |
2032 | (vid == adapter->mng_vlan_id)) | |
2033 | return; | |
2034 | /* add VID to filter table */ | |
2035 | index = (vid >> 5) & 0x7F; | |
2036 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | |
2037 | vfta |= (1 << (vid & 0x1F)); | |
2038 | e1000e_write_vfta(hw, index, vfta); | |
2039 | } | |
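/*
 * Worked example of the VFTA indexing above: the filter table is 128
 * 32-bit registers covering all 4096 VLAN IDs, one bit per VID. For
 * vid = 100, index = (100 >> 5) & 0x7F = 3 and the bit is
 * 100 & 0x1F = 4, so the write above sets bit 4 of VFTA[3].
 */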
2040 | ||
2041 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |
2042 | { | |
2043 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2044 | struct e1000_hw *hw = &adapter->hw; | |
2045 | u32 vfta, index; | |
2046 | ||
74ef9c39 JB |
2047 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
2048 | e1000_irq_disable(adapter); | |
bc7f75fa | 2049 | vlan_group_set_device(adapter->vlgrp, vid, NULL); |
74ef9c39 JB |
2050 | |
2051 | if (!test_bit(__E1000_DOWN, &adapter->state)) | |
2052 | e1000_irq_enable(adapter); | |
bc7f75fa AK |
2053 | |
2054 | if ((adapter->hw.mng_cookie.status & | |
2055 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | |
2056 | (vid == adapter->mng_vlan_id)) { | |
2057 | /* release control to f/w */ | |
2058 | e1000_release_hw_control(adapter); | |
2059 | return; | |
2060 | } | |
2061 | ||
2062 | /* remove VID from filter table */ | |
2063 | index = (vid >> 5) & 0x7F; | |
2064 | vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); | |
2065 | vfta &= ~(1 << (vid & 0x1F)); | |
2066 | e1000e_write_vfta(hw, index, vfta); | |
2067 | } | |
2068 | ||
2069 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) | |
2070 | { | |
2071 | struct net_device *netdev = adapter->netdev; | |
2072 | u16 vid = adapter->hw.mng_cookie.vlan_id; | |
2073 | u16 old_vid = adapter->mng_vlan_id; | |
2074 | ||
2075 | if (!adapter->vlgrp) | |
2076 | return; | |
2077 | ||
2078 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { | |
2079 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
2080 | if (adapter->hw.mng_cookie.status & | |
2081 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { | |
2082 | e1000_vlan_rx_add_vid(netdev, vid); | |
2083 | adapter->mng_vlan_id = vid; | |
2084 | } | |
2085 | ||
2086 | if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && | |
2087 | (vid != old_vid) && | |
2088 | !vlan_group_get_device(adapter->vlgrp, old_vid)) | |
2089 | e1000_vlan_rx_kill_vid(netdev, old_vid); | |
2090 | } else { | |
2091 | adapter->mng_vlan_id = vid; | |
2092 | } | |
2093 | } | |
2094 | ||
2096 | static void e1000_vlan_rx_register(struct net_device *netdev, | |
2097 | struct vlan_group *grp) | |
2098 | { | |
2099 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2100 | struct e1000_hw *hw = &adapter->hw; | |
2101 | u32 ctrl, rctl; | |
2102 | ||
74ef9c39 JB |
2103 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
2104 | e1000_irq_disable(adapter); | |
bc7f75fa AK |
2105 | adapter->vlgrp = grp; |
2106 | ||
2107 | if (grp) { | |
2108 | /* enable VLAN tag insert/strip */ | |
2109 | ctrl = er32(CTRL); | |
2110 | ctrl |= E1000_CTRL_VME; | |
2111 | ew32(CTRL, ctrl); | |
2112 | ||
2113 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { | |
2114 | /* enable VLAN receive filtering */ | |
2115 | rctl = er32(RCTL); | |
bc7f75fa AK |
2116 | rctl &= ~E1000_RCTL_CFIEN; |
2117 | ew32(RCTL, rctl); | |
2118 | e1000_update_mng_vlan(adapter); | |
2119 | } | |
2120 | } else { | |
2121 | /* disable VLAN tag insert/strip */ | |
2122 | ctrl = er32(CTRL); | |
2123 | ctrl &= ~E1000_CTRL_VME; | |
2124 | ew32(CTRL, ctrl); | |
2125 | ||
2126 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { | |
bc7f75fa AK |
2127 | if (adapter->mng_vlan_id != |
2128 | (u16)E1000_MNG_VLAN_NONE) { | |
2129 | e1000_vlan_rx_kill_vid(netdev, | |
2130 | adapter->mng_vlan_id); | |
2131 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
2132 | } | |
2133 | } | |
2134 | } | |
2135 | ||
74ef9c39 JB |
2136 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
2137 | e1000_irq_enable(adapter); | |
bc7f75fa AK |
2138 | } |
2139 | ||
2140 | static void e1000_restore_vlan(struct e1000_adapter *adapter) | |
2141 | { | |
2142 | u16 vid; | |
2143 | ||
2144 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); | |
2145 | ||
2146 | if (!adapter->vlgrp) | |
2147 | return; | |
2148 | ||
2149 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | |
2150 | if (!vlan_group_get_device(adapter->vlgrp, vid)) | |
2151 | continue; | |
2152 | e1000_vlan_rx_add_vid(adapter->netdev, vid); | |
2153 | } | |
2154 | } | |
2155 | ||
2156 | static void e1000_init_manageability(struct e1000_adapter *adapter) | |
2157 | { | |
2158 | struct e1000_hw *hw = &adapter->hw; | |
2159 | u32 manc, manc2h; | |
2160 | ||
2161 | if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) | |
2162 | return; | |
2163 | ||
2164 | manc = er32(MANC); | |
2165 | ||
ad68076e BA |
2166 | /* |
2167 | * enable receiving management packets to the host. this will probably | |
bc7f75fa | 2168 | * generate destination unreachable messages from the host OS, but |
ad68076e BA |
2169 | * the packets will be handled on SMBUS |
2170 | */ | |
bc7f75fa AK |
2171 | manc |= E1000_MANC_EN_MNG2HOST; |
2172 | manc2h = er32(MANC2H); | |
2173 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | |
2174 | #define E1000_MNG2HOST_PORT_664 (1 << 6) | |
2175 | manc2h |= E1000_MNG2HOST_PORT_623; | |
2176 | manc2h |= E1000_MNG2HOST_PORT_664; | |
2177 | ew32(MANC2H, manc2h); | |
2178 | ew32(MANC, manc); | |
2179 | } | |
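/*
 * The two ports programmed above are the standard ASF/RMCP management
 * ports: UDP 623 (RMCP) and UDP 664 (secure RMCP). Forwarding them to
 * the host is what triggers the destination-unreachable replies noted
 * above while the firmware services the traffic over SMBus.
 */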
2180 | ||
2181 | /** | |
2182 | * e1000_configure_tx - Configure Transmit Unit after Reset | |
2183 | * @adapter: board private structure | |
2184 | * | |
2185 | * Configure the Tx unit of the MAC after a reset. | |
2186 | **/ | |
2187 | static void e1000_configure_tx(struct e1000_adapter *adapter) | |
2188 | { | |
2189 | struct e1000_hw *hw = &adapter->hw; | |
2190 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
2191 | u64 tdba; | |
2192 | u32 tdlen, tctl, tipg, tarc; | |
2193 | u32 ipgr1, ipgr2; | |
2194 | ||
2195 | /* Setup the HW Tx Head and Tail descriptor pointers */ | |
2196 | tdba = tx_ring->dma; | |
2197 | tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); | |
2198 | ew32(TDBAL, (tdba & DMA_32BIT_MASK)); | |
2199 | ew32(TDBAH, (tdba >> 32)); | |
2200 | ew32(TDLEN, tdlen); | |
2201 | ew32(TDH, 0); | |
2202 | ew32(TDT, 0); | |
2203 | tx_ring->head = E1000_TDH; | |
2204 | tx_ring->tail = E1000_TDT; | |
2205 | ||
2206 | /* Set the default values for the Tx Inter Packet Gap timer */ | |
2207 | tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ | |
2208 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ | |
2209 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ | |
2210 | ||
2211 | if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) | |
2212 | ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ | |
2213 | ||
2214 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; | |
2215 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; | |
2216 | ew32(TIPG, tipg); | |
2217 | ||
2218 | /* Set the Tx Interrupt Delay register */ | |
2219 | ew32(TIDV, adapter->tx_int_delay); | |
ad68076e | 2220 | /* Tx irq moderation */ |
bc7f75fa AK |
2221 | ew32(TADV, adapter->tx_abs_int_delay); |
2222 | ||
2223 | /* Program the Transmit Control Register */ | |
2224 | tctl = er32(TCTL); | |
2225 | tctl &= ~E1000_TCTL_CT; | |
2226 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | |
2227 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | |
2228 | ||
2229 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { | |
e9ec2c0f | 2230 | tarc = er32(TARC(0)); |
ad68076e BA |
2231 | /* |
2232 | * set the speed mode bit, we'll clear it if we're not at | |
2233 | * gigabit link later | |
2234 | */ | |
bc7f75fa AK |
2235 | #define SPEED_MODE_BIT (1 << 21) |
2236 | tarc |= SPEED_MODE_BIT; | |
e9ec2c0f | 2237 | ew32(TARC(0), tarc); |
bc7f75fa AK |
2238 | } |
2239 | ||
2240 | /* errata: program both queues to unweighted RR */ | |
2241 | if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { | |
e9ec2c0f | 2242 | tarc = er32(TARC(0)); |
bc7f75fa | 2243 | tarc |= 1; |
e9ec2c0f JK |
2244 | ew32(TARC(0), tarc); |
2245 | tarc = er32(TARC(1)); | |
bc7f75fa | 2246 | tarc |= 1; |
e9ec2c0f | 2247 | ew32(TARC(1), tarc); |
bc7f75fa AK |
2248 | } |
2249 | ||
2250 | e1000e_config_collision_dist(hw); | |
2251 | ||
2252 | /* Setup Transmit Descriptor Settings for eop descriptor */ | |
2253 | adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; | |
2254 | ||
2255 | /* only set IDE if we are delaying interrupts using the timers */ | |
2256 | if (adapter->tx_int_delay) | |
2257 | adapter->txd_cmd |= E1000_TXD_CMD_IDE; | |
2258 | ||
2259 | /* enable Report Status bit */ | |
2260 | adapter->txd_cmd |= E1000_TXD_CMD_RS; | |
2261 | ||
2262 | ew32(TCTL, tctl); | |
2263 | ||
2264 | adapter->tx_queue_len = adapter->netdev->tx_queue_len; | |
2265 | } | |
2266 | ||
2267 | /** | |
2268 | * e1000_setup_rctl - configure the receive control registers | |
2269 | * @adapter: Board private structure | |
2270 | **/ | |
2271 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ | |
2272 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) | |
2273 | static void e1000_setup_rctl(struct e1000_adapter *adapter) | |
2274 | { | |
2275 | struct e1000_hw *hw = &adapter->hw; | |
2276 | u32 rctl, rfctl; | |
2277 | u32 psrctl = 0; | |
2278 | u32 pages = 0; | |
2279 | ||
2280 | /* Program MC offset vector base */ | |
2281 | rctl = er32(RCTL); | |
2282 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | |
2283 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | | |
2284 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | |
2285 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | |
2286 | ||
2287 | /* Do not Store bad packets */ | |
2288 | rctl &= ~E1000_RCTL_SBP; | |
2289 | ||
2290 | /* Enable Long Packet receive */ | |
2291 | if (adapter->netdev->mtu <= ETH_DATA_LEN) | |
2292 | rctl &= ~E1000_RCTL_LPE; | |
2293 | else | |
2294 | rctl |= E1000_RCTL_LPE; | |
2295 | ||
5918bd88 AK |
2296 | /* Enable hardware CRC frame stripping */ |
2297 | rctl |= E1000_RCTL_SECRC; | |
2298 | ||
bc7f75fa AK |
2299 | /* Setup buffer sizes */ |
2300 | rctl &= ~E1000_RCTL_SZ_4096; | |
2301 | rctl |= E1000_RCTL_BSEX; | |
2302 | switch (adapter->rx_buffer_len) { | |
2303 | case 256: | |
2304 | rctl |= E1000_RCTL_SZ_256; | |
2305 | rctl &= ~E1000_RCTL_BSEX; | |
2306 | break; | |
2307 | case 512: | |
2308 | rctl |= E1000_RCTL_SZ_512; | |
2309 | rctl &= ~E1000_RCTL_BSEX; | |
2310 | break; | |
2311 | case 1024: | |
2312 | rctl |= E1000_RCTL_SZ_1024; | |
2313 | rctl &= ~E1000_RCTL_BSEX; | |
2314 | break; | |
2315 | case 2048: | |
2316 | default: | |
2317 | rctl |= E1000_RCTL_SZ_2048; | |
2318 | rctl &= ~E1000_RCTL_BSEX; | |
2319 | break; | |
2320 | case 4096: | |
2321 | rctl |= E1000_RCTL_SZ_4096; | |
2322 | break; | |
2323 | case 8192: | |
2324 | rctl |= E1000_RCTL_SZ_8192; | |
2325 | break; | |
2326 | case 16384: | |
2327 | rctl |= E1000_RCTL_SZ_16384; | |
2328 | break; | |
2329 | } | |
2330 | ||
2331 | /* | |
2332 | * 82571 and greater support packet-split where the protocol | |
2333 | * header is placed in skb->data and the packet data is | |
2334 | * placed in pages hanging off of skb_shinfo(skb)->nr_frags. | |
2335 | * In the case of a non-split, skb->data is linearly filled, | |
2336 | * followed by the page buffers. Therefore, skb->data is | |
2337 | * sized to hold the largest protocol header. | |
2338 | * | |
2339 | * allocations using alloc_page take too long for regular MTU | |
2340 | * so only enable packet split for jumbo frames | |
2341 | * | |
2342 | * Using pages when the page size is greater than 16k wastes | |
2343 | * a lot of memory, since we allocate 3 pages at all times | |
2344 | * per packet. | |
2345 | */ | |
bc7f75fa | 2346 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); |
97ac8cae BA |
2347 | if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && |
2348 | (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) | |
bc7f75fa | 2349 | adapter->rx_ps_pages = pages; |
97ac8cae BA |
2350 | else |
2351 | adapter->rx_ps_pages = 0; | |
bc7f75fa AK |
2352 | |
2353 | if (adapter->rx_ps_pages) { | |
2354 | /* Configure extra packet-split registers */ | |
2355 | rfctl = er32(RFCTL); | |
2356 | rfctl |= E1000_RFCTL_EXTEN; | |
ad68076e BA |
2357 | /* |
2358 | * disable packet split support for IPv6 extension headers, | |
2359 | * because some malformed IPv6 headers can hang the Rx | |
2360 | */ | |
bc7f75fa AK |
2361 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | |
2362 | E1000_RFCTL_NEW_IPV6_EXT_DIS); | |
2363 | ||
2364 | ew32(RFCTL, rfctl); | |
2365 | ||
140a7480 AK |
2366 | /* Enable Packet split descriptors */ |
2367 | rctl |= E1000_RCTL_DTYP_PS; | |
bc7f75fa AK |
2368 | |
2369 | psrctl |= adapter->rx_ps_bsize0 >> | |
2370 | E1000_PSRCTL_BSIZE0_SHIFT; | |
2371 | ||
2372 | switch (adapter->rx_ps_pages) { | |
2373 | case 3: | |
2374 | psrctl |= PAGE_SIZE << | |
2375 | E1000_PSRCTL_BSIZE3_SHIFT; | |
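			/* fall through */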
2376 | case 2: | |
2377 | psrctl |= PAGE_SIZE << | |
2378 | E1000_PSRCTL_BSIZE2_SHIFT; | |
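			/* fall through */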
2379 | case 1: | |
2380 | psrctl |= PAGE_SIZE >> | |
2381 | E1000_PSRCTL_BSIZE1_SHIFT; | |
2382 | break; | |
2383 | } | |
2384 | ||
2385 | ew32(PSRCTL, psrctl); | |
2386 | } | |
2387 | ||
2388 | ew32(RCTL, rctl); | |
318a94d6 JK |
2389 | /* just started the receive unit, no need to restart */ |
2390 | adapter->flags &= ~FLAG_RX_RESTART_NOW; | |
bc7f75fa AK |
2391 | } |
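/*
 * Worked example of the packet-split decision above, assuming 4 KiB
 * pages: a jumbo MTU of 9000 gives PAGE_USE_COUNT(9000) =
 * (9000 >> 12) + 1 = 3 pages, so non-ICH parts enable packet split;
 * with a standard 1500 MTU, E1000_RCTL_LPE stays clear and
 * rx_ps_pages remains 0.
 */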
2392 | ||
2393 | /** | |
2394 | * e1000_configure_rx - Configure Receive Unit after Reset | |
2395 | * @adapter: board private structure | |
2396 | * | |
2397 | * Configure the Rx unit of the MAC after a reset. | |
2398 | **/ | |
2399 | static void e1000_configure_rx(struct e1000_adapter *adapter) | |
2400 | { | |
2401 | struct e1000_hw *hw = &adapter->hw; | |
2402 | struct e1000_ring *rx_ring = adapter->rx_ring; | |
2403 | u64 rdba; | |
2404 | u32 rdlen, rctl, rxcsum, ctrl_ext; | |
2405 | ||
2406 | if (adapter->rx_ps_pages) { | |
2407 | /* this is a 32 byte descriptor */ | |
2408 | rdlen = rx_ring->count * | |
2409 | sizeof(union e1000_rx_desc_packet_split); | |
2410 | adapter->clean_rx = e1000_clean_rx_irq_ps; | |
2411 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | |
97ac8cae BA |
2412 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { |
2413 | rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); | |
2414 | adapter->clean_rx = e1000_clean_jumbo_rx_irq; | |
2415 | adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; | |
bc7f75fa | 2416 | } else { |
97ac8cae | 2417 | rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); |
bc7f75fa AK |
2418 | adapter->clean_rx = e1000_clean_rx_irq; |
2419 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers; | |
2420 | } | |
2421 | ||
2422 | /* disable receives while setting up the descriptors */ | |
2423 | rctl = er32(RCTL); | |
2424 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
2425 | e1e_flush(); | |
2426 | msleep(10); | |
2427 | ||
2428 | /* set the Receive Delay Timer Register */ | |
2429 | ew32(RDTR, adapter->rx_int_delay); | |
2430 | ||
2431 | /* irq moderation */ | |
2432 | ew32(RADV, adapter->rx_abs_int_delay); | |
2433 | if (adapter->itr_setting != 0) | |
ad68076e | 2434 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
bc7f75fa AK |
2435 | |
2436 | ctrl_ext = er32(CTRL_EXT); | |
2437 | /* Reset delay timers after every interrupt */ | |
2438 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | |
2439 | /* Auto-Mask interrupts upon ICR access */ | |
2440 | ctrl_ext |= E1000_CTRL_EXT_IAME; | |
2441 | ew32(IAM, 0xffffffff); | |
2442 | ew32(CTRL_EXT, ctrl_ext); | |
2443 | e1e_flush(); | |
2444 | ||
ad68076e BA |
2445 | /* |
2446 | * Setup the HW Rx Head and Tail Descriptor Pointers and | |
2447 | * the Base and Length of the Rx Descriptor Ring | |
2448 | */ | |
bc7f75fa AK |
2449 | rdba = rx_ring->dma; |
2450 | ew32(RDBAL, (rdba & DMA_32BIT_MASK)); | |
2451 | ew32(RDBAH, (rdba >> 32)); | |
2452 | ew32(RDLEN, rdlen); | |
2453 | ew32(RDH, 0); | |
2454 | ew32(RDT, 0); | |
2455 | rx_ring->head = E1000_RDH; | |
2456 | rx_ring->tail = E1000_RDT; | |
2457 | ||
2458 | /* Enable Receive Checksum Offload for TCP and UDP */ | |
2459 | rxcsum = er32(RXCSUM); | |
2460 | if (adapter->flags & FLAG_RX_CSUM_ENABLED) { | |
2461 | rxcsum |= E1000_RXCSUM_TUOFL; | |
2462 | ||
ad68076e BA |
2463 | /* |
2464 | * IPv4 payload checksum for UDP fragments must be | |
2465 | * used in conjunction with packet-split. | |
2466 | */ | |
bc7f75fa AK |
2467 | if (adapter->rx_ps_pages) |
2468 | rxcsum |= E1000_RXCSUM_IPPCSE; | |
2469 | } else { | |
2470 | rxcsum &= ~E1000_RXCSUM_TUOFL; | |
2471 | /* no need to clear IPPCSE as it defaults to 0 */ | |
2472 | } | |
2473 | ew32(RXCSUM, rxcsum); | |
2474 | ||
ad68076e BA |
2475 | /* |
2476 | * Enable early receives on supported devices, only takes effect when | |
bc7f75fa | 2477 | * packet size is equal or larger than the specified value (in 8 byte |
ad68076e BA |
2478 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 |
2479 | */ | |
bc7f75fa | 2480 | if ((adapter->flags & FLAG_HAS_ERT) && |
97ac8cae BA |
2481 | (adapter->netdev->mtu > ETH_DATA_LEN)) { |
2482 | u32 rxdctl = er32(RXDCTL(0)); | |
2483 | ew32(RXDCTL(0), rxdctl | 0x3); | |
2484 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); | |
2485 | /* | |
2486 | * With jumbo frames and early-receive enabled, excessive | |
2487 | * C4->C2 latencies result in dropped transactions. | |
2488 | */ | |
2489 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | |
2490 | e1000e_driver_name, 55); | |
2491 | } else { | |
2492 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | |
2493 | e1000e_driver_name, | |
2494 | PM_QOS_DEFAULT_VALUE); | |
2495 | } | |
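	/*
	 * The ERT threshold above is in 8 byte units: E1000_ERT_2048 is
	 * 0x100, i.e. 256 * 8 = 2048 bytes of a received jumbo frame
	 * before the early-receive interrupt fires. The matching pm_qos
	 * request caps CPU DMA latency at 55 usec to avoid the C-state
	 * transitions mentioned above.
	 */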
bc7f75fa AK |
2496 | |
2497 | /* Enable Receives */ | |
2498 | ew32(RCTL, rctl); | |
2499 | } | |
2500 | ||
2501 | /** | |
e2de3eb6 | 2502 | * e1000_update_mc_addr_list - Update Multicast addresses |
bc7f75fa AK |
2503 | * @hw: pointer to the HW structure |
2504 | * @mc_addr_list: array of multicast addresses to program | |
2505 | * @mc_addr_count: number of multicast addresses to program | |
2506 | * @rar_used_count: the first RAR register free to program | |
2507 | * @rar_count: total number of supported Receive Address Registers | |
2508 | * | |
2509 | * Updates the Receive Address Registers and Multicast Table Array. | |
2510 | * The caller must have a packed mc_addr_list of multicast addresses. | |
2511 | * The parameter rar_count will usually be hw->mac.rar_entry_count | |
2512 | * unless there are workarounds that change this. Currently no func pointer | |
2513 | * exists and all implementations are handled in the generic version of this | |
2514 | * function. | |
2515 | **/ | |
e2de3eb6 JK |
2516 | static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, |
2517 | u32 mc_addr_count, u32 rar_used_count, | |
2518 | u32 rar_count) | |
bc7f75fa | 2519 | { |
e2de3eb6 | 2520 | hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count, |
bc7f75fa AK |
2521 | rar_used_count, rar_count); |
2522 | } | |
2523 | ||
2524 | /** | |
2525 | * e1000_set_multi - Multicast and Promiscuous mode set | |
2526 | * @netdev: network interface device structure | |
2527 | * | |
2528 | * The set_multi entry point is called whenever the multicast address | |
2529 | * list or the network interface flags are updated. This routine is | |
2530 | * responsible for configuring the hardware for proper multicast, | |
2531 | * promiscuous mode, and all-multi behavior. | |
2532 | **/ | |
2533 | static void e1000_set_multi(struct net_device *netdev) | |
2534 | { | |
2535 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2536 | struct e1000_hw *hw = &adapter->hw; | |
2537 | struct e1000_mac_info *mac = &hw->mac; | |
2538 | struct dev_mc_list *mc_ptr; | |
2539 | u8 *mta_list; | |
2540 | u32 rctl; | |
2541 | int i; | |
2542 | ||
2543 | /* Check for Promiscuous and All Multicast modes */ | |
2544 | ||
2545 | rctl = er32(RCTL); | |
2546 | ||
2547 | if (netdev->flags & IFF_PROMISC) { | |
2548 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | |
746b9f02 | 2549 | rctl &= ~E1000_RCTL_VFE; |
bc7f75fa | 2550 | } else { |
746b9f02 PM |
2551 | if (netdev->flags & IFF_ALLMULTI) { |
2552 | rctl |= E1000_RCTL_MPE; | |
2553 | rctl &= ~E1000_RCTL_UPE; | |
2554 | } else { | |
2555 | rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); | |
2556 | } | |
78ed11a5 | 2557 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) |
746b9f02 | 2558 | rctl |= E1000_RCTL_VFE; |
bc7f75fa AK |
2559 | } |
2560 | ||
2561 | ew32(RCTL, rctl); | |
2562 | ||
2563 | if (netdev->mc_count) { | |
2564 | mta_list = kmalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC); | |
2565 | if (!mta_list) | |
2566 | return; | |
2567 | ||
2568 | /* prepare a packed array of only addresses. */ | |
2569 | mc_ptr = netdev->mc_list; | |
2570 | ||
2571 | for (i = 0; i < netdev->mc_count; i++) { | |
2572 | if (!mc_ptr) | |
2573 | break; | |
2574 | memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, | |
2575 | ETH_ALEN); | |
2576 | mc_ptr = mc_ptr->next; | |
2577 | } | |
2578 | ||
e2de3eb6 | 2579 | e1000_update_mc_addr_list(hw, mta_list, i, 1, |
bc7f75fa AK |
2580 | mac->rar_entry_count); |
2581 | kfree(mta_list); | |
2582 | } else { | |
2583 | /* | |
2584 | * if we're called from probe, we might not have | |
2585 | * anything to do here, so clear out the list | |
2586 | */ | |
e2de3eb6 | 2587 | e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count); |
bc7f75fa AK |
2588 | } |
2589 | } | |
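/*
 * The mta_list built above is a packed array of mc_count 6-byte
 * addresses, address i at offset i * ETH_ALEN. The update routine fills
 * spare receive address registers first, starting at entry 1 because
 * RAR 0 holds the station address, and hashes whatever is left into the
 * multicast table array.
 */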
2590 | ||
2591 | /** | |
ad68076e | 2592 | * e1000_configure - configure the hardware for Rx and Tx |
bc7f75fa AK |
2593 | * @adapter: private board structure |
2594 | **/ | |
2595 | static void e1000_configure(struct e1000_adapter *adapter) | |
2596 | { | |
2597 | e1000_set_multi(adapter->netdev); | |
2598 | ||
2599 | e1000_restore_vlan(adapter); | |
2600 | e1000_init_manageability(adapter); | |
2601 | ||
2602 | e1000_configure_tx(adapter); | |
2603 | e1000_setup_rctl(adapter); | |
2604 | e1000_configure_rx(adapter); | |
ad68076e | 2605 | adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring)); |
bc7f75fa AK |
2606 | } |
2607 | ||
2608 | /** | |
2609 | * e1000e_power_up_phy - restore link in case the phy was powered down | |
2610 | * @adapter: address of board private structure | |
2611 | * | |
2612 | * The phy may be powered down to save power and turn off link when the | |
2613 | * driver is unloaded and wake on lan is not enabled (among others) | |
2614 | * *** this routine MUST be followed by a call to e1000e_reset *** | |
2615 | **/ | |
2616 | void e1000e_power_up_phy(struct e1000_adapter *adapter) | |
2617 | { | |
2618 | u16 mii_reg = 0; | |
2619 | ||
2620 | /* Just clear the power down bit to wake the phy back up */ | |
318a94d6 | 2621 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { |
ad68076e BA |
2622 | /* |
2623 | * According to the manual, the phy will retain its | |
2624 | * settings across a power-down/up cycle | |
2625 | */ | |
bc7f75fa AK |
2626 | e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); |
2627 | mii_reg &= ~MII_CR_POWER_DOWN; | |
2628 | e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); | |
2629 | } | |
2630 | ||
2631 | adapter->hw.mac.ops.setup_link(&adapter->hw); | |
2632 | } | |
2633 | ||
2634 | /** | |
2635 | * e1000_power_down_phy - Power down the PHY | |
2636 | * | |
2637 | * Power down the PHY so no link is implied when the interface is down. | |
2638 | * The PHY cannot be powered down if management or WoL is active. | |
2639 | */ | |
2640 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | |
2641 | { | |
2642 | struct e1000_hw *hw = &adapter->hw; | |
2643 | u16 mii_reg; | |
2644 | ||
2645 | /* WoL is enabled */ | |
23b66e2b | 2646 | if (adapter->wol) |
bc7f75fa AK |
2647 | return; |
2648 | ||
2649 | /* non-copper PHY? */ | |
318a94d6 | 2650 | if (adapter->hw.phy.media_type != e1000_media_type_copper) |
bc7f75fa AK |
2651 | return; |
2652 | ||
2653 | /* reset is blocked because of a SoL/IDER session */ | |
ad68076e | 2654 | if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw)) |
bc7f75fa AK |
2655 | return; |
2656 | ||
489815ce | 2657 | /* manageability (AMT) is enabled */ |
bc7f75fa AK |
2658 | if (er32(MANC) & E1000_MANC_SMBUS_EN) |
2659 | return; | |
2660 | ||
2661 | /* power down the PHY */ | |
2662 | e1e_rphy(hw, PHY_CONTROL, &mii_reg); | |
2663 | mii_reg |= MII_CR_POWER_DOWN; | |
2664 | e1e_wphy(hw, PHY_CONTROL, mii_reg); | |
2665 | mdelay(1); | |
2666 | } | |
2667 | ||
2668 | /** | |
2669 | * e1000e_reset - bring the hardware into a known good state | |
2670 | * @adapter: board private structure | |
 * | |
2671 | * This function boots the hardware and enables some settings that | |
2672 | * require a configuration cycle of the hardware - those cannot be | |
2673 | * set/changed during runtime. After reset the device needs to be | |
ad68076e | 2674 | * properly configured for Rx, Tx etc. |
bc7f75fa AK |
2675 | */ |
2676 | void e1000e_reset(struct e1000_adapter *adapter) | |
2677 | { | |
2678 | struct e1000_mac_info *mac = &adapter->hw.mac; | |
318a94d6 | 2679 | struct e1000_fc_info *fc = &adapter->hw.fc; |
bc7f75fa AK |
2680 | struct e1000_hw *hw = &adapter->hw; |
2681 | u32 tx_space, min_tx_space, min_rx_space; | |
318a94d6 | 2682 | u32 pba = adapter->pba; |
bc7f75fa AK |
2683 | u16 hwm; |
2684 | ||
ad68076e | 2685 | /* reset Packet Buffer Allocation to default */ |
318a94d6 | 2686 | ew32(PBA, pba); |
df762464 | 2687 | |
318a94d6 | 2688 | if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { |
ad68076e BA |
2689 | /* |
2690 | * To maintain wire speed transmits, the Tx FIFO should be | |
bc7f75fa AK |
2691 | * large enough to accommodate two full transmit packets, |
2692 | * rounded up to the next 1KB and expressed in KB. Likewise, | |
2693 | * the Rx FIFO should be large enough to accommodate at least | |
2694 | * one full receive packet and is similarly rounded up and | |
ad68076e BA |
2695 | * expressed in KB. |
2696 | */ | |
df762464 | 2697 | pba = er32(PBA); |
bc7f75fa | 2698 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
df762464 | 2699 | tx_space = pba >> 16; |
bc7f75fa | 2700 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
df762464 | 2701 | pba &= 0xffff; |
ad68076e BA |
2702 | /* |
2703 | * the Tx FIFO also stores 16 bytes of information about the Tx packet, | |
2704 | * but doesn't include the Ethernet FCS because hardware appends it | |
318a94d6 JK |
2705 | */ |
2706 | min_tx_space = (adapter->max_frame_size + | |
bc7f75fa AK |
2707 | sizeof(struct e1000_tx_desc) - |
2708 | ETH_FCS_LEN) * 2; | |
2709 | min_tx_space = ALIGN(min_tx_space, 1024); | |
2710 | min_tx_space >>= 10; | |
2711 | /* software strips receive CRC, so leave room for it */ | |
318a94d6 | 2712 | min_rx_space = adapter->max_frame_size; |
bc7f75fa AK |
2713 | min_rx_space = ALIGN(min_rx_space, 1024); |
2714 | min_rx_space >>= 10; | |
2715 | ||
ad68076e BA |
2716 | /* |
2717 | * If current Tx allocation is less than the min Tx FIFO size, | |
bc7f75fa | 2718 | * and the min Tx FIFO size is less than the current Rx FIFO |
ad68076e BA |
2719 | * allocation, take space away from current Rx allocation |
2720 | */ | |
df762464 AK |
2721 | if ((tx_space < min_tx_space) && |
2722 | ((min_tx_space - tx_space) < pba)) { | |
2723 | pba -= min_tx_space - tx_space; | |
bc7f75fa | 2724 | |
ad68076e BA |
2725 | /* |
2726 | * if short on Rx space, Rx wins and must trump tx | |
2727 | * adjustment or use Early Receive if available | |
2728 | */ | |
df762464 | 2729 | if ((pba < min_rx_space) && |
bc7f75fa AK |
2730 | (!(adapter->flags & FLAG_HAS_ERT))) |
2731 | /* ERT enabled in e1000_configure_rx */ | |
df762464 | 2732 | pba = min_rx_space; |
bc7f75fa | 2733 | } |
df762464 AK |
2734 | |
2735 | ew32(PBA, pba); | |
bc7f75fa AK |
2736 | } |
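	/*
	 * Worked example of the split above: a PBA value of 0x00140014
	 * means 20 KB of Tx and 20 KB of Rx packet buffer. For a 9018
	 * byte jumbo frame, min_tx_space = ALIGN((9018 + 16 - 4) * 2,
	 * 1024) >> 10 = 18 KB and min_rx_space = 9 KB, so nothing needs
	 * to be taken from the Rx allocation in that case.
	 */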
2737 | ||
bc7f75fa | 2738 | |
ad68076e BA |
2739 | /* |
2740 | * flow control settings | |
2741 | * | |
2742 | * The high water mark must be low enough to fit one full frame | |
bc7f75fa AK |
2743 | * (or the size used for early receive) above it in the Rx FIFO. |
2744 | * Set it to the lower of: | |
2745 | * - 90% of the Rx FIFO size, and | |
2746 | * - the full Rx FIFO size minus the early receive size (for parts | |
2747 | * with ERT support assuming ERT set to E1000_ERT_2048), or | |
ad68076e BA |
2748 | * - the full Rx FIFO size minus one full frame |
2749 | */ | |
bc7f75fa | 2750 | if (adapter->flags & FLAG_HAS_ERT) |
318a94d6 JK |
2751 | hwm = min(((pba << 10) * 9 / 10), |
2752 | ((pba << 10) - (E1000_ERT_2048 << 3))); | |
bc7f75fa | 2753 | else |
318a94d6 JK |
2754 | hwm = min(((pba << 10) * 9 / 10), |
2755 | ((pba << 10) - adapter->max_frame_size)); | |
bc7f75fa | 2756 | |
318a94d6 JK |
2757 | fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ |
2758 | fc->low_water = fc->high_water - 8; | |
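	/*
	 * Worked example for a part without ERT: with pba = 20 KB and a
	 * 9018 byte max frame, hwm = min(20480 * 9 / 10, 20480 - 9018) =
	 * min(18432, 11462) = 11462; the 8-byte mask above then yields a
	 * high water mark of 11456 and a low water mark of 11448 bytes.
	 */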
bc7f75fa AK |
2759 | |
2760 | if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) | |
318a94d6 | 2761 | fc->pause_time = 0xFFFF; |
bc7f75fa | 2762 | else |
318a94d6 JK |
2763 | fc->pause_time = E1000_FC_PAUSE_TIME; |
2764 | fc->send_xon = 1; | |
2765 | fc->type = fc->original_type; | |
bc7f75fa AK |
2766 | |
2767 | /* Allow time for pending master requests to run */ | |
2768 | mac->ops.reset_hw(hw); | |
97ac8cae BA |
2769 | |
2770 | /* | |
2771 | * For parts with AMT enabled, let the firmware know | |
2772 | * that the network interface is in control | |
2773 | */ | |
c43bc57e | 2774 | if (adapter->flags & FLAG_HAS_AMT) |
97ac8cae BA |
2775 | e1000_get_hw_control(adapter); |
2776 | ||
bc7f75fa AK |
2777 | ew32(WUC, 0); |
2778 | ||
2779 | if (mac->ops.init_hw(hw)) | |
44defeb3 | 2780 | e_err("Hardware Error\n"); |
bc7f75fa AK |
2781 | |
2782 | e1000_update_mng_vlan(adapter); | |
2783 | ||
2784 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | |
2785 | ew32(VET, ETH_P_8021Q); | |
2786 | ||
2787 | e1000e_reset_adaptive(hw); | |
2788 | e1000_get_phy_info(hw); | |
2789 | ||
2790 | if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { | |
2791 | u16 phy_data = 0; | |
ad68076e BA |
2792 | /* |
2793 | * speed up time to link by disabling smart power down, ignore | |
bc7f75fa | 2794 | * the return value of this function because there is nothing |
ad68076e BA |
2795 | * different we would do if it failed |
2796 | */ | |
bc7f75fa AK |
2797 | e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
2798 | phy_data &= ~IGP02E1000_PM_SPD; | |
2799 | e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | |
2800 | } | |
bc7f75fa AK |
2801 | } |
2802 | ||
2803 | int e1000e_up(struct e1000_adapter *adapter) | |
2804 | { | |
2805 | struct e1000_hw *hw = &adapter->hw; | |
2806 | ||
2807 | /* hardware has been reset, we need to reload some things */ | |
2808 | e1000_configure(adapter); | |
2809 | ||
2810 | clear_bit(__E1000_DOWN, &adapter->state); | |
2811 | ||
2812 | napi_enable(&adapter->napi); | |
4662e82b BA |
2813 | if (adapter->msix_entries) |
2814 | e1000_configure_msix(adapter); | |
bc7f75fa AK |
2815 | e1000_irq_enable(adapter); |
2816 | ||
2817 | /* fire a link change interrupt to start the watchdog */ | |
2818 | ew32(ICS, E1000_ICS_LSC); | |
2819 | return 0; | |
2820 | } | |
2821 | ||
2822 | void e1000e_down(struct e1000_adapter *adapter) | |
2823 | { | |
2824 | struct net_device *netdev = adapter->netdev; | |
2825 | struct e1000_hw *hw = &adapter->hw; | |
2826 | u32 tctl, rctl; | |
2827 | ||
ad68076e BA |
2828 | /* |
2829 | * signal that we're down so the interrupt handler does not | |
2830 | * reschedule our watchdog timer | |
2831 | */ | |
bc7f75fa AK |
2832 | set_bit(__E1000_DOWN, &adapter->state); |
2833 | ||
2834 | /* disable receives in the hardware */ | |
2835 | rctl = er32(RCTL); | |
2836 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
2837 | /* flush and sleep below */ | |
2838 | ||
d55b53ff | 2839 | netif_tx_stop_all_queues(netdev); |
bc7f75fa AK |
2840 | |
2841 | /* disable transmits in the hardware */ | |
2842 | tctl = er32(TCTL); | |
2843 | tctl &= ~E1000_TCTL_EN; | |
2844 | ew32(TCTL, tctl); | |
2845 | /* flush both disables and wait for them to finish */ | |
2846 | e1e_flush(); | |
2847 | msleep(10); | |
2848 | ||
2849 | napi_disable(&adapter->napi); | |
2850 | e1000_irq_disable(adapter); | |
2851 | ||
2852 | del_timer_sync(&adapter->watchdog_timer); | |
2853 | del_timer_sync(&adapter->phy_info_timer); | |
2854 | ||
2855 | netdev->tx_queue_len = adapter->tx_queue_len; | |
2856 | netif_carrier_off(netdev); | |
2857 | adapter->link_speed = 0; | |
2858 | adapter->link_duplex = 0; | |
2859 | ||
52cc3086 JK |
2860 | if (!pci_channel_offline(adapter->pdev)) |
2861 | e1000e_reset(adapter); | |
bc7f75fa AK |
2862 | e1000_clean_tx_ring(adapter); |
2863 | e1000_clean_rx_ring(adapter); | |
2864 | ||
2865 | /* | |
2866 | * TODO: for power management, we could drop the link and | |
2867 | * pci_disable_device here. | |
2868 | */ | |
2869 | } | |
2870 | ||
2871 | void e1000e_reinit_locked(struct e1000_adapter *adapter) | |
2872 | { | |
2873 | might_sleep(); | |
2874 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | |
2875 | msleep(1); | |
2876 | e1000e_down(adapter); | |
2877 | e1000e_up(adapter); | |
2878 | clear_bit(__E1000_RESETTING, &adapter->state); | |
2879 | } | |
2880 | ||
2881 | /** | |
2882 | * e1000_sw_init - Initialize general software structures (struct e1000_adapter) | |
2883 | * @adapter: board private structure to initialize | |
2884 | * | |
2885 | * e1000_sw_init initializes the Adapter private data structure. | |
2886 | * Fields are initialized based on PCI device information and | |
2887 | * OS network device settings (MTU size). | |
2888 | **/ | |
2889 | static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |
2890 | { | |
bc7f75fa AK |
2891 | struct net_device *netdev = adapter->netdev; |
2892 | ||
2893 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; | |
2894 | adapter->rx_ps_bsize0 = 128; | |
318a94d6 JK |
2895 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
2896 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | |
bc7f75fa | 2897 | |
4662e82b | 2898 | e1000e_set_interrupt_capability(adapter); |
bc7f75fa | 2899 | |
4662e82b BA |
2900 | if (e1000_alloc_queues(adapter)) |
2901 | return -ENOMEM; | |
bc7f75fa AK |
2902 | |
2903 | spin_lock_init(&adapter->tx_queue_lock); | |
2904 | ||
2905 | /* Explicitly disable IRQ since the NIC can be in any state. */ | |
bc7f75fa AK |
2906 | e1000_irq_disable(adapter); |
2907 | ||
2908 | spin_lock_init(&adapter->stats_lock); | |
2909 | ||
2910 | set_bit(__E1000_DOWN, &adapter->state); | |
2911 | return 0; | |
bc7f75fa AK |
2912 | } |
2913 | ||
f8d59f78 BA |
2914 | /** |
2915 | * e1000_intr_msi_test - Interrupt Handler | |
2916 | * @irq: interrupt number | |
2917 | * @data: pointer to a network interface device structure | |
2918 | **/ | |
2919 | static irqreturn_t e1000_intr_msi_test(int irq, void *data) | |
2920 | { | |
2921 | struct net_device *netdev = data; | |
2922 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2923 | struct e1000_hw *hw = &adapter->hw; | |
2924 | u32 icr = er32(ICR); | |
2925 | ||
2926 | e_dbg("%s: icr is %08X\n", netdev->name, icr); | |
2927 | if (icr & E1000_ICR_RXSEQ) { | |
2928 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | |
2929 | wmb(); | |
2930 | } | |
2931 | ||
2932 | return IRQ_HANDLED; | |
2933 | } | |
2934 | ||
2935 | /** | |
2936 | * e1000_test_msi_interrupt - Returns 0 for successful test | |
2937 | * @adapter: board private struct | |
2938 | * | |
2939 | * code flow taken from tg3.c | |
2940 | **/ | |
2941 | static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | |
2942 | { | |
2943 | struct net_device *netdev = adapter->netdev; | |
2944 | struct e1000_hw *hw = &adapter->hw; | |
2945 | int err; | |
2946 | ||
2947 | /* poll_enable hasn't been called yet, so don't need disable */ | |
2948 | /* clear any pending events */ | |
2949 | er32(ICR); | |
2950 | ||
2951 | /* free the real vector and request a test handler */ | |
2952 | e1000_free_irq(adapter); | |
4662e82b | 2953 | e1000e_reset_interrupt_capability(adapter); |
f8d59f78 BA |
2954 | |
2955 | /* Assume that the test fails; if it succeeds, the test | |
2956 | * MSI irq handler will clear this flag */ | |
2957 | adapter->flags |= FLAG_MSI_TEST_FAILED; | |
2958 | ||
2959 | err = pci_enable_msi(adapter->pdev); | |
2960 | if (err) | |
2961 | goto msi_test_failed; | |
2962 | ||
2963 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, | |
2964 | netdev->name, netdev); | |
2965 | if (err) { | |
2966 | pci_disable_msi(adapter->pdev); | |
2967 | goto msi_test_failed; | |
2968 | } | |
2969 | ||
2970 | wmb(); | |
2971 | ||
2972 | e1000_irq_enable(adapter); | |
2973 | ||
2974 | /* fire an unusual interrupt on the test handler */ | |
2975 | ew32(ICS, E1000_ICS_RXSEQ); | |
2976 | e1e_flush(); | |
2977 | msleep(50); | |
2978 | ||
2979 | e1000_irq_disable(adapter); | |
2980 | ||
2981 | rmb(); | |
2982 | ||
2983 | if (adapter->flags & FLAG_MSI_TEST_FAILED) { | |
4662e82b | 2984 | adapter->int_mode = E1000E_INT_MODE_LEGACY; |
f8d59f78 BA |
2985 | err = -EIO; |
2986 | e_info("MSI interrupt test failed!\n"); | |
2987 | } | |
2988 | ||
2989 | free_irq(adapter->pdev->irq, netdev); | |
2990 | pci_disable_msi(adapter->pdev); | |
2991 | ||
2992 | if (err == -EIO) | |
2993 | goto msi_test_failed; | |
2994 | ||
2995 | /* okay so the test worked, restore settings */ | |
2996 | e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); | |
2997 | msi_test_failed: | |
4662e82b | 2998 | e1000e_set_interrupt_capability(adapter); |
f8d59f78 BA |
2999 | e1000_request_irq(adapter); |
3000 | return err; | |
3001 | } | |
3002 | ||
3003 | /** | |
3004 | * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored | |
3005 | * @adapter: board private struct | |
3006 | * | |
3007 | * code flow taken from tg3.c, called with e1000 interrupts disabled. | |
3008 | **/ | |
3009 | static int e1000_test_msi(struct e1000_adapter *adapter) | |
3010 | { | |
3011 | int err; | |
3012 | u16 pci_cmd; | |
3013 | ||
3014 | if (!(adapter->flags & FLAG_MSI_ENABLED)) | |
3015 | return 0; | |
3016 | ||
3017 | /* disable SERR in case the MSI write causes a master abort */ | |
3018 | pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); | |
3019 | pci_write_config_word(adapter->pdev, PCI_COMMAND, | |
3020 | pci_cmd & ~PCI_COMMAND_SERR); | |
3021 | ||
3022 | err = e1000_test_msi_interrupt(adapter); | |
3023 | ||
3024 | /* restore previous setting of command word */ | |
3025 | pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); | |
3026 | ||
3027 | /* success! */ | |
3028 | if (!err) | |
3029 | return 0; | |
3030 | ||
3031 | /* EIO means MSI test failed */ | |
3032 | if (err != -EIO) | |
3033 | return err; | |
3034 | ||
3035 | /* back to INTx mode */ | |
3036 | e_warn("MSI interrupt test failed, using legacy interrupt.\n"); | |
3037 | ||
3038 | e1000_free_irq(adapter); | |
3039 | ||
3040 | err = e1000_request_irq(adapter); | |
3041 | ||
3042 | return err; | |
3043 | } | |
3044 | ||
bc7f75fa AK |
3045 | /** |
3046 | * e1000_open - Called when a network interface is made active | |
3047 | * @netdev: network interface device structure | |
3048 | * | |
3049 | * Returns 0 on success, negative value on failure | |
3050 | * | |
3051 | * The open entry point is called when a network interface is made | |
3052 | * active by the system (IFF_UP). At this point all resources needed | |
3053 | * for transmit and receive operations are allocated, the interrupt | |
3054 | * handler is registered with the OS, the watchdog timer is started, | |
3055 | * and the stack is notified that the interface is ready. | |
3056 | **/ | |
3057 | static int e1000_open(struct net_device *netdev) | |
3058 | { | |
3059 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3060 | struct e1000_hw *hw = &adapter->hw; | |
3061 | int err; | |
3062 | ||
3063 | /* disallow open during test */ | |
3064 | if (test_bit(__E1000_TESTING, &adapter->state)) | |
3065 | return -EBUSY; | |
3066 | ||
3067 | /* allocate transmit descriptors */ | |
3068 | err = e1000e_setup_tx_resources(adapter); | |
3069 | if (err) | |
3070 | goto err_setup_tx; | |
3071 | ||
3072 | /* allocate receive descriptors */ | |
3073 | err = e1000e_setup_rx_resources(adapter); | |
3074 | if (err) | |
3075 | goto err_setup_rx; | |
3076 | ||
3077 | e1000e_power_up_phy(adapter); | |
3078 | ||
3079 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
3080 | if ((adapter->hw.mng_cookie.status & | |
3081 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) | |
3082 | e1000_update_mng_vlan(adapter); | |
3083 | ||
ad68076e BA |
3084 | /* |
3085 | * If AMT is enabled, let the firmware know that the network | |
3086 | * interface is now open | |
3087 | */ | |
c43bc57e | 3088 | if (adapter->flags & FLAG_HAS_AMT) |
bc7f75fa AK |
3089 | e1000_get_hw_control(adapter); |
3090 | ||
ad68076e BA |
3091 | /* |
3092 | * before we allocate an interrupt, we must be ready to handle it. | |
bc7f75fa AK |
3093 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
3094 | * as soon as we call request_irq, so we have to set up our | |
ad68076e BA |
3095 | * clean_rx handler before we do so. |
3096 | */ | |
bc7f75fa AK |
3097 | e1000_configure(adapter); |
3098 | ||
3099 | err = e1000_request_irq(adapter); | |
3100 | if (err) | |
3101 | goto err_req_irq; | |
3102 | ||
f8d59f78 BA |
3103 | /* |
3104 | * Work around PCIe errata with MSI interrupts causing some chipsets to | |
3105 | * ignore e1000e MSI messages, which means we need to test our MSI | |
3106 | * interrupt now | |
3107 | */ | |
4662e82b | 3108 | if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { |
f8d59f78 BA |
3109 | err = e1000_test_msi(adapter); |
3110 | if (err) { | |
3111 | e_err("Interrupt allocation failed\n"); | |
3112 | goto err_req_irq; | |
3113 | } | |
3114 | } | |
3115 | ||
bc7f75fa AK |
3116 | /* From here on the code is the same as e1000e_up() */ |
3117 | clear_bit(__E1000_DOWN, &adapter->state); | |
3118 | ||
3119 | napi_enable(&adapter->napi); | |
3120 | ||
3121 | e1000_irq_enable(adapter); | |
3122 | ||
d55b53ff JK |
3123 | netif_tx_start_all_queues(netdev); |
3124 | ||
bc7f75fa AK |
3125 | /* fire a link status change interrupt to start the watchdog */ |
3126 | ew32(ICS, E1000_ICS_LSC); | |
3127 | ||
3128 | return 0; | |
3129 | ||
3130 | err_req_irq: | |
3131 | e1000_release_hw_control(adapter); | |
3132 | e1000_power_down_phy(adapter); | |
3133 | e1000e_free_rx_resources(adapter); | |
3134 | err_setup_rx: | |
3135 | e1000e_free_tx_resources(adapter); | |
3136 | err_setup_tx: | |
3137 | e1000e_reset(adapter); | |
3138 | ||
3139 | return err; | |
3140 | } | |
3141 | ||
3142 | /** | |
3143 | * e1000_close - Disables a network interface | |
3144 | * @netdev: network interface device structure | |
3145 | * | |
3146 | * Returns 0, this is not allowed to fail | |
3147 | * | |
3148 | * The close entry point is called when an interface is de-activated | |
3149 | * by the OS. The hardware is still under the driver's control, but | |
3150 | * needs to be disabled. A global MAC reset is issued to stop the | |
3151 | * hardware, and all transmit and receive resources are freed. | |
3152 | **/ | |
3153 | static int e1000_close(struct net_device *netdev) | |
3154 | { | |
3155 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3156 | ||
3157 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | |
3158 | e1000e_down(adapter); | |
3159 | e1000_power_down_phy(adapter); | |
3160 | e1000_free_irq(adapter); | |
3161 | ||
3162 | e1000e_free_tx_resources(adapter); | |
3163 | e1000e_free_rx_resources(adapter); | |
3164 | ||
ad68076e BA |
3165 | /* |
3166 | * kill manageability vlan ID if supported, but not if a vlan with | |
3167 | * the same ID is registered on the host OS (let 8021q kill it) | |
3168 | */ | |
bc7f75fa AK |
3169 | if ((adapter->hw.mng_cookie.status & |
3170 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | |
3171 | !(adapter->vlgrp && | |
3172 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) | |
3173 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | |
3174 | ||
ad68076e BA |
3175 | /* |
3176 | * If AMT is enabled, let the firmware know that the network | |
3177 | * interface is now closed | |
3178 | */ | |
c43bc57e | 3179 | if (adapter->flags & FLAG_HAS_AMT) |
bc7f75fa AK |
3180 | e1000_release_hw_control(adapter); |
3181 | ||
3182 | return 0; | |
3183 | } | |
3184 | /** | |
3185 | * e1000_set_mac - Change the Ethernet Address of the NIC | |
3186 | * @netdev: network interface device structure | |
3187 | * @p: pointer to an address structure | |
3188 | * | |
3189 | * Returns 0 on success, negative on failure | |
3190 | **/ | |
3191 | static int e1000_set_mac(struct net_device *netdev, void *p) | |
3192 | { | |
3193 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3194 | struct sockaddr *addr = p; | |
3195 | ||
3196 | if (!is_valid_ether_addr(addr->sa_data)) | |
3197 | return -EADDRNOTAVAIL; | |
3198 | ||
3199 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
3200 | memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); | |
3201 | ||
3202 | e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); | |
3203 | ||
3204 | if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { | |
3205 | /* activate the workaround */ | |
3206 | e1000e_set_laa_state_82571(&adapter->hw, 1); | |
3207 | ||
ad68076e BA |
3208 | /* |
3208 | * Hold a copy of the LAA in RAR[14]. This is done so that | |
bc7f75fa AK |
3210 | * between the time RAR[0] gets clobbered and the time it |
3211 | * gets fixed (in e1000_watchdog), the actual LAA is in one | |
3212 | * of the RARs and no incoming packets directed to this port | |
3213 | * are dropped. Eventually the LAA will be in RAR[0] and | |
ad68076e BA |
3214 | * RAR[14] |
3215 | */ | |
bc7f75fa AK |
3216 | e1000e_rar_set(&adapter->hw, |
3217 | adapter->hw.mac.addr, | |
3218 | adapter->hw.mac.rar_entry_count - 1); | |
3219 | } | |
3220 | ||
3221 | return 0; | |
3222 | } | |
3223 | ||
ad68076e BA |
3224 | /* |
3225 | * Need to wait a few seconds after link up to get diagnostic information from | |
3226 | * the phy | |
3227 | */ | |
bc7f75fa AK |
3228 | static void e1000_update_phy_info(unsigned long data) |
3229 | { | |
3230 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | |
3231 | e1000_get_phy_info(&adapter->hw); | |
3232 | } | |
3233 | ||
3234 | /** | |
3235 | * e1000e_update_stats - Update the board statistics counters | |
3236 | * @adapter: board private structure | |
3237 | **/ | |
3238 | void e1000e_update_stats(struct e1000_adapter *adapter) | |
3239 | { | |
3240 | struct e1000_hw *hw = &adapter->hw; | |
3241 | struct pci_dev *pdev = adapter->pdev; | |
3242 | unsigned long irq_flags; | |
3243 | u16 phy_tmp; | |
3244 | ||
3245 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | |
3246 | ||
3247 | /* | |
3248 | * Prevent stats update while adapter is being reset, or if the pci | |
3249 | * connection is down. | |
3250 | */ | |
3251 | if (adapter->link_speed == 0) | |
3252 | return; | |
3253 | if (pci_channel_offline(pdev)) | |
3254 | return; | |
3255 | ||
3256 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); | |
3257 | ||
ad68076e BA |
3258 | /* |
3259 | * these counters are modified from e1000_adjust_tbi_stats, | |
bc7f75fa AK |
3260 | * called from the interrupt context, so they must only |
3261 | * be written while holding adapter->stats_lock | |
3262 | */ | |
3263 | ||
3264 | adapter->stats.crcerrs += er32(CRCERRS); | |
3265 | adapter->stats.gprc += er32(GPRC); | |
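| /* | |
| * GORC/GOTC are 64-bit octet counts split across low/high registers | |
| * that clear on read; only the low halves are accumulated, and the | |
| * high halves are read purely to clear them. | |
| */ | |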
7c25769f BA |
3266 | adapter->stats.gorc += er32(GORCL); |
3267 | er32(GORCH); /* Clear gorc */ | |
bc7f75fa AK |
3268 | adapter->stats.bprc += er32(BPRC); |
3269 | adapter->stats.mprc += er32(MPRC); | |
3270 | adapter->stats.roc += er32(ROC); | |
3271 | ||
bc7f75fa AK |
3272 | adapter->stats.mpc += er32(MPC); |
3273 | adapter->stats.scc += er32(SCC); | |
3274 | adapter->stats.ecol += er32(ECOL); | |
3275 | adapter->stats.mcc += er32(MCC); | |
3276 | adapter->stats.latecol += er32(LATECOL); | |
3277 | adapter->stats.dc += er32(DC); | |
bc7f75fa AK |
3278 | adapter->stats.xonrxc += er32(XONRXC); |
3279 | adapter->stats.xontxc += er32(XONTXC); | |
3280 | adapter->stats.xoffrxc += er32(XOFFRXC); | |
3281 | adapter->stats.xofftxc += er32(XOFFTXC); | |
bc7f75fa | 3282 | adapter->stats.gptc += er32(GPTC); |
7c25769f BA |
3283 | adapter->stats.gotc += er32(GOTCL); |
3284 | er32(GOTCH); /* Clear gotc */ | |
bc7f75fa AK |
3285 | adapter->stats.rnbc += er32(RNBC); |
3286 | adapter->stats.ruc += er32(RUC); | |
bc7f75fa AK |
3287 | |
3288 | adapter->stats.mptc += er32(MPTC); | |
3289 | adapter->stats.bptc += er32(BPTC); | |
3290 | ||
3291 | /* used for adaptive IFS */ | |
3292 | ||
3293 | hw->mac.tx_packet_delta = er32(TPT); | |
3294 | adapter->stats.tpt += hw->mac.tx_packet_delta; | |
3295 | hw->mac.collision_delta = er32(COLC); | |
3296 | adapter->stats.colc += hw->mac.collision_delta; | |
3297 | ||
3298 | adapter->stats.algnerrc += er32(ALGNERRC); | |
3299 | adapter->stats.rxerrc += er32(RXERRC); | |
4662e82b BA |
3300 | if (hw->mac.type != e1000_82574) |
3301 | adapter->stats.tncrs += er32(TNCRS); | |
bc7f75fa AK |
3302 | adapter->stats.cexterr += er32(CEXTERR); |
3303 | adapter->stats.tsctc += er32(TSCTC); | |
3304 | adapter->stats.tsctfc += er32(TSCTFC); | |
3305 | ||
bc7f75fa | 3306 | /* Fill out the OS statistics structure */ |
bc7f75fa AK |
3307 | adapter->net_stats.multicast = adapter->stats.mprc; |
3308 | adapter->net_stats.collisions = adapter->stats.colc; | |
3309 | ||
3310 | /* Rx Errors */ | |
3311 | ||
ad68076e BA |
3312 | /* |
3313 | * RLEC on some newer hardware can be incorrect so build | |
3314 | * our own version based on RUC and ROC | |
3315 | */ | |
bc7f75fa AK |
3316 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + |
3317 | adapter->stats.crcerrs + adapter->stats.algnerrc + | |
3318 | adapter->stats.ruc + adapter->stats.roc + | |
3319 | adapter->stats.cexterr; | |
3320 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + | |
3321 | adapter->stats.roc; | |
3322 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | |
3323 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | |
3324 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | |
3325 | ||
3326 | /* Tx Errors */ | |
3327 | adapter->net_stats.tx_errors = adapter->stats.ecol + | |
3328 | adapter->stats.latecol; | |
3329 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | |
3330 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | |
3331 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | |
3332 | ||
3333 | /* Tx Dropped needs to be maintained elsewhere */ | |
3334 | ||
3335 | /* Phy Stats */ | |
318a94d6 | 3336 | if (hw->phy.media_type == e1000_media_type_copper) { |
bc7f75fa AK |
3337 | if ((adapter->link_speed == SPEED_1000) && |
3338 | (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { | |
3339 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; | |
3340 | adapter->phy_stats.idle_errors += phy_tmp; | |
3341 | } | |
3342 | } | |
3343 | ||
3344 | /* Management Stats */ | |
3345 | adapter->stats.mgptc += er32(MGTPTC); | |
3346 | adapter->stats.mgprc += er32(MGTPRC); | |
3347 | adapter->stats.mgpdc += er32(MGTPDC); | |
3348 | ||
3349 | spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); | |
3350 | } | |
3351 | ||
7c25769f BA |
3352 | /** |
3353 | * e1000_phy_read_status - Update the PHY register status snapshot | |
3354 | * @adapter: board private structure | |
3355 | **/ | |
3356 | static void e1000_phy_read_status(struct e1000_adapter *adapter) | |
3357 | { | |
3358 | struct e1000_hw *hw = &adapter->hw; | |
3359 | struct e1000_phy_regs *phy = &adapter->phy_regs; | |
3360 | int ret_val; | |
3361 | unsigned long irq_flags; | |
3362 | ||
3363 | ||
3364 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); | |
3365 | ||
3366 | if ((er32(STATUS) & E1000_STATUS_LU) && | |
3367 | (adapter->hw.phy.media_type == e1000_media_type_copper)) { | |
3368 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); | |
3369 | ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); | |
3370 | ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); | |
3371 | ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); | |
3372 | ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); | |
3373 | ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); | |
3374 | ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); | |
3375 | ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); | |
3376 | if (ret_val) | |
44defeb3 | 3377 | e_warn("Error reading PHY register\n"); |
7c25769f BA |
3378 | } else { |
3379 | /* | |
3380 | * Do not read PHY registers if link is not up | |
3381 | * Set values to typical power-on defaults | |
3382 | */ | |
3383 | phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); | |
3384 | phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | | |
3385 | BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | | |
3386 | BMSR_ERCAP); | |
3387 | phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | | |
3388 | ADVERTISE_ALL | ADVERTISE_CSMA); | |
3389 | phy->lpa = 0; | |
3390 | phy->expansion = EXPANSION_ENABLENPAGE; | |
3391 | phy->ctrl1000 = ADVERTISE_1000FULL; | |
3392 | phy->stat1000 = 0; | |
3393 | phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); | |
3394 | } | |
3395 | ||
3396 | spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); | |
3397 | } | |
3398 | ||
bc7f75fa AK |
3399 | static void e1000_print_link_info(struct e1000_adapter *adapter) |
3400 | { | |
bc7f75fa AK |
3401 | struct e1000_hw *hw = &adapter->hw; |
3402 | u32 ctrl = er32(CTRL); | |
3403 | ||
44defeb3 JK |
3404 | e_info("Link is Up %d Mbps %s, Flow Control: %s\n", |
3405 | adapter->link_speed, | |
3406 | (adapter->link_duplex == FULL_DUPLEX) ? | |
3407 | "Full Duplex" : "Half Duplex", | |
3408 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? | |
3409 | "RX/TX" : | |
3410 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : | |
3411 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); | |
bc7f75fa AK |
3412 | } |
3413 | ||
318a94d6 JK |
3414 | static bool e1000_has_link(struct e1000_adapter *adapter) |
3415 | { | |
3416 | struct e1000_hw *hw = &adapter->hw; | |
3417 | bool link_active = false; | |
3418 | s32 ret_val = 0; | |
3419 | ||
3420 | /* | |
3421 | * get_link_status is set on LSC (link status) interrupt or | |
3422 | * Rx sequence error interrupt. get_link_status will stay | |
3423 | * true until the check_for_link establishes link | |
3424 | * for copper adapters ONLY | |
3425 | */ | |
3426 | switch (hw->phy.media_type) { | |
3427 | case e1000_media_type_copper: | |
3428 | if (hw->mac.get_link_status) { | |
3429 | ret_val = hw->mac.ops.check_for_link(hw); | |
3430 | link_active = !hw->mac.get_link_status; | |
3431 | } else { | |
3432 | link_active = true; | |
3433 | } | |
3434 | break; | |
3435 | case e1000_media_type_fiber: | |
3436 | ret_val = hw->mac.ops.check_for_link(hw); | |
3437 | link_active = !!(er32(STATUS) & E1000_STATUS_LU); | |
3438 | break; | |
3439 | case e1000_media_type_internal_serdes: | |
3440 | ret_val = hw->mac.ops.check_for_link(hw); | |
3441 | link_active = adapter->hw.mac.serdes_has_link; | |
3442 | break; | |
3443 | default: | |
3444 | case e1000_media_type_unknown: | |
3445 | break; | |
3446 | } | |
3447 | ||
3448 | if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && | |
3449 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | |
3450 | /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ | |
44defeb3 | 3451 | e_info("Gigabit has been disabled, downgrading speed\n"); |
318a94d6 JK |
3452 | } |
3453 | ||
3454 | return link_active; | |
3455 | } | |
3456 | ||
3457 | static void e1000e_enable_receives(struct e1000_adapter *adapter) | |
3458 | { | |
3459 | /* make sure the receive unit is started */ | |
3460 | if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && | |
3461 | (adapter->flags & FLAG_RX_RESTART_NOW)) { | |
3462 | struct e1000_hw *hw = &adapter->hw; | |
3463 | u32 rctl = er32(RCTL); | |
3464 | ew32(RCTL, rctl | E1000_RCTL_EN); | |
3465 | adapter->flags &= ~FLAG_RX_RESTART_NOW; | |
3466 | } | |
3467 | } | |
3468 | ||
bc7f75fa AK |
3469 | /** |
3470 | * e1000_watchdog - Timer Call-back | |
3471 | * @data: pointer to adapter cast into an unsigned long | |
3472 | **/ | |
3473 | static void e1000_watchdog(unsigned long data) | |
3474 | { | |
3475 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | |
3476 | ||
3477 | /* Do the rest outside of interrupt context */ | |
3478 | schedule_work(&adapter->watchdog_task); | |
3479 | ||
3480 | /* TODO: make this use queue_delayed_work() */ | |
3481 | } | |
3482 | ||
3483 | static void e1000_watchdog_task(struct work_struct *work) | |
3484 | { | |
3485 | struct e1000_adapter *adapter = container_of(work, | |
3486 | struct e1000_adapter, watchdog_task); | |
bc7f75fa AK |
3487 | struct net_device *netdev = adapter->netdev; |
3488 | struct e1000_mac_info *mac = &adapter->hw.mac; | |
3489 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
3490 | struct e1000_hw *hw = &adapter->hw; | |
3491 | u32 link, tctl; | |
bc7f75fa AK |
3492 | int tx_pending = 0; |
3493 | ||
318a94d6 JK |
3494 | link = e1000_has_link(adapter); |
3495 | if ((netif_carrier_ok(netdev)) && link) { | |
3496 | e1000e_enable_receives(adapter); | |
bc7f75fa | 3497 | goto link_up; |
bc7f75fa AK |
3498 | } |
3499 | ||
3500 | if ((e1000e_enable_tx_pkt_filtering(hw)) && | |
3501 | (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) | |
3502 | e1000_update_mng_vlan(adapter); | |
3503 | ||
bc7f75fa AK |
3504 | if (link) { |
3505 | if (!netif_carrier_ok(netdev)) { | |
3506 | bool txb2b = true; | |
318a94d6 | 3507 | /* update snapshot of PHY registers on LSC */ |
7c25769f | 3508 | e1000_phy_read_status(adapter); |
bc7f75fa AK |
3509 | mac->ops.get_link_up_info(&adapter->hw, |
3510 | &adapter->link_speed, | |
3511 | &adapter->link_duplex); | |
3512 | e1000_print_link_info(adapter); | |
f4187b56 BA |
3513 | /* |
3514 | * On supported PHYs, check for duplex mismatch only | |
3515 | * if link has autonegotiated at 10/100 half | |
3516 | */ | |
3517 | if ((hw->phy.type == e1000_phy_igp_3 || | |
3518 | hw->phy.type == e1000_phy_bm) && | |
3519 | hw->mac.autoneg && | |
3520 | (adapter->link_speed == SPEED_10 || | |
3521 | adapter->link_speed == SPEED_100) && | |
3522 | (adapter->link_duplex == HALF_DUPLEX)) { | |
3523 | u16 autoneg_exp; | |
3524 | ||
3525 | e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); | |
3526 | ||
3527 | if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) | |
3528 | e_info("Autonegotiated half duplex but" | |
3529 | " link partner cannot autoneg. " | |
3530 | " Try forcing full duplex if " | |
3531 | "link gets many collisions.\n"); | |
3532 | } | |
3533 | ||
ad68076e BA |
3534 | /* |
3535 | * tweak tx_queue_len according to speed/duplex | |
3536 | * and adjust the timeout factor | |
3537 | */ | |
bc7f75fa AK |
3538 | netdev->tx_queue_len = adapter->tx_queue_len; |
3539 | adapter->tx_timeout_factor = 1; | |
3540 | switch (adapter->link_speed) { | |
3541 | case SPEED_10: | |
3542 | txb2b = false; | |
3543 | netdev->tx_queue_len = 10; | |
10f1b492 | 3544 | adapter->tx_timeout_factor = 16; |
bc7f75fa AK |
3545 | break; |
3546 | case SPEED_100: | |
3547 | txb2b = false; | |
3548 | netdev->tx_queue_len = 100; | |
3549 | /* maybe add some timeout factor? */ | |
3550 | break; | |
3551 | } | |
3552 | ||
ad68076e BA |
3553 | /* |
3554 | * workaround: re-program speed mode bit after | |
3555 | * link-up event | |
3556 | */ | |
bc7f75fa AK |
3557 | if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && |
3558 | !txb2b) { | |
3559 | u32 tarc0; | |
e9ec2c0f | 3560 | tarc0 = er32(TARC(0)); |
bc7f75fa | 3561 | tarc0 &= ~SPEED_MODE_BIT; |
e9ec2c0f | 3562 | ew32(TARC(0), tarc0); |
bc7f75fa AK |
3563 | } |
3564 | ||
ad68076e BA |
3565 | /* |
3566 | * disable TSO for pcie and 10/100 speeds, to avoid | |
3567 | * some hardware issues | |
3568 | */ | |
bc7f75fa AK |
3569 | if (!(adapter->flags & FLAG_TSO_FORCE)) { |
3570 | switch (adapter->link_speed) { | |
3571 | case SPEED_10: | |
3572 | case SPEED_100: | |
44defeb3 | 3573 | e_info("10/100 speed: disabling TSO\n"); |
bc7f75fa AK |
3574 | netdev->features &= ~NETIF_F_TSO; |
3575 | netdev->features &= ~NETIF_F_TSO6; | |
3576 | break; | |
3577 | case SPEED_1000: | |
3578 | netdev->features |= NETIF_F_TSO; | |
3579 | netdev->features |= NETIF_F_TSO6; | |
3580 | break; | |
3581 | default: | |
3582 | /* oops */ | |
3583 | break; | |
3584 | } | |
3585 | } | |
3586 | ||
ad68076e BA |
3587 | /* |
3588 | * enable transmits in the hardware, need to do this | |
3589 | * after setting TARC(0) | |
3590 | */ | |
bc7f75fa AK |
3591 | tctl = er32(TCTL); |
3592 | tctl |= E1000_TCTL_EN; | |
3593 | ew32(TCTL, tctl); | |
3594 | ||
3595 | netif_carrier_on(netdev); | |
d55b53ff | 3596 | netif_tx_wake_all_queues(netdev); |
bc7f75fa AK |
3597 | |
3598 | if (!test_bit(__E1000_DOWN, &adapter->state)) | |
3599 | mod_timer(&adapter->phy_info_timer, | |
3600 | round_jiffies(jiffies + 2 * HZ)); | |
bc7f75fa AK |
3601 | } |
3602 | } else { | |
3603 | if (netif_carrier_ok(netdev)) { | |
3604 | adapter->link_speed = 0; | |
3605 | adapter->link_duplex = 0; | |
44defeb3 | 3606 | e_info("Link is Down\n"); |
bc7f75fa | 3607 | netif_carrier_off(netdev); |
d55b53ff | 3608 | netif_tx_stop_all_queues(netdev); |
bc7f75fa AK |
3609 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
3610 | mod_timer(&adapter->phy_info_timer, | |
3611 | round_jiffies(jiffies + 2 * HZ)); | |
3612 | ||
3613 | if (adapter->flags & FLAG_RX_NEEDS_RESTART) | |
3614 | schedule_work(&adapter->reset_task); | |
3615 | } | |
3616 | } | |
3617 | ||
3618 | link_up: | |
3619 | e1000e_update_stats(adapter); | |
3620 | ||
3621 | mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; | |
3622 | adapter->tpt_old = adapter->stats.tpt; | |
3623 | mac->collision_delta = adapter->stats.colc - adapter->colc_old; | |
3624 | adapter->colc_old = adapter->stats.colc; | |
3625 | ||
7c25769f BA |
3626 | adapter->gorc = adapter->stats.gorc - adapter->gorc_old; |
3627 | adapter->gorc_old = adapter->stats.gorc; | |
3628 | adapter->gotc = adapter->stats.gotc - adapter->gotc_old; | |
3629 | adapter->gotc_old = adapter->stats.gotc; | |
bc7f75fa AK |
3630 | |
3631 | e1000e_update_adaptive(&adapter->hw); | |
3632 | ||
3633 | if (!netif_carrier_ok(netdev)) { | |
3634 | tx_pending = (e1000_desc_unused(tx_ring) + 1 < | |
3635 | tx_ring->count); | |
3636 | if (tx_pending) { | |
ad68076e BA |
3637 | /* |
3638 | * We've lost link, so the controller stops DMA, | |
bc7f75fa AK |
3639 | * but we've got queued Tx work that's never going |
3640 | * to get done, so reset controller to flush Tx. | |
ad68076e BA |
3641 | * (Do the reset outside of interrupt context). |
3642 | */ | |
bc7f75fa AK |
3643 | adapter->tx_timeout_count++; |
3644 | schedule_work(&adapter->reset_task); | |
3645 | } | |
3646 | } | |
3647 | ||
ad68076e | 3648 | /* Cause software interrupt to ensure Rx ring is cleaned */ |
4662e82b BA |
3649 | if (adapter->msix_entries) |
3650 | ew32(ICS, adapter->rx_ring->ims_val); | |
3651 | else | |
3652 | ew32(ICS, E1000_ICS_RXDMT0); | |
bc7f75fa AK |
3653 | |
3654 | /* Force detection of hung controller every watchdog period */ | |
3655 | adapter->detect_tx_hung = 1; | |
3656 | ||
ad68076e BA |
3657 | /* |
3658 | * With 82571 controllers, LAA may be overwritten due to controller | |
3659 | * reset from the other port. Set the appropriate LAA in RAR[0] | |
3660 | */ | |
bc7f75fa AK |
3661 | if (e1000e_get_laa_state_82571(hw)) |
3662 | e1000e_rar_set(hw, adapter->hw.mac.addr, 0); | |
3663 | ||
3664 | /* Reset the timer */ | |
3665 | if (!test_bit(__E1000_DOWN, &adapter->state)) | |
3666 | mod_timer(&adapter->watchdog_timer, | |
3667 | round_jiffies(jiffies + 2 * HZ)); | |
3668 | } | |
3669 | ||
3670 | #define E1000_TX_FLAGS_CSUM 0x00000001 | |
3671 | #define E1000_TX_FLAGS_VLAN 0x00000002 | |
3672 | #define E1000_TX_FLAGS_TSO 0x00000004 | |
3673 | #define E1000_TX_FLAGS_IPV4 0x00000008 | |
3674 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | |
3675 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | |
3676 | ||
3677 | static int e1000_tso(struct e1000_adapter *adapter, | |
3678 | struct sk_buff *skb) | |
3679 | { | |
3680 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
3681 | struct e1000_context_desc *context_desc; | |
3682 | struct e1000_buffer *buffer_info; | |
3683 | unsigned int i; | |
3684 | u32 cmd_length = 0; | |
3685 | u16 ipcse = 0, tucse, mss; | |
3686 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | |
3687 | int err; | |
3688 | ||
3689 | if (skb_is_gso(skb)) { | |
3690 | if (skb_header_cloned(skb)) { | |
3691 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
3692 | if (err) | |
3693 | return err; | |
3694 | } | |
3695 | ||
3696 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
3697 | mss = skb_shinfo(skb)->gso_size; | |
3698 | if (skb->protocol == htons(ETH_P_IP)) { | |
3699 | struct iphdr *iph = ip_hdr(skb); | |
3700 | iph->tot_len = 0; | |
3701 | iph->check = 0; | |
3702 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | |
3703 | iph->daddr, 0, | |
3704 | IPPROTO_TCP, | |
3705 | 0); | |
3706 | cmd_length = E1000_TXD_CMD_IP; | |
3707 | ipcse = skb_transport_offset(skb) - 1; | |
3708 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | |
3709 | ipv6_hdr(skb)->payload_len = 0; | |
3710 | tcp_hdr(skb)->check = | |
3711 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
3712 | &ipv6_hdr(skb)->daddr, | |
3713 | 0, IPPROTO_TCP, 0); | |
3714 | ipcse = 0; | |
3715 | } | |
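| /* | |
| * Context descriptor checksum fields: ipcss/tucss give the offset | |
| * where IP/TCP checksumming starts, ipcso/tucso where the checksum | |
| * is inserted, and ipcse/tucse where it ends (0 = end of packet). | |
| */ | |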
3716 | ipcss = skb_network_offset(skb); | |
3717 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | |
3718 | tucss = skb_transport_offset(skb); | |
3719 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | |
3720 | tucse = 0; | |
3721 | ||
3722 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | |
3723 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | |
3724 | ||
3725 | i = tx_ring->next_to_use; | |
3726 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | |
3727 | buffer_info = &tx_ring->buffer_info[i]; | |
3728 | ||
3729 | context_desc->lower_setup.ip_fields.ipcss = ipcss; | |
3730 | context_desc->lower_setup.ip_fields.ipcso = ipcso; | |
3731 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | |
3732 | context_desc->upper_setup.tcp_fields.tucss = tucss; | |
3733 | context_desc->upper_setup.tcp_fields.tucso = tucso; | |
3734 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | |
3735 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | |
3736 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | |
3737 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | |
3738 | ||
3739 | buffer_info->time_stamp = jiffies; | |
3740 | buffer_info->next_to_watch = i; | |
3741 | ||
3742 | i++; | |
3743 | if (i == tx_ring->count) | |
3744 | i = 0; | |
3745 | tx_ring->next_to_use = i; | |
3746 | ||
3747 | return 1; | |
3748 | } | |
3749 | ||
3750 | return 0; | |
3751 | } | |
3752 | ||
3753 | static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | |
3754 | { | |
3755 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
3756 | struct e1000_context_desc *context_desc; | |
3757 | struct e1000_buffer *buffer_info; | |
3758 | unsigned int i; | |
3759 | u8 css; | |
3760 | ||
3761 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
3762 | css = skb_transport_offset(skb); | |
3763 | ||
3764 | i = tx_ring->next_to_use; | |
3765 | buffer_info = &tx_ring->buffer_info[i]; | |
3766 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | |
3767 | ||
3768 | context_desc->lower_setup.ip_config = 0; | |
3769 | context_desc->upper_setup.tcp_fields.tucss = css; | |
3770 | context_desc->upper_setup.tcp_fields.tucso = | |
3771 | css + skb->csum_offset; | |
3772 | context_desc->upper_setup.tcp_fields.tucse = 0; | |
3773 | context_desc->tcp_seg_setup.data = 0; | |
3774 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); | |
3775 | ||
3776 | buffer_info->time_stamp = jiffies; | |
3777 | buffer_info->next_to_watch = i; | |
3778 | ||
3779 | i++; | |
3780 | if (i == tx_ring->count) | |
3781 | i = 0; | |
3782 | tx_ring->next_to_use = i; | |
3783 | ||
3784 | return 1; | |
3785 | } | |
3786 | ||
3787 | return 0; | |
3788 | } | |
3789 | ||
3790 | #define E1000_MAX_PER_TXD 8192 | |
3791 | #define E1000_MAX_TXD_PWR 12 | |
3792 | ||
3793 | static int e1000_tx_map(struct e1000_adapter *adapter, | |
3794 | struct sk_buff *skb, unsigned int first, | |
3795 | unsigned int max_per_txd, unsigned int nr_frags, | |
3796 | unsigned int mss) | |
3797 | { | |
3798 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
3799 | struct e1000_buffer *buffer_info; | |
3800 | unsigned int len = skb->len - skb->data_len; | |
3801 | unsigned int offset = 0, size, count = 0, i; | |
3802 | unsigned int f; | |
3803 | ||
3804 | i = tx_ring->next_to_use; | |
3805 | ||
3806 | while (len) { | |
3807 | buffer_info = &tx_ring->buffer_info[i]; | |
3808 | size = min(len, max_per_txd); | |
3809 | ||
3810 | /* Workaround for premature desc write-backs | |
3811 | * in TSO mode. Append 4-byte sentinel desc */ | |
3812 | if (mss && !nr_frags && size == len && size > 8) | |
3813 | size -= 4; | |
3814 | ||
3815 | buffer_info->length = size; | |
3816 | /* set time_stamp *before* dma to help avoid a possible race */ | |
3817 | buffer_info->time_stamp = jiffies; | |
3818 | buffer_info->dma = | |
3819 | pci_map_single(adapter->pdev, | |
3820 | skb->data + offset, | |
3821 | size, | |
3822 | PCI_DMA_TODEVICE); | |
8d8bb39b | 3823 | if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) { |
bc7f75fa AK |
3824 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); |
3825 | adapter->tx_dma_failed++; | |
3826 | return -1; | |
3827 | } | |
3828 | buffer_info->next_to_watch = i; | |
3829 | ||
3830 | len -= size; | |
3831 | offset += size; | |
3832 | count++; | |
3833 | i++; | |
3834 | if (i == tx_ring->count) | |
3835 | i = 0; | |
3836 | } | |
3837 | ||
3838 | for (f = 0; f < nr_frags; f++) { | |
3839 | struct skb_frag_struct *frag; | |
3840 | ||
3841 | frag = &skb_shinfo(skb)->frags[f]; | |
3842 | len = frag->size; | |
3843 | offset = frag->page_offset; | |
3844 | ||
3845 | while (len) { | |
3846 | buffer_info = &tx_ring->buffer_info[i]; | |
3847 | size = min(len, max_per_txd); | |
3848 | /* Workaround for premature desc write-backs | |
3849 | * in TSO mode. Append 4-byte sentinel desc */ | |
3850 | if (mss && f == (nr_frags-1) && size == len && size > 8) | |
3851 | size -= 4; | |
3852 | ||
3853 | buffer_info->length = size; | |
3854 | buffer_info->time_stamp = jiffies; | |
3855 | buffer_info->dma = | |
3856 | pci_map_page(adapter->pdev, | |
3857 | frag->page, | |
3858 | offset, | |
3859 | size, | |
3860 | PCI_DMA_TODEVICE); | |
8d8bb39b FT |
3861 | if (pci_dma_mapping_error(adapter->pdev, |
3862 | buffer_info->dma)) { | |
bc7f75fa AK |
3863 | dev_err(&adapter->pdev->dev, |
3864 | "TX DMA page map failed\n"); | |
3865 | adapter->tx_dma_failed++; | |
3866 | return -1; | |
3867 | } | |
3868 | ||
3869 | buffer_info->next_to_watch = i; | |
3870 | ||
3871 | len -= size; | |
3872 | offset += size; | |
3873 | count++; | |
3874 | ||
3875 | i++; | |
3876 | if (i == tx_ring->count) | |
3877 | i = 0; | |
3878 | } | |
3879 | } | |
3880 | ||
3881 | if (i == 0) | |
3882 | i = tx_ring->count - 1; | |
3883 | else | |
3884 | i--; | |
3885 | ||
3886 | tx_ring->buffer_info[i].skb = skb; | |
3887 | tx_ring->buffer_info[first].next_to_watch = i; | |
3888 | ||
3889 | return count; | |
3890 | } | |
3891 | ||
3892 | static void e1000_tx_queue(struct e1000_adapter *adapter, | |
3893 | int tx_flags, int count) | |
3894 | { | |
3895 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
3896 | struct e1000_tx_desc *tx_desc = NULL; | |
3897 | struct e1000_buffer *buffer_info; | |
3898 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | |
3899 | unsigned int i; | |
3900 | ||
3901 | if (tx_flags & E1000_TX_FLAGS_TSO) { | |
3902 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | |
3903 | E1000_TXD_CMD_TSE; | |
3904 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | |
3905 | ||
3906 | if (tx_flags & E1000_TX_FLAGS_IPV4) | |
3907 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; | |
3908 | } | |
3909 | ||
3910 | if (tx_flags & E1000_TX_FLAGS_CSUM) { | |
3911 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; | |
3912 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | |
3913 | } | |
3914 | ||
3915 | if (tx_flags & E1000_TX_FLAGS_VLAN) { | |
3916 | txd_lower |= E1000_TXD_CMD_VLE; | |
3917 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); | |
3918 | } | |
3919 | ||
3920 | i = tx_ring->next_to_use; | |
3921 | ||
3922 | while (count--) { | |
3923 | buffer_info = &tx_ring->buffer_info[i]; | |
3924 | tx_desc = E1000_TX_DESC(*tx_ring, i); | |
3925 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | |
3926 | tx_desc->lower.data = | |
3927 | cpu_to_le32(txd_lower | buffer_info->length); | |
3928 | tx_desc->upper.data = cpu_to_le32(txd_upper); | |
3929 | ||
3930 | i++; | |
3931 | if (i == tx_ring->count) | |
3932 | i = 0; | |
3933 | } | |
3934 | ||
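| /* | |
| * Only the last descriptor gets the final command bits (EOP, RS, | |
| * etc. from adapter->txd_cmd), marking the end of the packet. | |
| */ | |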
3935 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | |
3936 | ||
ad68076e BA |
3937 | /* |
3938 | * Force memory writes to complete before letting h/w | |
bc7f75fa AK |
3939 | * know there are new descriptors to fetch. (Only |
3940 | * applicable for weak-ordered memory model archs, | |
ad68076e BA |
3941 | * such as IA-64). |
3942 | */ | |
bc7f75fa AK |
3943 | wmb(); |
3944 | ||
3945 | tx_ring->next_to_use = i; | |
3946 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | |
ad68076e BA |
3947 | /* |
3948 | * we need this if more than one processor can write to our tail | |
3949 | * at a time, it synchronizes IO on IA64/Altix systems | |
3950 | */ | |
bc7f75fa AK |
3951 | mmiowb(); |
3952 | } | |
3953 | ||
3954 | #define MINIMUM_DHCP_PACKET_SIZE 282 | |
3955 | static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | |
3956 | struct sk_buff *skb) | |
3957 | { | |
3958 | struct e1000_hw *hw = &adapter->hw; | |
3959 | u16 length, offset; | |
3960 | ||
3961 | if (vlan_tx_tag_present(skb)) { | |
3962 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) | |
3963 | && (adapter->hw.mng_cookie.status & | |
3964 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) | |
3965 | return 0; | |
3966 | } | |
3967 | ||
3968 | if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) | |
3969 | return 0; | |
3970 | ||
3971 | if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) | |
3972 | return 0; | |
3973 | ||
3974 | { | |
3975 | const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); | |
3976 | struct udphdr *udp; | |
3977 | ||
3978 | if (ip->protocol != IPPROTO_UDP) | |
3979 | return 0; | |
3980 | ||
3981 | udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); | |
3982 | if (ntohs(udp->dest) != 67) | |
3983 | return 0; | |
3984 | ||
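| /* | |
| * Port 67 (checked above) is the BOOTP/DHCP server port; the +8 | |
| * skips the UDP header so the DHCP payload is handed to firmware. | |
| */ | |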
3985 | offset = (u8 *)udp + 8 - skb->data; | |
3986 | length = skb->len - offset; | |
3987 | return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); | |
3988 | } | |
3989 | ||
3990 | return 0; | |
3991 | } | |
3992 | ||
3993 | static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |
3994 | { | |
3995 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3996 | ||
3997 | netif_stop_queue(netdev); | |
ad68076e BA |
3998 | /* |
3999 | * Herbert's original patch had: | |
bc7f75fa | 4000 | * smp_mb__after_netif_stop_queue(); |
ad68076e BA |
4001 | * but since that doesn't exist yet, just open code it. |
4002 | */ | |
bc7f75fa AK |
4003 | smp_mb(); |
4004 | ||
ad68076e BA |
4005 | /* |
4006 | * We need to check again in a case another CPU has just | |
4007 | * made room available. | |
4008 | */ | |
bc7f75fa AK |
4009 | if (e1000_desc_unused(adapter->tx_ring) < size) |
4010 | return -EBUSY; | |
4011 | ||
4012 | /* A reprieve! */ | |
4013 | netif_start_queue(netdev); | |
4014 | ++adapter->restart_queue; | |
4015 | return 0; | |
4016 | } | |
4017 | ||
4018 | static int e1000_maybe_stop_tx(struct net_device *netdev, int size) | |
4019 | { | |
4020 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4021 | ||
4022 | if (e1000_desc_unused(adapter->tx_ring) >= size) | |
4023 | return 0; | |
4024 | return __e1000_maybe_stop_tx(netdev, size); | |
4025 | } | |
4026 | ||
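| /* worst-case descriptor count for a buffer of size S when one | |
| * descriptor carries at most 2^X bytes */ | |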
4027 | #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) | |
4028 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |
4029 | { | |
4030 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4031 | struct e1000_ring *tx_ring = adapter->tx_ring; | |
4032 | unsigned int first; | |
4033 | unsigned int max_per_txd = E1000_MAX_PER_TXD; | |
4034 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | |
4035 | unsigned int tx_flags = 0; | |
4e6c709c | 4036 | unsigned int len = skb->len - skb->data_len; |
bc7f75fa | 4037 | unsigned long irq_flags; |
4e6c709c AK |
4038 | unsigned int nr_frags; |
4039 | unsigned int mss; | |
bc7f75fa AK |
4040 | int count = 0; |
4041 | int tso; | |
4042 | unsigned int f; | |
bc7f75fa AK |
4043 | |
4044 | if (test_bit(__E1000_DOWN, &adapter->state)) { | |
4045 | dev_kfree_skb_any(skb); | |
4046 | return NETDEV_TX_OK; | |
4047 | } | |
4048 | ||
4049 | if (skb->len <= 0) { | |
4050 | dev_kfree_skb_any(skb); | |
4051 | return NETDEV_TX_OK; | |
4052 | } | |
4053 | ||
4054 | mss = skb_shinfo(skb)->gso_size; | |
ad68076e BA |
4055 | /* |
4056 | * The controller does a simple calculation to | |
bc7f75fa AK |
4057 | * make sure there is enough room in the FIFO before |
4058 | * initiating the DMA for each buffer. The calc is: | |
4059 | * 4 = ceil(buffer len/mss). To make sure we don't | |
4060 | * overrun the FIFO, adjust the max buffer len if mss | |
ad68076e BA |
4061 | * drops. |
4062 | */ | |
bc7f75fa AK |
4063 | if (mss) { |
4064 | u8 hdr_len; | |
4065 | max_per_txd = min(mss << 2, max_per_txd); | |
4066 | max_txd_pwr = fls(max_per_txd) - 1; | |
4067 | ||
ad68076e BA |
4068 | /* |
4069 | * TSO Workaround for 82571/2/3 Controllers -- if skb->data | |
4070 | * points to just header, pull a few bytes of payload from | |
4071 | * frags into skb->data | |
4072 | */ | |
bc7f75fa | 4073 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
ad68076e BA |
4074 | /* |
4075 | * we do this workaround for ES2LAN, but it is unnecessary; | |
4076 | * avoiding it could save a lot of cycles | |
4077 | */ | |
4e6c709c | 4078 | if (skb->data_len && (hdr_len == len)) { |
bc7f75fa AK |
4079 | unsigned int pull_size; |
4080 | ||
4081 | pull_size = min((unsigned int)4, skb->data_len); | |
4082 | if (!__pskb_pull_tail(skb, pull_size)) { | |
44defeb3 | 4083 | e_err("__pskb_pull_tail failed.\n"); |
bc7f75fa AK |
4084 | dev_kfree_skb_any(skb); |
4085 | return NETDEV_TX_OK; | |
4086 | } | |
4087 | len = skb->len - skb->data_len; | |
4088 | } | |
4089 | } | |
4090 | ||
4091 | /* reserve a descriptor for the offload context */ | |
4092 | if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) | |
4093 | count++; | |
4094 | count++; | |
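| /* | |
| * The extra descriptor reserved above most likely covers the 4-byte | |
| * sentinel split e1000_tx_map() can make for the TSO premature | |
| * write-back workaround (editorial note; intent is undocumented). | |
| */ | |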
4095 | ||
4096 | count += TXD_USE_COUNT(len, max_txd_pwr); | |
4097 | ||
4098 | nr_frags = skb_shinfo(skb)->nr_frags; | |
4099 | for (f = 0; f < nr_frags; f++) | |
4100 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, | |
4101 | max_txd_pwr); | |
4102 | ||
4103 | if (adapter->hw.mac.tx_pkt_filtering) | |
4104 | e1000_transfer_dhcp_info(adapter, skb); | |
4105 | ||
4106 | if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags)) | |
4107 | /* Collision - tell upper layer to requeue */ | |
4108 | return NETDEV_TX_LOCKED; | |
4109 | ||
ad68076e BA |
4110 | /* |
4111 | * need: count + 2 desc gap to keep tail from touching | |
4112 | * head, otherwise try next time | |
4113 | */ | |
bc7f75fa AK |
4114 | if (e1000_maybe_stop_tx(netdev, count + 2)) { |
4115 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | |
4116 | return NETDEV_TX_BUSY; | |
4117 | } | |
4118 | ||
4119 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | |
4120 | tx_flags |= E1000_TX_FLAGS_VLAN; | |
4121 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); | |
4122 | } | |
4123 | ||
4124 | first = tx_ring->next_to_use; | |
4125 | ||
4126 | tso = e1000_tso(adapter, skb); | |
4127 | if (tso < 0) { | |
4128 | dev_kfree_skb_any(skb); | |
4129 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | |
4130 | return NETDEV_TX_OK; | |
4131 | } | |
4132 | ||
4133 | if (tso) | |
4134 | tx_flags |= E1000_TX_FLAGS_TSO; | |
4135 | else if (e1000_tx_csum(adapter, skb)) | |
4136 | tx_flags |= E1000_TX_FLAGS_CSUM; | |
4137 | ||
ad68076e BA |
4138 | /* |
4139 | * Old method was to assume IPv4 packet by default if TSO was enabled. | |
bc7f75fa | 4140 | * 82571 hardware supports TSO capabilities for IPv6 as well... |
ad68076e BA |
4141 | * so we can no longer assume; we must check the protocol. | |
4142 | */ | |
bc7f75fa AK |
4143 | if (skb->protocol == htons(ETH_P_IP)) |
4144 | tx_flags |= E1000_TX_FLAGS_IPV4; | |
4145 | ||
4146 | count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); | |
4147 | if (count < 0) { | |
4148 | /* handle pci_map_single() error in e1000_tx_map */ | |
4149 | dev_kfree_skb_any(skb); | |
4150 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | |
7b5dfe1a | 4151 | return NETDEV_TX_OK; |
bc7f75fa AK |
4152 | } |
4153 | ||
4154 | e1000_tx_queue(adapter, tx_flags, count); | |
4155 | ||
4156 | netdev->trans_start = jiffies; | |
4157 | ||
4158 | /* Make sure there is space in the ring for the next send. */ | |
4159 | e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); | |
4160 | ||
4161 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | |
4162 | return NETDEV_TX_OK; | |
4163 | } | |
4164 | ||
4165 | /** | |
4166 | * e1000_tx_timeout - Respond to a Tx Hang | |
4167 | * @netdev: network interface device structure | |
4168 | **/ | |
4169 | static void e1000_tx_timeout(struct net_device *netdev) | |
4170 | { | |
4171 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4172 | ||
4173 | /* Do the reset outside of interrupt context */ | |
4174 | adapter->tx_timeout_count++; | |
4175 | schedule_work(&adapter->reset_task); | |
4176 | } | |
4177 | ||
4178 | static void e1000_reset_task(struct work_struct *work) | |
4179 | { | |
4180 | struct e1000_adapter *adapter; | |
4181 | adapter = container_of(work, struct e1000_adapter, reset_task); | |
4182 | ||
4183 | e1000e_reinit_locked(adapter); | |
4184 | } | |
4185 | ||
4186 | /** | |
4187 | * e1000_get_stats - Get System Network Statistics | |
4188 | * @netdev: network interface device structure | |
4189 | * | |
4190 | * Returns the address of the device statistics structure. | |
4191 | * The statistics are actually updated from the timer callback. | |
4192 | **/ | |
4193 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | |
4194 | { | |
4195 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4196 | ||
4197 | /* only return the current stats */ | |
4198 | return &adapter->net_stats; | |
4199 | } | |
4200 | ||
4201 | /** | |
4202 | * e1000_change_mtu - Change the Maximum Transfer Unit | |
4203 | * @netdev: network interface device structure | |
4204 | * @new_mtu: new value for maximum frame size | |
4205 | * | |
4206 | * Returns 0 on success, negative on failure | |
4207 | **/ | |
4208 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |
4209 | { | |
4210 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4211 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | |
4212 | ||
d53f706d | 4213 | if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || |
bc7f75fa | 4214 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
44defeb3 | 4215 | e_err("Invalid MTU setting\n"); |
bc7f75fa AK |
4216 | return -EINVAL; |
4217 | } | |
4218 | ||
4219 | /* Jumbo frame size limits */ | |
4220 | if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { | |
4221 | if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { | |
44defeb3 | 4222 | e_err("Jumbo Frames not supported.\n"); |
bc7f75fa AK |
4223 | return -EINVAL; |
4224 | } | |
4225 | if (adapter->hw.phy.type == e1000_phy_ife) { | |
44defeb3 | 4226 | e_err("Jumbo Frames not supported.\n"); |
bc7f75fa AK |
4227 | return -EINVAL; |
4228 | } | |
4229 | } | |
4230 | ||
4231 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | |
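| /* 9234 = 9216-byte MTU + 14-byte Ethernet header + 4-byte FCS */ | |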
4232 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | |
44defeb3 | 4233 | e_err("MTU > 9216 not supported.\n"); |
bc7f75fa AK |
4234 | return -EINVAL; |
4235 | } | |
4236 | ||
4237 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | |
4238 | msleep(1); | |
4239 | /* e1000e_down has a dependency on max_frame_size */ | |
318a94d6 | 4240 | adapter->max_frame_size = max_frame; |
bc7f75fa AK |
4241 | if (netif_running(netdev)) |
4242 | e1000e_down(adapter); | |
4243 | ||
ad68076e BA |
4244 | /* |
4245 | * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | |
bc7f75fa AK |
4246 | * means we reserve 2 more, this pushes us to allocate from the next |
4247 | * larger slab size. | |
ad68076e | 4248 | * i.e. RXBUFFER_2048 --> size-4096 slab |
97ac8cae BA |
4249 | * However with the new *_jumbo_rx* routines, jumbo receives will use |
4250 | * fragmented skbs | |
ad68076e | 4251 | */ |
bc7f75fa AK |
4252 | |
4253 | if (max_frame <= 256) | |
4254 | adapter->rx_buffer_len = 256; | |
4255 | else if (max_frame <= 512) | |
4256 | adapter->rx_buffer_len = 512; | |
4257 | else if (max_frame <= 1024) | |
4258 | adapter->rx_buffer_len = 1024; | |
4259 | else if (max_frame <= 2048) | |
4260 | adapter->rx_buffer_len = 2048; | |
4261 | else | |
4262 | adapter->rx_buffer_len = 4096; | |
4263 | ||
4264 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | |
4265 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | |
4266 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | |
4267 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | |
ad68076e | 4268 | + ETH_FCS_LEN; |
bc7f75fa | 4269 | |
44defeb3 | 4270 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
bc7f75fa AK |
4271 | netdev->mtu = new_mtu; |
4272 | ||
4273 | if (netif_running(netdev)) | |
4274 | e1000e_up(adapter); | |
4275 | else | |
4276 | e1000e_reset(adapter); | |
4277 | ||
4278 | clear_bit(__E1000_RESETTING, &adapter->state); | |
4279 | ||
4280 | return 0; | |
4281 | } | |
4282 | ||
4283 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |
4284 | int cmd) | |
4285 | { | |
4286 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4287 | struct mii_ioctl_data *data = if_mii(ifr); | |
bc7f75fa | 4288 | |
318a94d6 | 4289 | if (adapter->hw.phy.media_type != e1000_media_type_copper) |
bc7f75fa AK |
4290 | return -EOPNOTSUPP; |
4291 | ||
4292 | switch (cmd) { | |
4293 | case SIOCGMIIPHY: | |
4294 | data->phy_id = adapter->hw.phy.addr; | |
4295 | break; | |
4296 | case SIOCGMIIREG: | |
4297 | if (!capable(CAP_NET_ADMIN)) | |
4298 | return -EPERM; | |
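| /* | |
| * Values are served from the snapshot taken by | |
| * e1000_phy_read_status() on link-state change, not from live | |
| * MDIO reads. | |
| */ | |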
7c25769f BA |
4299 | switch (data->reg_num & 0x1F) { |
4300 | case MII_BMCR: | |
4301 | data->val_out = adapter->phy_regs.bmcr; | |
4302 | break; | |
4303 | case MII_BMSR: | |
4304 | data->val_out = adapter->phy_regs.bmsr; | |
4305 | break; | |
4306 | case MII_PHYSID1: | |
4307 | data->val_out = (adapter->hw.phy.id >> 16); | |
4308 | break; | |
4309 | case MII_PHYSID2: | |
4310 | data->val_out = (adapter->hw.phy.id & 0xFFFF); | |
4311 | break; | |
4312 | case MII_ADVERTISE: | |
4313 | data->val_out = adapter->phy_regs.advertise; | |
4314 | break; | |
4315 | case MII_LPA: | |
4316 | data->val_out = adapter->phy_regs.lpa; | |
4317 | break; | |
4318 | case MII_EXPANSION: | |
4319 | data->val_out = adapter->phy_regs.expansion; | |
4320 | break; | |
4321 | case MII_CTRL1000: | |
4322 | data->val_out = adapter->phy_regs.ctrl1000; | |
4323 | break; | |
4324 | case MII_STAT1000: | |
4325 | data->val_out = adapter->phy_regs.stat1000; | |
4326 | break; | |
4327 | case MII_ESTATUS: | |
4328 | data->val_out = adapter->phy_regs.estatus; | |
4329 | break; | |
4330 | default: | |
bc7f75fa AK |
4331 | return -EIO; |
4332 | } | |
bc7f75fa AK |
4333 | break; |
4334 | case SIOCSMIIREG: | |
4335 | default: | |
4336 | return -EOPNOTSUPP; | |
4337 | } | |
4338 | return 0; | |
4339 | } | |
4340 | ||
4341 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
4342 | { | |
4343 | switch (cmd) { | |
4344 | case SIOCGMIIPHY: | |
4345 | case SIOCGMIIREG: | |
4346 | case SIOCSMIIREG: | |
4347 | return e1000_mii_ioctl(netdev, ifr, cmd); | |
4348 | default: | |
4349 | return -EOPNOTSUPP; | |
4350 | } | |
4351 | } | |
4352 | ||
4353 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |
4354 | { | |
4355 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4356 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4357 | struct e1000_hw *hw = &adapter->hw; | |
4358 | u32 ctrl, ctrl_ext, rctl, status; | |
4359 | u32 wufc = adapter->wol; | |
4360 | int retval = 0; | |
4361 | ||
4362 | netif_device_detach(netdev); | |
4363 | ||
4364 | if (netif_running(netdev)) { | |
4365 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | |
4366 | e1000e_down(adapter); | |
4367 | e1000_free_irq(adapter); | |
4368 | } | |
4662e82b | 4369 | e1000e_reset_interrupt_capability(adapter); |
bc7f75fa AK |
4370 | |
4371 | retval = pci_save_state(pdev); | |
4372 | if (retval) | |
4373 | return retval; | |
4374 | ||
4375 | status = er32(STATUS); | |
4376 | if (status & E1000_STATUS_LU) | |
4377 | wufc &= ~E1000_WUFC_LNKC; | |
4378 | ||
4379 | if (wufc) { | |
4380 | e1000_setup_rctl(adapter); | |
4381 | e1000_set_multi(netdev); | |
4382 | ||
4383 | /* turn on all-multi mode if wake on multicast is enabled */ | |
4384 | if (wufc & E1000_WUFC_MC) { | |
4385 | rctl = er32(RCTL); | |
4386 | rctl |= E1000_RCTL_MPE; | |
4387 | ew32(RCTL, rctl); | |
4388 | } | |
4389 | ||
4390 | ctrl = er32(CTRL); | |
4391 | /* advertise wake from D3Cold */ | |
4392 | #define E1000_CTRL_ADVD3WUC 0x00100000 | |
4393 | /* phy power management enable */ | |
4394 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | |
4395 | ctrl |= E1000_CTRL_ADVD3WUC | | |
4396 | E1000_CTRL_EN_PHY_PWR_MGMT; | |
4397 | ew32(CTRL, ctrl); | |
4398 | ||
318a94d6 JK |
4399 | if (adapter->hw.phy.media_type == e1000_media_type_fiber || |
4400 | adapter->hw.phy.media_type == | |
4401 | e1000_media_type_internal_serdes) { | |
bc7f75fa AK |
4402 | /* keep the laser running in D3 */ |
4403 | ctrl_ext = er32(CTRL_EXT); | |
4404 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; | |
4405 | ew32(CTRL_EXT, ctrl_ext); | |
4406 | } | |
4407 | ||
97ac8cae BA |
4408 | if (adapter->flags & FLAG_IS_ICH) |
4409 | e1000e_disable_gig_wol_ich8lan(&adapter->hw); | |
4410 | ||
bc7f75fa AK |
4411 | /* Allow time for pending master requests to run */ |
4412 | e1000e_disable_pcie_master(&adapter->hw); | |
4413 | ||
4414 | ew32(WUC, E1000_WUC_PME_EN); | |
4415 | ew32(WUFC, wufc); | |
4416 | pci_enable_wake(pdev, PCI_D3hot, 1); | |
4417 | pci_enable_wake(pdev, PCI_D3cold, 1); | |
4418 | } else { | |
4419 | ew32(WUC, 0); | |
4420 | ew32(WUFC, 0); | |
4421 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4422 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4423 | } | |
4424 | ||
bc7f75fa AK |
4425 | /* make sure adapter isn't asleep if manageability is enabled */ |
4426 | if (adapter->flags & FLAG_MNG_PT_ENABLED) { | |
4427 | pci_enable_wake(pdev, PCI_D3hot, 1); | |
4428 | pci_enable_wake(pdev, PCI_D3cold, 1); | |
4429 | } | |
4430 | ||
4431 | if (adapter->hw.phy.type == e1000_phy_igp_3) | |
4432 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | |
4433 | ||
ad68076e BA |
4434 | /* |
4435 | * Release control of h/w to f/w. If f/w is AMT enabled, this | |
4436 | * would have already happened in close and is redundant. | |
4437 | */ | |
bc7f75fa AK |
4438 | e1000_release_hw_control(adapter); |
4439 | ||
4440 | pci_disable_device(pdev); | |
4441 | ||
4442 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
4443 | ||
4444 | return 0; | |
4445 | } | |
4446 | ||
1eae4eb2 AK |
4447 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) |
4448 | { | |
4449 | int pos; | |
1eae4eb2 AK |
4450 | u16 val; |
4451 | ||
4452 | /* | |
4453 | * 82573 workaround - disable L1 ASPM on mobile chipsets | |
4454 | * | |
4455 | * L1 ASPM on various mobile (ich7) chipsets does not behave properly, | |
4456 | * resulting in lost data or garbage information on the pci-e link | |
4457 | * level. This could result in (false) bad EEPROM checksum errors, | |
4458 | * long ping times (up to 2s) or even a system freeze/hang. | |
4459 | * | |
4460 | * Unfortunately this feature saves about 1W of power when active, | |
4461 | * so disabling it forgoes that saving. | |
4462 | */ | |
4463 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | |
1eae4eb2 AK |
4464 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); |
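| /* bit 1 of the Link Control register enables ASPM L1 entry */ | |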
4465 | if (val & 0x2) { | |
4466 | dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); | |
4467 | val &= ~0x2; | |
4468 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); | |
4469 | } | |
4470 | } | |
4471 | ||
bc7f75fa AK |
4472 | #ifdef CONFIG_PM |
4473 | static int e1000_resume(struct pci_dev *pdev) | |
4474 | { | |
4475 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4476 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4477 | struct e1000_hw *hw = &adapter->hw; | |
4478 | u32 err; | |
4479 | ||
4480 | pci_set_power_state(pdev, PCI_D0); | |
4481 | pci_restore_state(pdev); | |
1eae4eb2 | 4482 | e1000e_disable_l1aspm(pdev); |
6e4f6f6b | 4483 | |
f0f422e5 | 4484 | err = pci_enable_device_mem(pdev); |
bc7f75fa AK |
4485 | if (err) { |
4486 | dev_err(&pdev->dev, | |
4487 | "Cannot enable PCI device from suspend\n"); | |
4488 | return err; | |
4489 | } | |
4490 | ||
4491 | pci_set_master(pdev); | |
4492 | ||
4493 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4494 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4495 | ||
4662e82b | 4496 | e1000e_set_interrupt_capability(adapter); |
bc7f75fa AK |
4497 | if (netif_running(netdev)) { |
4498 | err = e1000_request_irq(adapter); | |
4499 | if (err) | |
4500 | return err; | |
4501 | } | |
4502 | ||
4503 | e1000e_power_up_phy(adapter); | |
4504 | e1000e_reset(adapter); | |
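| /* WUS is write-1-to-clear; dismiss any pending wake-up events */ | |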
4505 | ew32(WUS, ~0); | |
4506 | ||
4507 | e1000_init_manageability(adapter); | |
4508 | ||
4509 | if (netif_running(netdev)) | |
4510 | e1000e_up(adapter); | |
4511 | ||
4512 | netif_device_attach(netdev); | |
4513 | ||
ad68076e BA |
4514 | /* |
4515 | * If the controller has AMT, do not set DRV_LOAD until the interface | |
bc7f75fa | 4516 | * is up. For all other cases, let the f/w know that the h/w is now |
ad68076e BA |
4517 | * under the control of the driver. |
4518 | */ | |
c43bc57e | 4519 | if (!(adapter->flags & FLAG_HAS_AMT)) |
bc7f75fa AK |
4520 | e1000_get_hw_control(adapter); |
4521 | ||
4522 | return 0; | |
4523 | } | |
4524 | #endif | |
4525 | ||
4526 | static void e1000_shutdown(struct pci_dev *pdev) | |
4527 | { | |
4528 | e1000_suspend(pdev, PMSG_SUSPEND); | |
4529 | } | |
4530 | ||
4531 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
4532 | /* | |
4533 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
4534 | * without having to re-enable interrupts. It's not called while | |
4535 | * the interrupt routine is executing. | |
4536 | */ | |
4537 | static void e1000_netpoll(struct net_device *netdev) | |
4538 | { | |
4539 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4540 | ||
4541 | disable_irq(adapter->pdev->irq); | |
4542 | e1000_intr(adapter->pdev->irq, netdev); | |
4543 | ||
bc7f75fa AK |
4544 | enable_irq(adapter->pdev->irq); |
4545 | } | |
4546 | #endif | |
4547 | ||
4548 | /** | |
4549 | * e1000_io_error_detected - called when PCI error is detected | |
4550 | * @pdev: Pointer to PCI device | |
4551 | * @state: The current pci connection state | |
4552 | * | |
4553 | * This function is called after a PCI bus error affecting | |
4554 | * this device has been detected. | |
4555 | */ | |
4556 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |
4557 | pci_channel_state_t state) | |
4558 | { | |
4559 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4560 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4561 | ||
4562 | netif_device_detach(netdev); | |
4563 | ||
4564 | if (netif_running(netdev)) | |
4565 | e1000e_down(adapter); | |
4566 | pci_disable_device(pdev); | |
4567 | ||
4568 | /* Request a slot reset. */ | |
4569 | return PCI_ERS_RESULT_NEED_RESET; | |
4570 | } | |
4571 | ||
4572 | /** | |
4573 | * e1000_io_slot_reset - called after the pci bus has been reset. | |
4574 | * @pdev: Pointer to PCI device | |
4575 | * | |
4576 | * Restart the card from scratch, as if from a cold-boot. Implementation | |
4577 | * resembles the first half of the e1000_resume routine.
4578 | */ | |
4579 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |
4580 | { | |
4581 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4582 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4583 | struct e1000_hw *hw = &adapter->hw; | |
6e4f6f6b | 4584 | int err; |
bc7f75fa | 4585 | |
1eae4eb2 | 4586 | e1000e_disable_l1aspm(pdev); |
f0f422e5 | 4587 | err = pci_enable_device_mem(pdev); |
6e4f6f6b | 4588 | if (err) { |
bc7f75fa AK |
4589 | dev_err(&pdev->dev, |
4590 | "Cannot re-enable PCI device after reset.\n"); | |
4591 | return PCI_ERS_RESULT_DISCONNECT; | |
4592 | } | |
4593 | pci_set_master(pdev); | |
aad32739 | 4594 | pci_restore_state(pdev); |
bc7f75fa AK |
4595 | |
4596 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4597 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4598 | ||
4599 | e1000e_reset(adapter); | |
4600 | ew32(WUS, ~0); | |
4601 | ||
4602 | return PCI_ERS_RESULT_RECOVERED; | |
4603 | } | |
4604 | ||
4605 | /** | |
4606 | * e1000_io_resume - called when traffic can start flowing again. | |
4607 | * @pdev: Pointer to PCI device | |
4608 | * | |
4609 | * This callback is called when the error recovery driver tells us that | |
4610 | * it's OK to resume normal operation. Implementation resembles the
4611 | * second half of the e1000_resume routine.
4612 | */ | |
4613 | static void e1000_io_resume(struct pci_dev *pdev) | |
4614 | { | |
4615 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4616 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4617 | ||
4618 | e1000_init_manageability(adapter); | |
4619 | ||
4620 | if (netif_running(netdev)) { | |
4621 | if (e1000e_up(adapter)) { | |
4622 | dev_err(&pdev->dev, | |
4623 | "can't bring device back up after reset\n"); | |
4624 | return; | |
4625 | } | |
4626 | } | |
4627 | ||
4628 | netif_device_attach(netdev); | |
4629 | ||
ad68076e BA |
4630 | /* |
4631 | * If the controller has AMT, do not set DRV_LOAD until the interface | |
bc7f75fa | 4632 | * is up. For all other cases, let the f/w know that the h/w is now |
ad68076e BA |
4633 | * under the control of the driver. |
4634 | */ | |
c43bc57e | 4635 | if (!(adapter->flags & FLAG_HAS_AMT)) |
bc7f75fa AK |
4636 | e1000_get_hw_control(adapter); |
4637 | ||
4638 | } | |
4639 | ||
4640 | static void e1000_print_device_info(struct e1000_adapter *adapter) | |
4641 | { | |
4642 | struct e1000_hw *hw = &adapter->hw; | |
4643 | struct net_device *netdev = adapter->netdev; | |
69e3fd8c | 4644 | u32 pba_num; |
bc7f75fa AK |
4645 | |
4646 | /* print bus type/speed/width info */ | |
44defeb3 JK |
4647 | e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n", |
4648 | /* bus width */ | |
4649 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : | |
4650 | "Width x1"), | |
4651 | /* MAC address */ | |
4652 | netdev->dev_addr[0], netdev->dev_addr[1], | |
4653 | netdev->dev_addr[2], netdev->dev_addr[3], | |
4654 | netdev->dev_addr[4], netdev->dev_addr[5]); | |
4655 | e_info("Intel(R) PRO/%s Network Connection\n", | |
4656 | (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); | |
69e3fd8c | 4657 | e1000e_read_pba_num(hw, &pba_num); |
44defeb3 JK |
4658 | e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
4659 | hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); | |
bc7f75fa AK |
4660 | } |
4661 | ||
10aa4c04 AK |
4662 | static void e1000_eeprom_checks(struct e1000_adapter *adapter) |
4663 | { | |
4664 | struct e1000_hw *hw = &adapter->hw; | |
4665 | int ret_val; | |
4666 | u16 buf = 0; | |
4667 | ||
4668 | if (hw->mac.type != e1000_82573) | |
4669 | return; | |
4670 | ||
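	/*
	 * These are diagnostics only: Deep Smart Power Down or ASPM enabled
	 * in a 82573 EEPROM is a configuration reported to cause problems,
	 * so warn about it, but do not rewrite the NVM here.
	 */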
4671 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); | |
4672 | if (!(le16_to_cpu(buf) & (1 << 0))) { | |
4673 | /* Deep Smart Power Down (DSPD) */ | |
4674 | e_warn("Warning: detected DSPD enabled in EEPROM\n"); | |
4675 | } | |
4676 | ||
4677 | ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); | |
4678 | if (le16_to_cpu(buf) & (3 << 2)) { | |
4679 | /* ASPM enable */ | |
4680 | e_warn("Warning: detected ASPM enabled in EEPROM\n"); | |
4681 | } | |
4682 | } | |
4683 | ||
bc7f75fa AK |
4684 | /** |
4685 | * e1000_probe - Device Initialization Routine | |
4686 | * @pdev: PCI device information struct | |
4687 | * @ent: entry in e1000_pci_tbl | |
4688 | * | |
4689 | * Returns 0 on success, negative on failure | |
4690 | * | |
4691 | * e1000_probe initializes an adapter identified by a pci_dev structure. | |
4692 | * The OS initialization, configuring of the adapter private structure, | |
4693 | * and a hardware reset occur. | |
4694 | **/ | |
4695 | static int __devinit e1000_probe(struct pci_dev *pdev, | |
4696 | const struct pci_device_id *ent) | |
4697 | { | |
4698 | struct net_device *netdev; | |
4699 | struct e1000_adapter *adapter; | |
4700 | struct e1000_hw *hw; | |
4701 | const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; | |
f47e81fc BB |
4702 | resource_size_t mmio_start, mmio_len; |
4703 | resource_size_t flash_start, flash_len; | |
bc7f75fa AK |
4704 | |
4705 | static int cards_found; | |
4706 | int i, err, pci_using_dac; | |
4707 | u16 eeprom_data = 0; | |
4708 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | |
4709 | ||
1eae4eb2 | 4710 | e1000e_disable_l1aspm(pdev); |
6e4f6f6b | 4711 | |
f0f422e5 | 4712 | err = pci_enable_device_mem(pdev); |
bc7f75fa AK |
4713 | if (err) |
4714 | return err; | |
4715 | ||
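	/*
	 * Prefer 64-bit DMA (dual address cycle) and fall back to a 32-bit
	 * mask if the platform cannot provide it; pci_using_dac is
	 * remembered so NETIF_F_HIGHDMA can be advertised further down.
	 */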
4716 | pci_using_dac = 0; | |
4717 | err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
4718 | if (!err) { | |
4719 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
4720 | if (!err) | |
4721 | pci_using_dac = 1; | |
4722 | } else { | |
4723 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
4724 | if (err) { | |
4725 | err = pci_set_consistent_dma_mask(pdev, | |
4726 | DMA_32BIT_MASK); | |
4727 | if (err) { | |
4728 | dev_err(&pdev->dev, "No usable DMA " | |
4729 | "configuration, aborting\n"); | |
4730 | goto err_dma; | |
4731 | } | |
4732 | } | |
4733 | } | |
4734 | ||
f0f422e5 BA |
4735 | err = pci_request_selected_regions(pdev, |
4736 | pci_select_bars(pdev, IORESOURCE_MEM), | |
4737 | e1000e_driver_name); | |
bc7f75fa AK |
4738 | if (err) |
4739 | goto err_pci_reg; | |
4740 | ||
4741 | pci_set_master(pdev); | |
aad32739 | 4742 | pci_save_state(pdev); |
bc7f75fa AK |
4743 | |
4744 | err = -ENOMEM; | |
4745 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); | |
4746 | if (!netdev) | |
4747 | goto err_alloc_etherdev; | |
4748 | ||
bc7f75fa AK |
4749 | SET_NETDEV_DEV(netdev, &pdev->dev); |
4750 | ||
4751 | pci_set_drvdata(pdev, netdev); | |
4752 | adapter = netdev_priv(netdev); | |
4753 | hw = &adapter->hw; | |
4754 | adapter->netdev = netdev; | |
4755 | adapter->pdev = pdev; | |
4756 | adapter->ei = ei; | |
4757 | adapter->pba = ei->pba; | |
4758 | adapter->flags = ei->flags; | |
4759 | adapter->hw.adapter = adapter; | |
4760 | adapter->hw.mac.type = ei->mac; | |
4761 | adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; | |
4762 | ||
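	/*
	 * BAR 0 is the device's register space; on parts with an on-board
	 * flash (FLAG_HAS_FLASH, i.e. the ICH family), BAR 1 maps the flash
	 * and is remapped separately below.
	 */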
4763 | mmio_start = pci_resource_start(pdev, 0); | |
4764 | mmio_len = pci_resource_len(pdev, 0); | |
4765 | ||
4766 | err = -EIO; | |
4767 | adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); | |
4768 | if (!adapter->hw.hw_addr) | |
4769 | goto err_ioremap; | |
4770 | ||
4771 | if ((adapter->flags & FLAG_HAS_FLASH) && | |
4772 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | |
4773 | flash_start = pci_resource_start(pdev, 1); | |
4774 | flash_len = pci_resource_len(pdev, 1); | |
4775 | adapter->hw.flash_address = ioremap(flash_start, flash_len); | |
4776 | if (!adapter->hw.flash_address) | |
4777 | goto err_flashmap; | |
4778 | } | |
4779 | ||
4780 | /* construct the net_device struct */ | |
4781 | netdev->open = &e1000_open; | |
4782 | netdev->stop = &e1000_close; | |
4783 | netdev->hard_start_xmit = &e1000_xmit_frame; | |
4784 | netdev->get_stats = &e1000_get_stats; | |
4785 | netdev->set_multicast_list = &e1000_set_multi; | |
4786 | netdev->set_mac_address = &e1000_set_mac; | |
4787 | netdev->change_mtu = &e1000_change_mtu; | |
4788 | netdev->do_ioctl = &e1000_ioctl; | |
4789 | e1000e_set_ethtool_ops(netdev); | |
4790 | netdev->tx_timeout = &e1000_tx_timeout; | |
4791 | netdev->watchdog_timeo = 5 * HZ; | |
4792 | netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); | |
4793 | netdev->vlan_rx_register = e1000_vlan_rx_register; | |
4794 | netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; | |
4795 | netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid; | |
4796 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
4797 | netdev->poll_controller = e1000_netpoll; | |
4798 | #endif | |
4799 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | |
4800 | ||
4801 | netdev->mem_start = mmio_start; | |
4802 | netdev->mem_end = mmio_start + mmio_len; | |
4803 | ||
4804 | adapter->bd_number = cards_found++; | |
4805 | ||
4662e82b BA |
4806 | e1000e_check_options(adapter); |
4807 | ||
bc7f75fa AK |
4808 | /* setup adapter struct */ |
4809 | err = e1000_sw_init(adapter); | |
4810 | if (err) | |
4811 | goto err_sw_init; | |
4812 | ||
4813 | err = -EIO; | |
4814 | ||
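	/*
	 * The board info supplies function-pointer tables for the MAC, NVM
	 * and PHY layers; get_variants() then performs any family-specific
	 * setup on top of them.
	 */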
4815 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); | |
4816 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); | |
4817 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); | |
4818 | ||
69e3fd8c | 4819 | err = ei->get_variants(adapter); |
bc7f75fa AK |
4820 | if (err) |
4821 | goto err_hw_init; | |
4822 | ||
4823 | hw->mac.ops.get_bus_info(&adapter->hw); | |
4824 | ||
318a94d6 | 4825 | adapter->hw.phy.autoneg_wait_to_complete = 0; |
bc7f75fa AK |
4826 | |
4827 | /* Copper options */ | |
318a94d6 | 4828 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { |
bc7f75fa AK |
4829 | adapter->hw.phy.mdix = AUTO_ALL_MODES; |
4830 | adapter->hw.phy.disable_polarity_correction = 0; | |
4831 | adapter->hw.phy.ms_type = e1000_ms_hw_default; | |
4832 | } | |
4833 | ||
4834 | if (e1000_check_reset_block(&adapter->hw)) | |
44defeb3 | 4835 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); |
bc7f75fa AK |
4836 | |
4837 | netdev->features = NETIF_F_SG | | |
4838 | NETIF_F_HW_CSUM | | |
4839 | NETIF_F_HW_VLAN_TX | | |
4840 | NETIF_F_HW_VLAN_RX; | |
4841 | ||
4842 | if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) | |
4843 | netdev->features |= NETIF_F_HW_VLAN_FILTER; | |
4844 | ||
4845 | netdev->features |= NETIF_F_TSO; | |
4846 | netdev->features |= NETIF_F_TSO6; | |
4847 | ||
a5136e23 JK |
4848 | netdev->vlan_features |= NETIF_F_TSO; |
4849 | netdev->vlan_features |= NETIF_F_TSO6; | |
4850 | netdev->vlan_features |= NETIF_F_HW_CSUM; | |
4851 | netdev->vlan_features |= NETIF_F_SG; | |
4852 | ||
bc7f75fa AK |
4853 | if (pci_using_dac) |
4854 | netdev->features |= NETIF_F_HIGHDMA; | |
4855 | ||
ad68076e BA |
4856 | /* |
4857 | * We should not be using LLTX anymore, but we are still Tx faster with | |
4858 | * it. | |
4859 | */ | |
bc7f75fa AK |
4860 | netdev->features |= NETIF_F_LLTX; |
4861 | ||
4862 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) | |
4863 | adapter->flags |= FLAG_MNG_PT_ENABLED; | |
4864 | ||
ad68076e BA |
4865 | /* |
4866 | * before reading the NVM, reset the controller to | |
4867 | * put the device in a known good starting state | |
4868 | */ | |
bc7f75fa AK |
4869 | adapter->hw.mac.ops.reset_hw(&adapter->hw); |
4870 | ||
4871 | /* | |
4872 | * systems with ASPM and others may see the checksum fail on the first | |
4873 | * attempt. Let's give it a few tries | |
4874 | */ | |
4875 | for (i = 0;; i++) { | |
4876 | if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) | |
4877 | break; | |
4878 | if (i == 2) { | |
44defeb3 | 4879 | e_err("The NVM Checksum Is Not Valid\n"); |
bc7f75fa AK |
4880 | err = -EIO; |
4881 | goto err_eeprom; | |
4882 | } | |
4883 | } | |
4884 | ||
10aa4c04 AK |
4885 | e1000_eeprom_checks(adapter); |
4886 | ||
bc7f75fa AK |
4887 | /* copy the MAC address out of the NVM */ |
4888 | if (e1000e_read_mac_addr(&adapter->hw)) | |
44defeb3 | 4889 | e_err("NVM Read Error while reading MAC address\n"); |
bc7f75fa AK |
4890 | |
4891 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); | |
4892 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); | |
4893 | ||
4894 | if (!is_valid_ether_addr(netdev->perm_addr)) { | |
44defeb3 JK |
4895 | e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", |
4896 | netdev->perm_addr[0], netdev->perm_addr[1], | |
4897 | netdev->perm_addr[2], netdev->perm_addr[3], | |
4898 | netdev->perm_addr[4], netdev->perm_addr[5]); | |
bc7f75fa AK |
4899 | err = -EIO; |
4900 | goto err_eeprom; | |
4901 | } | |
4902 | ||
4903 | init_timer(&adapter->watchdog_timer); | |
4904 | adapter->watchdog_timer.function = &e1000_watchdog; | |
4905 | adapter->watchdog_timer.data = (unsigned long) adapter; | |
4906 | ||
4907 | init_timer(&adapter->phy_info_timer); | |
4908 | adapter->phy_info_timer.function = &e1000_update_phy_info; | |
4909 | adapter->phy_info_timer.data = (unsigned long) adapter; | |
4910 | ||
4911 | INIT_WORK(&adapter->reset_task, e1000_reset_task); | |
4912 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | |
4913 | ||
bc7f75fa AK |
4914 | /* Initialize link parameters. User can change them with ethtool */ |
4915 | adapter->hw.mac.autoneg = 1; | |
309af40b | 4916 | adapter->fc_autoneg = 1; |
318a94d6 JK |
4917 | adapter->hw.fc.original_type = e1000_fc_default; |
4918 | adapter->hw.fc.type = e1000_fc_default; | |
bc7f75fa AK |
4919 | adapter->hw.phy.autoneg_advertised = 0x2f; |
4920 | ||
4921 | /* ring size defaults */ | |
4922 | adapter->rx_ring->count = 256; | |
4923 | adapter->tx_ring->count = 256; | |
4924 | ||
4925 | /* | |
4926 | * Initial Wake on LAN setting - If APM wake is enabled in | |
4927 | * the EEPROM, enable the ACPI Magic Packet filter | |
4928 | */ | |
4929 | if (adapter->flags & FLAG_APME_IN_WUC) { | |
4930 | /* APME bit in EEPROM is mapped to WUC.APME */ | |
4931 | eeprom_data = er32(WUC); | |
4932 | eeprom_apme_mask = E1000_WUC_APME; | |
4933 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { | |
4934 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && | |
4935 | (adapter->hw.bus.func == 1)) | |
4936 | e1000_read_nvm(&adapter->hw, | |
4937 | NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | |
4938 | else | |
4939 | e1000_read_nvm(&adapter->hw, | |
4940 | NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | |
4941 | } | |
4942 | ||
4943 | /* fetch WoL from EEPROM */ | |
4944 | if (eeprom_data & eeprom_apme_mask) | |
4945 | adapter->eeprom_wol |= E1000_WUFC_MAG; | |
4946 | ||
4947 | /* | |
4948 | * now that we have the eeprom settings, apply the special cases | |
4949 | * where the eeprom may be wrong or the board simply won't support | |
4950 | * wake on lan on a particular port | |
4951 | */ | |
4952 | if (!(adapter->flags & FLAG_HAS_WOL)) | |
4953 | adapter->eeprom_wol = 0; | |
4954 | ||
4955 | /* initialize the wol settings based on the eeprom settings */ | |
4956 | adapter->wol = adapter->eeprom_wol; | |
4957 | ||
4958 | /* reset the hardware with the new settings */ | |
4959 | e1000e_reset(adapter); | |
4960 | ||
ad68076e BA |
4961 | /* |
4962 | * If the controller has AMT, do not set DRV_LOAD until the interface | |
bc7f75fa | 4963 | * is up. For all other cases, let the f/w know that the h/w is now |
ad68076e BA |
4964 | * under the control of the driver. |
4965 | */ | |
c43bc57e | 4966 | if (!(adapter->flags & FLAG_HAS_AMT)) |
bc7f75fa AK |
4967 | e1000_get_hw_control(adapter); |
4968 | ||
4969 | /* tell the stack to leave us alone until e1000_open() is called */ | |
4970 | netif_carrier_off(netdev); | |
d55b53ff | 4971 | netif_tx_stop_all_queues(netdev); |
bc7f75fa AK |
4972 | |
4973 | strcpy(netdev->name, "eth%d"); | |
4974 | err = register_netdev(netdev); | |
4975 | if (err) | |
4976 | goto err_register; | |
4977 | ||
4978 | e1000_print_device_info(adapter); | |
4979 | ||
4980 | return 0; | |
4981 | ||
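	/*
	 * Error unwind: the labels below release what was set up above, in
	 * roughly the reverse order of acquisition.
	 */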
4982 | err_register: | |
c43bc57e JB |
4983 | if (!(adapter->flags & FLAG_HAS_AMT)) |
4984 | e1000_release_hw_control(adapter); | |
bc7f75fa AK |
4985 | err_eeprom: |
4986 | if (!e1000_check_reset_block(&adapter->hw)) | |
4987 | e1000_phy_hw_reset(&adapter->hw); | |
c43bc57e | 4988 | err_hw_init: |
bc7f75fa | 4989 | |
bc7f75fa AK |
4990 | kfree(adapter->tx_ring); |
4991 | kfree(adapter->rx_ring); | |
4992 | err_sw_init: | |
c43bc57e JB |
4993 | if (adapter->hw.flash_address) |
4994 | iounmap(adapter->hw.flash_address); | |
4995 | err_flashmap: | |
bc7f75fa AK |
4996 | iounmap(adapter->hw.hw_addr); |
4997 | err_ioremap: | |
4998 | free_netdev(netdev); | |
4999 | err_alloc_etherdev: | |
f0f422e5 BA |
5000 | pci_release_selected_regions(pdev, |
5001 | pci_select_bars(pdev, IORESOURCE_MEM)); | |
bc7f75fa AK |
5002 | err_pci_reg: |
5003 | err_dma: | |
5004 | pci_disable_device(pdev); | |
5005 | return err; | |
5006 | } | |
5007 | ||
5008 | /** | |
5009 | * e1000_remove - Device Removal Routine | |
5010 | * @pdev: PCI device information struct | |
5011 | * | |
5012 | * e1000_remove is called by the PCI subsystem to alert the driver | |
5013 | * that it should release a PCI device. This could be caused by a
5014 | * Hot-Plug event, or because the driver is going to be removed from | |
5015 | * memory. | |
5016 | **/ | |
5017 | static void __devexit e1000_remove(struct pci_dev *pdev) | |
5018 | { | |
5019 | struct net_device *netdev = pci_get_drvdata(pdev); | |
5020 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
5021 | ||
ad68076e BA |
5022 | /* |
5023 | * flush_scheduled_work() may reschedule our watchdog task, so
5024 | * explicitly disable watchdog tasks from being rescheduled | |
5025 | */ | |
bc7f75fa AK |
5026 | set_bit(__E1000_DOWN, &adapter->state); |
5027 | del_timer_sync(&adapter->watchdog_timer); | |
5028 | del_timer_sync(&adapter->phy_info_timer); | |
5029 | ||
5030 | flush_scheduled_work(); | |
5031 | ||
ad68076e BA |
5032 | /* |
5033 | * Release control of h/w to f/w. If f/w is AMT enabled, this | |
5034 | * would have already happened in close and is redundant. | |
5035 | */ | |
bc7f75fa AK |
5036 | e1000_release_hw_control(adapter); |
5037 | ||
5038 | unregister_netdev(netdev); | |
5039 | ||
5040 | if (!e1000_check_reset_block(&adapter->hw)) | |
5041 | e1000_phy_hw_reset(&adapter->hw); | |
5042 | ||
4662e82b | 5043 | e1000e_reset_interrupt_capability(adapter); |
bc7f75fa AK |
5044 | kfree(adapter->tx_ring); |
5045 | kfree(adapter->rx_ring); | |
5046 | ||
5047 | iounmap(adapter->hw.hw_addr); | |
5048 | if (adapter->hw.flash_address) | |
5049 | iounmap(adapter->hw.flash_address); | |
f0f422e5 BA |
5050 | pci_release_selected_regions(pdev, |
5051 | pci_select_bars(pdev, IORESOURCE_MEM)); | |
bc7f75fa AK |
5052 | |
5053 | free_netdev(netdev); | |
5054 | ||
5055 | pci_disable_device(pdev); | |
5056 | } | |
5057 | ||
5058 | /* PCI Error Recovery (ERS) */ | |
5059 | static struct pci_error_handlers e1000_err_handler = { | |
5060 | .error_detected = e1000_io_error_detected, | |
5061 | .slot_reset = e1000_io_slot_reset, | |
5062 | .resume = e1000_io_resume, | |
5063 | }; | |
5064 | ||
5065 | static struct pci_device_id e1000_pci_tbl[] = { | |
bc7f75fa AK |
5066 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, |
5067 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, | |
5068 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, | |
5069 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, | |
5070 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, | |
5071 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, | |
040babf9 AK |
5072 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, |
5073 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, | |
5074 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, | |
ad68076e | 5075 | |
bc7f75fa AK |
5076 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, |
5077 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, | |
5078 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, | |
5079 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, | |
ad68076e | 5080 | |
bc7f75fa AK |
5081 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, |
5082 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, | |
5083 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, | |
ad68076e | 5084 | |
4662e82b BA |
5085 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, |
5086 | ||
bc7f75fa AK |
5087 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), |
5088 | board_80003es2lan }, | |
5089 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), | |
5090 | board_80003es2lan }, | |
5091 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), | |
5092 | board_80003es2lan }, | |
5093 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), | |
5094 | board_80003es2lan }, | |
ad68076e | 5095 | |
bc7f75fa AK |
5096 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, |
5097 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, | |
5098 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, | |
5099 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, | |
5100 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, | |
5101 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, | |
5102 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, | |
ad68076e | 5103 | |
bc7f75fa AK |
5104 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, |
5105 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, | |
5106 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, | |
5107 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, | |
5108 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, | |
2f15f9d6 | 5109 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, |
97ac8cae BA |
5110 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, |
5111 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, | |
5112 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, | |
5113 | ||
5114 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, | |
5115 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, | |
5116 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, | |
bc7f75fa | 5117 | |
f4187b56 BA |
5118 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, |
5119 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, | |
5120 | ||
bc7f75fa AK |
5121 | { } /* terminate list */ |
5122 | }; | |
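/*
 * MODULE_DEVICE_TABLE exports this ID list so userspace module tools
 * (depmod/modprobe) can autoload the driver when a matching PCI device
 * is found.
 */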
5123 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | |
5124 | ||
5125 | /* PCI Device API Driver */ | |
5126 | static struct pci_driver e1000_driver = { | |
5127 | .name = e1000e_driver_name, | |
5128 | .id_table = e1000_pci_tbl, | |
5129 | .probe = e1000_probe, | |
5130 | .remove = __devexit_p(e1000_remove), | |
5131 | #ifdef CONFIG_PM | |
ad68076e | 5132 | /* Power Management Hooks */ |
bc7f75fa AK |
5133 | .suspend = e1000_suspend, |
5134 | .resume = e1000_resume, | |
5135 | #endif | |
5136 | .shutdown = e1000_shutdown, | |
5137 | .err_handler = &e1000_err_handler | |
5138 | }; | |
5139 | ||
5140 | /** | |
5141 | * e1000_init_module - Driver Registration Routine | |
5142 | * | |
5143 | * e1000_init_module is the first routine called when the driver is | |
5144 | * loaded. All it does is register with the PCI subsystem. | |
5145 | **/ | |
5146 | static int __init e1000_init_module(void) | |
5147 | { | |
5148 | int ret; | |
5149 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", | |
5150 | e1000e_driver_name, e1000e_driver_version); | |
ad68076e | 5151 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", |
bc7f75fa AK |
5152 | e1000e_driver_name); |
5153 | ret = pci_register_driver(&e1000_driver); | |
97ac8cae BA |
5154 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, |
5155 | PM_QOS_DEFAULT_VALUE); | |
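	/*
	 * Registering with PM_QOS_DEFAULT_VALUE imposes no constraint yet;
	 * presumably the runtime paths can later tighten the CPU DMA
	 * latency requirement (e.g. for latency-sensitive Rx such as jumbo
	 * frames) to keep deep C-states from starving DMA.
	 */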
5156 | ||
bc7f75fa AK |
5157 | return ret; |
5158 | } | |
5159 | module_init(e1000_init_module); | |
5160 | ||
5161 | /** | |
5162 | * e1000_exit_module - Driver Exit Cleanup Routine | |
5163 | * | |
5164 | * e1000_exit_module is called just before the driver is removed | |
5165 | * from memory. | |
5166 | **/ | |
5167 | static void __exit e1000_exit_module(void) | |
5168 | { | |
5169 | pci_unregister_driver(&e1000_driver); | |
97ac8cae | 5170 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); |
bc7f75fa AK |
5171 | } |
5172 | module_exit(e1000_exit_module); | |
5173 | ||
5174 | ||
5175 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | |
5176 | MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); | |
5177 | MODULE_LICENSE("GPL"); | |
5178 | MODULE_VERSION(DRV_VERSION); | |
5179 | ||
5180 | /* e1000_main.c */ |