1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include <linux/netdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/delay.h>
32 #include <linux/pci.h>
33
34 #include "e1000.h"
35
36 enum e1000_mng_mode {
37 e1000_mng_mode_none = 0,
38 e1000_mng_mode_asf,
39 e1000_mng_mode_pt,
40 e1000_mng_mode_ipmi,
41 e1000_mng_mode_host_if_only
42 };
43
44 #define E1000_FACTPS_MNGCG 0x20000000
45
46 /* Intel(R) Active Management Technology signature */
47 #define E1000_IAMT_SIGNATURE 0x544D4149
48
49 /**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information
51 * @hw: pointer to the HW structure
52 *
53 * Determines and stores the system bus information for a particular
54 * network interface. The following bus information is determined and stored:
55 * bus speed, bus width, type (PCIe), and PCIe function.
56 **/
57 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
58 {
59 struct e1000_bus_info *bus = &hw->bus;
60 struct e1000_adapter *adapter = hw->adapter;
61 u32 status;
62 u16 pcie_link_status, pci_header_type, cap_offset;
63
64 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
65 if (!cap_offset) {
66 bus->width = e1000_bus_width_unknown;
67 } else {
68 pci_read_config_word(adapter->pdev,
69 cap_offset + PCIE_LINK_STATUS,
70 &pcie_link_status);
71 bus->width = (enum e1000_bus_width)((pcie_link_status &
72 PCIE_LINK_WIDTH_MASK) >>
73 PCIE_LINK_WIDTH_SHIFT);
74 }
75
76 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
77 &pci_header_type);
78 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
79 status = er32(STATUS);
80 bus->func = (status & E1000_STATUS_FUNC_MASK)
81 >> E1000_STATUS_FUNC_SHIFT;
82 } else {
83 bus->func = 0;
84 }
85
86 return 0;
87 }
88
89 /**
90 * e1000e_write_vfta - Write value to VLAN filter table
91 * @hw: pointer to the HW structure
92 * @offset: register offset in VLAN filter table
93 * @value: register value written to VLAN filter table
94 *
95 * Writes value at the given offset in the register array which stores
96 * the VLAN filter table.
97 **/
98 void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
99 {
100 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
101 e1e_flush();
102 }
103
104 /**
105  * e1000e_init_rx_addrs - Initialize receive addresses
106  * @hw: pointer to the HW structure
107  * @rar_count: number of receive address registers
108 *
109  * Sets up the receive address registers by setting the base receive address
110  * register to the device's MAC address and clearing all the other receive
111 * address registers to 0.
112 **/
113 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
114 {
115 u32 i;
116
117 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
119
120 e1000e_rar_set(hw, hw->mac.addr, 0);
121
122 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush();
127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
128 e1e_flush();
129 }
130 }
131
132 /**
133 * e1000e_rar_set - Set receive address register
134 * @hw: pointer to the HW structure
135 * @addr: pointer to the receive address
136 * @index: receive address array register
137 *
138 * Sets the receive address array register at index to the address passed
139 * in by addr.
140 **/
141 void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142 {
143 u32 rar_low, rar_high;
144
145 /*
146 * HW expects these in little endian so we reverse the byte order
147 * from network order (big endian) to little endian
148 */
149 rar_low = ((u32) addr[0] |
150 ((u32) addr[1] << 8) |
151 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
152
153 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
154
155 rar_high |= E1000_RAH_AV;
156
157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
158 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
159 }
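/*
 * Worked example (illustration only, not used by the driver): for the
 * MAC address 00:1B:21:3C:4D:5E the packing above yields
 *
 *	rar_low  = 0x3C211B00
 *	rar_high = 0x00005E4D | E1000_RAH_AV
 *
 * i.e. the first four octets land in the low RA register and the last
 * two in the high RA register, with the Address Valid bit set.
 */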
160
161 /**
162 * e1000_mta_set - Set multicast filter table address
163 * @hw: pointer to the HW structure
164 * @hash_value: determines the MTA register and bit to set
165 *
166 * The multicast table address is a register array of 32-bit registers.
167 * The hash_value is used to determine what register the bit is in, the
168 * current value is read, the new bit is OR'd in and the new value is
169 * written back into the register.
170 **/
171 static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
172 {
173 u32 hash_bit, hash_reg, mta;
174
175 /*
176 * The MTA is a register array of 32-bit registers. It is
177 * treated like an array of (32*mta_reg_count) bits. We want to
178 * set bit BitArray[hash_value]. So we figure out what register
179 * the bit is in, read it, OR in the new bit, then write
180 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
181 * mask to bits 31:5 of the hash value which gives us the
182 * register we're modifying. The hash bit within that register
183 * is determined by the lower 5 bits of the hash value.
184 */
185 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
186 hash_bit = hash_value & 0x1F;
187
188 mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
189
190 mta |= (1 << hash_bit);
191
192 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
193 e1e_flush();
194 }
195
196 /**
197 * e1000_hash_mc_addr - Generate a multicast hash value
198 * @hw: pointer to the HW structure
199 * @mc_addr: pointer to a multicast address
200 *
201 * Generates a multicast address hash value which is used to determine
202 * the multicast filter table array address and new table value. See
203  * e1000_mta_set()
204 **/
205 static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
206 {
207 u32 hash_value, hash_mask;
208 u8 bit_shift = 0;
209
210 /* Register count multiplied by bits per register */
211 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
212
213 /*
214 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
215 * where 0xFF would still fall within the hash mask.
216 */
217 while (hash_mask >> bit_shift != 0xFF)
218 bit_shift++;
219
220 /*
221 * The portion of the address that is used for the hash table
222 * is determined by the mc_filter_type setting.
223 * The algorithm is such that there is a total of 8 bits of shifting.
224 * The bit_shift for a mc_filter_type of 0 represents the number of
225 * left-shifts where the MSB of mc_addr[5] would still fall within
226 * the hash_mask. Case 0 does this exactly. Since there are a total
227 * of 8 bits of shifting, then mc_addr[4] will shift right the
228 * remaining number of bits. Thus 8 - bit_shift. The rest of the
229 * cases are a variation of this algorithm...essentially raising the
230 * number of bits to shift mc_addr[5] left, while still keeping the
231 * 8-bit shifting total.
232 *
233 * For example, given the following Destination MAC Address and an
234 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
235 * we can see that the bit_shift for case 0 is 4. These are the hash
236 * values resulting from each mc_filter_type...
237 * [0] [1] [2] [3] [4] [5]
238 * 01 AA 00 12 34 56
239 * LSB MSB
240 *
241 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
242 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
243 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
244 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
245 */
246 switch (hw->mac.mc_filter_type) {
247 default:
248 case 0:
249 break;
250 case 1:
251 bit_shift += 1;
252 break;
253 case 2:
254 bit_shift += 2;
255 break;
256 case 3:
257 bit_shift += 4;
258 break;
259 }
260
261 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
262 (((u16) mc_addr[5]) << bit_shift)));
263
264 return hash_value;
265 }
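/*
 * Tying the example above together (illustration only): with the
 * assumed 128 MTA registers, the case 0 hash value 0x563 selects MTA
 * register (0x563 >> 5) & 0x7F = 0x2B (43) and bit 0x563 & 0x1F = 3,
 * which is exactly the bit e1000_mta_set() reads, ORs in and writes
 * back.
 */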
266
267 /**
268 * e1000e_update_mc_addr_list_generic - Update Multicast addresses
269 * @hw: pointer to the HW structure
270 * @mc_addr_list: array of multicast addresses to program
271 * @mc_addr_count: number of multicast addresses to program
272 * @rar_used_count: the first RAR register free to program
273 * @rar_count: total number of supported Receive Address Registers
274 *
275 * Updates the Receive Address Registers and Multicast Table Array.
276 * The caller must have a packed mc_addr_list of multicast addresses.
277 * The parameter rar_count will usually be hw->mac.rar_entry_count
278 * unless there are workarounds that change this.
279 **/
280 void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
281 u8 *mc_addr_list, u32 mc_addr_count,
282 u32 rar_used_count, u32 rar_count)
283 {
284 u32 hash_value;
285 u32 i;
286
287 /*
288 * Load the first set of multicast addresses into the exact
289 * filters (RAR). If there are not enough to fill the RAR
290 * array, clear the filters.
291 */
292 for (i = rar_used_count; i < rar_count; i++) {
293 if (mc_addr_count) {
294 e1000e_rar_set(hw, mc_addr_list, i);
295 mc_addr_count--;
296 mc_addr_list += ETH_ALEN;
297 } else {
298 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
299 e1e_flush();
300 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
301 e1e_flush();
302 }
303 }
304
305 /* Clear the old settings from the MTA */
306 hw_dbg(hw, "Clearing MTA\n");
307 for (i = 0; i < hw->mac.mta_reg_count; i++) {
308 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
309 e1e_flush();
310 }
311
312 /* Load any remaining multicast addresses into the hash table. */
313 for (; mc_addr_count > 0; mc_addr_count--) {
314 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
315 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
316 e1000_mta_set(hw, hash_value);
317 mc_addr_list += ETH_ALEN;
318 }
319 }
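/*
 * Hypothetical caller sketch (illustration only; the array below is not
 * part of this file).  Given a packed list of ETH_ALEN-byte multicast
 * addresses and RAR[0] reserved for the station address, an update
 * might look like:
 *
 *	u8 mc_list[2][ETH_ALEN] = {
 *		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
 *	};
 *
 *	e1000e_update_mc_addr_list_generic(hw, &mc_list[0][0], 2, 1,
 *					   hw->mac.rar_entry_count);
 */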
320
321 /**
322 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
323 * @hw: pointer to the HW structure
324 *
325 * Clears the base hardware counters by reading the counter registers.
326 **/
327 void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
328 {
329 u32 temp;
330
331 temp = er32(CRCERRS);
332 temp = er32(SYMERRS);
333 temp = er32(MPC);
334 temp = er32(SCC);
335 temp = er32(ECOL);
336 temp = er32(MCC);
337 temp = er32(LATECOL);
338 temp = er32(COLC);
339 temp = er32(DC);
340 temp = er32(SEC);
341 temp = er32(RLEC);
342 temp = er32(XONRXC);
343 temp = er32(XONTXC);
344 temp = er32(XOFFRXC);
345 temp = er32(XOFFTXC);
346 temp = er32(FCRUC);
347 temp = er32(GPRC);
348 temp = er32(BPRC);
349 temp = er32(MPRC);
350 temp = er32(GPTC);
351 temp = er32(GORCL);
352 temp = er32(GORCH);
353 temp = er32(GOTCL);
354 temp = er32(GOTCH);
355 temp = er32(RNBC);
356 temp = er32(RUC);
357 temp = er32(RFC);
358 temp = er32(ROC);
359 temp = er32(RJC);
360 temp = er32(TORL);
361 temp = er32(TORH);
362 temp = er32(TOTL);
363 temp = er32(TOTH);
364 temp = er32(TPR);
365 temp = er32(TPT);
366 temp = er32(MPTC);
367 temp = er32(BPTC);
368 }
369
370 /**
371 * e1000e_check_for_copper_link - Check for link (Copper)
372 * @hw: pointer to the HW structure
373 *
374  * Checks to see if the link status of the hardware has changed.  If a
375 * change in link status has been detected, then we read the PHY registers
376 * to get the current speed/duplex if link exists.
377 **/
378 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
379 {
380 struct e1000_mac_info *mac = &hw->mac;
381 s32 ret_val;
382 bool link;
383
384 /*
385 * We only want to go out to the PHY registers to see if Auto-Neg
386 * has completed and/or if our link status has changed. The
387 * get_link_status flag is set upon receiving a Link Status
388 * Change or Rx Sequence Error interrupt.
389 */
390 if (!mac->get_link_status)
391 return 0;
392
393 /*
394 * First we want to see if the MII Status Register reports
395 * link. If so, then we want to get the current speed/duplex
396 * of the PHY.
397 */
398 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
399 if (ret_val)
400 return ret_val;
401
402 if (!link)
403 return ret_val; /* No link detected */
404
405 mac->get_link_status = 0;
406
407 /*
408 	 * Check if there was a downshift; this must be checked
409 	 * immediately after link-up
410 */
411 e1000e_check_downshift(hw);
412
413 /*
414 * If we are forcing speed/duplex, then we simply return since
415 * we have already determined whether we have link or not.
416 */
417 if (!mac->autoneg) {
418 ret_val = -E1000_ERR_CONFIG;
419 return ret_val;
420 }
421
422 /*
423 * Auto-Neg is enabled. Auto Speed Detection takes care
424 * of MAC speed/duplex configuration. So we only need to
425 * configure Collision Distance in the MAC.
426 */
427 e1000e_config_collision_dist(hw);
428
429 /*
430 * Configure Flow Control now that Auto-Neg has completed.
431 * First, we need to restore the desired flow control
432 * settings because we may have had to re-autoneg with a
433 * different link partner.
434 */
435 ret_val = e1000e_config_fc_after_link_up(hw);
436 if (ret_val) {
437 hw_dbg(hw, "Error configuring flow control\n");
438 }
439
440 return ret_val;
441 }
442
443 /**
444 * e1000e_check_for_fiber_link - Check for link (Fiber)
445 * @hw: pointer to the HW structure
446 *
447 * Checks for link up on the hardware. If link is not up and we have
448 * a signal, then we need to force link up.
449 **/
450 s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
451 {
452 struct e1000_mac_info *mac = &hw->mac;
453 u32 rxcw;
454 u32 ctrl;
455 u32 status;
456 s32 ret_val;
457
458 ctrl = er32(CTRL);
459 status = er32(STATUS);
460 rxcw = er32(RXCW);
461
462 /*
463 * If we don't have link (auto-negotiation failed or link partner
464 * cannot auto-negotiate), the cable is plugged in (we have signal),
465 * and our link partner is not trying to auto-negotiate with us (we
466 * are receiving idles or data), we need to force link up. We also
467 * need to give auto-negotiation time to complete, in case the cable
468 * was just plugged in. The autoneg_failed flag does this.
469 */
470 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
471 if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
472 (!(rxcw & E1000_RXCW_C))) {
473 if (mac->autoneg_failed == 0) {
474 mac->autoneg_failed = 1;
475 return 0;
476 }
477 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
478
479 /* Disable auto-negotiation in the TXCW register */
480 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
481
482 /* Force link-up and also force full-duplex. */
483 ctrl = er32(CTRL);
484 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
485 ew32(CTRL, ctrl);
486
487 /* Configure Flow Control after forcing link up. */
488 ret_val = e1000e_config_fc_after_link_up(hw);
489 if (ret_val) {
490 hw_dbg(hw, "Error configuring flow control\n");
491 return ret_val;
492 }
493 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
494 /*
495 * If we are forcing link and we are receiving /C/ ordered
496 * sets, re-enable auto-negotiation in the TXCW register
497 * and disable forced link in the Device Control register
498 * in an attempt to auto-negotiate with our link partner.
499 */
500 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
501 ew32(TXCW, mac->txcw);
502 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
503
504 mac->serdes_has_link = 1;
505 }
506
507 return 0;
508 }
509
510 /**
511 * e1000e_check_for_serdes_link - Check for link (Serdes)
512 * @hw: pointer to the HW structure
513 *
514 * Checks for link up on the hardware. If link is not up and we have
515 * a signal, then we need to force link up.
516 **/
517 s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
518 {
519 struct e1000_mac_info *mac = &hw->mac;
520 u32 rxcw;
521 u32 ctrl;
522 u32 status;
523 s32 ret_val;
524
525 ctrl = er32(CTRL);
526 status = er32(STATUS);
527 rxcw = er32(RXCW);
528
529 /*
530 * If we don't have link (auto-negotiation failed or link partner
531 * cannot auto-negotiate), and our link partner is not trying to
532 * auto-negotiate with us (we are receiving idles or data),
533 * we need to force link up. We also need to give auto-negotiation
534 * time to complete.
535 */
536 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
537 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
538 if (mac->autoneg_failed == 0) {
539 mac->autoneg_failed = 1;
540 return 0;
541 }
542 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
543
544 /* Disable auto-negotiation in the TXCW register */
545 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
546
547 /* Force link-up and also force full-duplex. */
548 ctrl = er32(CTRL);
549 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
550 ew32(CTRL, ctrl);
551
552 /* Configure Flow Control after forcing link up. */
553 ret_val = e1000e_config_fc_after_link_up(hw);
554 if (ret_val) {
555 hw_dbg(hw, "Error configuring flow control\n");
556 return ret_val;
557 }
558 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
559 /*
560 * If we are forcing link and we are receiving /C/ ordered
561 * sets, re-enable auto-negotiation in the TXCW register
562 * and disable forced link in the Device Control register
563 * in an attempt to auto-negotiate with our link partner.
564 */
565 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
566 ew32(TXCW, mac->txcw);
567 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
568
569 mac->serdes_has_link = 1;
570 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
571 /*
572 * If we force link for non-auto-negotiation switch, check
573 * link status based on MAC synchronization for internal
574 * serdes media type.
575 */
576 /* SYNCH bit and IV bit are sticky. */
577 udelay(10);
578 if (E1000_RXCW_SYNCH & er32(RXCW)) {
579 if (!(rxcw & E1000_RXCW_IV)) {
580 mac->serdes_has_link = 1;
581 hw_dbg(hw, "SERDES: Link is up.\n");
582 }
583 } else {
584 mac->serdes_has_link = 0;
585 hw_dbg(hw, "SERDES: Link is down.\n");
586 }
587 }
588
589 if (E1000_TXCW_ANE & er32(TXCW)) {
590 status = er32(STATUS);
591 mac->serdes_has_link = (status & E1000_STATUS_LU);
592 }
593
594 return 0;
595 }
596
597 /**
598 * e1000_set_default_fc_generic - Set flow control default values
599 * @hw: pointer to the HW structure
600 *
601 * Read the EEPROM for the default values for flow control and store the
602 * values.
603 **/
604 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
605 {
606 s32 ret_val;
607 u16 nvm_data;
608
609 /*
610 * Read and store word 0x0F of the EEPROM. This word contains bits
611 * that determine the hardware's default PAUSE (flow control) mode,
612 * a bit that determines whether the HW defaults to enabling or
613 * disabling auto-negotiation, and the direction of the
614 * SW defined pins. If there is no SW over-ride of the flow
615 * control setting, then the variable hw->fc will
616 * be initialized based on a value in the EEPROM.
617 */
618 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
619
620 if (ret_val) {
621 hw_dbg(hw, "NVM Read Error\n");
622 return ret_val;
623 }
624
625 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
626 hw->fc.type = e1000_fc_none;
627 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
628 NVM_WORD0F_ASM_DIR)
629 hw->fc.type = e1000_fc_tx_pause;
630 else
631 hw->fc.type = e1000_fc_full;
632
633 return 0;
634 }
635
636 /**
637 * e1000e_setup_link - Setup flow control and link settings
638 * @hw: pointer to the HW structure
639 *
640 * Determines which flow control settings to use, then configures flow
641 * control. Calls the appropriate media-specific link configuration
642 * function. Assuming the adapter has a valid link partner, a valid link
643 * should be established. Assumes the hardware has previously been reset
644 * and the transmitter and receiver are not enabled.
645 **/
646 s32 e1000e_setup_link(struct e1000_hw *hw)
647 {
648 struct e1000_mac_info *mac = &hw->mac;
649 s32 ret_val;
650
651 /*
652 * In the case of the phy reset being blocked, we already have a link.
653 * We do not need to set it up again.
654 */
655 if (e1000_check_reset_block(hw))
656 return 0;
657
658 /*
659 * If flow control is set to default, set flow control based on
660 * the EEPROM flow control settings.
661 */
662 if (hw->fc.type == e1000_fc_default) {
663 ret_val = e1000_set_default_fc_generic(hw);
664 if (ret_val)
665 return ret_val;
666 }
667
668 /*
669 * We want to save off the original Flow Control configuration just
670 * in case we get disconnected and then reconnected into a different
671 * hub or switch with different Flow Control capabilities.
672 */
673 hw->fc.original_type = hw->fc.type;
674
675 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
676
677 /* Call the necessary media_type subroutine to configure the link. */
678 ret_val = mac->ops.setup_physical_interface(hw);
679 if (ret_val)
680 return ret_val;
681
682 /*
683 * Initialize the flow control address, type, and PAUSE timer
684 * registers to their default values. This is done even if flow
685 * control is disabled, because it does not hurt anything to
686 * initialize these registers.
687 */
688 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
689 ew32(FCT, FLOW_CONTROL_TYPE);
690 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
691 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
692
693 ew32(FCTTV, hw->fc.pause_time);
694
695 return e1000e_set_fc_watermarks(hw);
696 }
697
698 /**
699 * e1000_commit_fc_settings_generic - Configure flow control
700 * @hw: pointer to the HW structure
701 *
702 * Write the flow control settings to the Transmit Config Word Register (TXCW)
703  * based on the flow control settings in e1000_mac_info.
704 **/
705 static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
706 {
707 struct e1000_mac_info *mac = &hw->mac;
708 u32 txcw;
709
710 /*
711 * Check for a software override of the flow control settings, and
712 * setup the device accordingly. If auto-negotiation is enabled, then
713 * software will have to set the "PAUSE" bits to the correct value in
714 * the Transmit Config Word Register (TXCW) and re-start auto-
715 * negotiation. However, if auto-negotiation is disabled, then
716 * software will have to manually configure the two flow control enable
717 * bits in the CTRL register.
718 *
719 * The possible values of the "fc" parameter are:
720 * 0: Flow control is completely disabled
721 * 1: Rx flow control is enabled (we can receive pause frames,
722 * but not send pause frames).
723 * 2: Tx flow control is enabled (we can send pause frames but we
724 * do not support receiving pause frames).
725 * 3: Both Rx and Tx flow control (symmetric) are enabled.
726 */
727 switch (hw->fc.type) {
728 case e1000_fc_none:
729 /* Flow control completely disabled by a software over-ride. */
730 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
731 break;
732 case e1000_fc_rx_pause:
733 /*
734 * Rx Flow control is enabled and Tx Flow control is disabled
735 * by a software over-ride. Since there really isn't a way to
736 * advertise that we are capable of Rx Pause ONLY, we will
737 * advertise that we support both symmetric and asymmetric Rx
738 * PAUSE. Later, we will disable the adapter's ability to send
739 * PAUSE frames.
740 */
741 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
742 break;
743 case e1000_fc_tx_pause:
744 /*
745 * Tx Flow control is enabled, and Rx Flow control is disabled,
746 * by a software over-ride.
747 */
748 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
749 break;
750 case e1000_fc_full:
751 /*
752 * Flow control (both Rx and Tx) is enabled by a software
753 * over-ride.
754 */
755 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
756 break;
757 default:
758 hw_dbg(hw, "Flow control param set incorrectly\n");
759 return -E1000_ERR_CONFIG;
760 break;
761 }
762
763 ew32(TXCW, txcw);
764 mac->txcw = txcw;
765
766 return 0;
767 }
768
769 /**
770 * e1000_poll_fiber_serdes_link_generic - Poll for link up
771 * @hw: pointer to the HW structure
772 *
773 * Polls for link up by reading the status register, if link fails to come
774 * up with auto-negotiation, then the link is forced if a signal is detected.
775 **/
776 static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
777 {
778 struct e1000_mac_info *mac = &hw->mac;
779 u32 i, status;
780 s32 ret_val;
781
782 /*
783 * If we have a signal (the cable is plugged in, or assumed true for
784 * serdes media) then poll for a "Link-Up" indication in the Device
785 	 * Status Register.  Time out if a link isn't seen in 500 milliseconds
786 	 * (auto-negotiation should complete in less than 500 milliseconds
787 	 * even if the other end is doing it in SW).
788 */
789 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
790 msleep(10);
791 status = er32(STATUS);
792 if (status & E1000_STATUS_LU)
793 break;
794 }
795 if (i == FIBER_LINK_UP_LIMIT) {
796 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
797 mac->autoneg_failed = 1;
798 /*
799 * AutoNeg failed to achieve a link, so we'll call
800 * mac->check_for_link. This routine will force the
801 * link up if we detect a signal. This will allow us to
802 * communicate with non-autonegotiating link partners.
803 */
804 ret_val = mac->ops.check_for_link(hw);
805 if (ret_val) {
806 hw_dbg(hw, "Error while checking for link\n");
807 return ret_val;
808 }
809 mac->autoneg_failed = 0;
810 } else {
811 mac->autoneg_failed = 0;
812 hw_dbg(hw, "Valid Link Found\n");
813 }
814
815 return 0;
816 }
817
818 /**
819 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
820 * @hw: pointer to the HW structure
821 *
822 * Configures collision distance and flow control for fiber and serdes
823 * links. Upon successful setup, poll for link.
824 **/
825 s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
826 {
827 u32 ctrl;
828 s32 ret_val;
829
830 ctrl = er32(CTRL);
831
832 /* Take the link out of reset */
833 ctrl &= ~E1000_CTRL_LRST;
834
835 e1000e_config_collision_dist(hw);
836
837 ret_val = e1000_commit_fc_settings_generic(hw);
838 if (ret_val)
839 return ret_val;
840
841 /*
842 * Since auto-negotiation is enabled, take the link out of reset (the
843 * link will be in reset, because we previously reset the chip). This
844 * will restart auto-negotiation. If auto-negotiation is successful
845 * then the link-up status bit will be set and the flow control enable
846 * bits (RFCE and TFCE) will be set according to their negotiated value.
847 */
848 hw_dbg(hw, "Auto-negotiation enabled\n");
849
850 ew32(CTRL, ctrl);
851 e1e_flush();
852 msleep(1);
853
854 /*
855 * For these adapters, the SW definable pin 1 is set when the optics
856 * detect a signal. If we have a signal, then poll for a "Link-Up"
857 * indication.
858 */
859 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
860 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
861 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
862 } else {
863 hw_dbg(hw, "No signal detected\n");
864 }
865
866 return 0;
867 }
868
869 /**
870 * e1000e_config_collision_dist - Configure collision distance
871 * @hw: pointer to the HW structure
872 *
873 * Configures the collision distance to the default value and is used
874 * during link setup. Currently no func pointer exists and all
875 * implementations are handled in the generic version of this function.
876 **/
877 void e1000e_config_collision_dist(struct e1000_hw *hw)
878 {
879 u32 tctl;
880
881 tctl = er32(TCTL);
882
883 tctl &= ~E1000_TCTL_COLD;
884 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
885
886 ew32(TCTL, tctl);
887 e1e_flush();
888 }
889
890 /**
891 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
892 * @hw: pointer to the HW structure
893 *
894 * Sets the flow control high/low threshold (watermark) registers. If
895 * flow control XON frame transmission is enabled, then set XON frame
896 * transmission as well.
897 **/
898 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
899 {
900 u32 fcrtl = 0, fcrth = 0;
901
902 /*
903 * Set the flow control receive threshold registers. Normally,
904 * these registers will be set to a default threshold that may be
905 * adjusted later by the driver's runtime code. However, if the
906 * ability to transmit pause frames is not enabled, then these
907 * registers will be set to 0.
908 */
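	/*
	 * Note: per the 'fc' parameter encoding documented in
	 * e1000e_force_mac_fc() below (2 = Tx pause, 3 = both), this
	 * bitwise test is true for e1000_fc_tx_pause as well as
	 * e1000_fc_full, i.e. whenever transmitting pause frames is
	 * allowed.
	 */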
909 if (hw->fc.type & e1000_fc_tx_pause) {
910 /*
911 * We need to set up the Receive Threshold high and low water
912 * marks as well as (optionally) enabling the transmission of
913 * XON frames.
914 */
915 fcrtl = hw->fc.low_water;
916 fcrtl |= E1000_FCRTL_XONE;
917 fcrth = hw->fc.high_water;
918 }
919 ew32(FCRTL, fcrtl);
920 ew32(FCRTH, fcrth);
921
922 return 0;
923 }
924
925 /**
926 * e1000e_force_mac_fc - Force the MAC's flow control settings
927 * @hw: pointer to the HW structure
928 *
929 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
930 * device control register to reflect the adapter settings. TFCE and RFCE
931 * need to be explicitly set by software when a copper PHY is used because
932 * autonegotiation is managed by the PHY rather than the MAC. Software must
933 * also configure these bits when link is forced on a fiber connection.
934 **/
935 s32 e1000e_force_mac_fc(struct e1000_hw *hw)
936 {
937 u32 ctrl;
938
939 ctrl = er32(CTRL);
940
941 /*
942 * Because we didn't get link via the internal auto-negotiation
943 * mechanism (we either forced link or we got link via PHY
944 	 * auto-neg), we have to manually enable/disable transmit and
945 	 * receive flow control.
946 	 *
947 	 * The "Case" statement below enables/disables flow control
948 * according to the "hw->fc.type" parameter.
949 *
950 * The possible values of the "fc" parameter are:
951 * 0: Flow control is completely disabled
952 * 1: Rx flow control is enabled (we can receive pause
953 * frames but not send pause frames).
954 	 *      2:  Tx flow control is enabled (we can send pause frames
955 	 *          but we do not receive pause frames).
956 	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
957 * other: No other values should be possible at this point.
958 */
959 hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
960
961 switch (hw->fc.type) {
962 case e1000_fc_none:
963 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
964 break;
965 case e1000_fc_rx_pause:
966 ctrl &= (~E1000_CTRL_TFCE);
967 ctrl |= E1000_CTRL_RFCE;
968 break;
969 case e1000_fc_tx_pause:
970 ctrl &= (~E1000_CTRL_RFCE);
971 ctrl |= E1000_CTRL_TFCE;
972 break;
973 case e1000_fc_full:
974 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
975 break;
976 default:
977 hw_dbg(hw, "Flow control param set incorrectly\n");
978 return -E1000_ERR_CONFIG;
979 }
980
981 ew32(CTRL, ctrl);
982
983 return 0;
984 }
985
986 /**
987 * e1000e_config_fc_after_link_up - Configures flow control after link
988 * @hw: pointer to the HW structure
989 *
990 * Checks the status of auto-negotiation after link up to ensure that the
991 * speed and duplex were not forced. If the link needed to be forced, then
992 * flow control needs to be forced also. If auto-negotiation is enabled
993 * and did not fail, then we configure flow control based on our link
994 * partner.
995 **/
996 s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
997 {
998 struct e1000_mac_info *mac = &hw->mac;
999 s32 ret_val = 0;
1000 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
1001 u16 speed, duplex;
1002
1003 /*
1004 * Check for the case where we have fiber media and auto-neg failed
1005 * so we had to force link. In this case, we need to force the
1006 * configuration of the MAC to match the "fc" parameter.
1007 */
1008 if (mac->autoneg_failed) {
1009 if (hw->phy.media_type == e1000_media_type_fiber ||
1010 hw->phy.media_type == e1000_media_type_internal_serdes)
1011 ret_val = e1000e_force_mac_fc(hw);
1012 } else {
1013 if (hw->phy.media_type == e1000_media_type_copper)
1014 ret_val = e1000e_force_mac_fc(hw);
1015 }
1016
1017 if (ret_val) {
1018 hw_dbg(hw, "Error forcing flow control settings\n");
1019 return ret_val;
1020 }
1021
1022 /*
1023 * Check for the case where we have copper media and auto-neg is
1024 * enabled. In this case, we need to check and see if Auto-Neg
1025 * has completed, and if so, how the PHY and link partner has
1026 * flow control configured.
1027 */
1028 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1029 /*
1030 * Read the MII Status Register and check to see if AutoNeg
1031 * has completed. We read this twice because this reg has
1032 * some "sticky" (latched) bits.
1033 */
1034 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1035 if (ret_val)
1036 return ret_val;
1037 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1038 if (ret_val)
1039 return ret_val;
1040
1041 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1042 hw_dbg(hw, "Copper PHY and Auto Neg "
1043 "has not completed.\n");
1044 return ret_val;
1045 }
1046
1047 /*
1048 * The AutoNeg process has completed, so we now need to
1049 * read both the Auto Negotiation Advertisement
1050 * Register (Address 4) and the Auto_Negotiation Base
1051 * Page Ability Register (Address 5) to determine how
1052 * flow control was negotiated.
1053 */
1054 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1055 if (ret_val)
1056 return ret_val;
1057 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1058 if (ret_val)
1059 return ret_val;
1060
1061 /*
1062 * Two bits in the Auto Negotiation Advertisement Register
1063 * (Address 4) and two bits in the Auto Negotiation Base
1064 * Page Ability Register (Address 5) determine flow control
1065 * for both the PHY and the link partner. The following
1066 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1067 * 1999, describes these PAUSE resolution bits and how flow
1068 * control is determined based upon these settings.
1069 * NOTE: DC = Don't Care
1070 *
1071 * LOCAL DEVICE | LINK PARTNER
1072 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1073 *-------|---------|-------|---------|--------------------
1074 * 0 | 0 | DC | DC | e1000_fc_none
1075 * 0 | 1 | 0 | DC | e1000_fc_none
1076 * 0 | 1 | 1 | 0 | e1000_fc_none
1077 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1078 * 1 | 0 | 0 | DC | e1000_fc_none
1079 * 1 | DC | 1 | DC | e1000_fc_full
1080 * 1 | 1 | 0 | 0 | e1000_fc_none
1081 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1082 *
1083 *
1084 * Are both PAUSE bits set to 1? If so, this implies
1085 * Symmetric Flow Control is enabled at both ends. The
1086 * ASM_DIR bits are irrelevant per the spec.
1087 *
1088 * For Symmetric Flow Control:
1089 *
1090 * LOCAL DEVICE | LINK PARTNER
1091 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1092 *-------|---------|-------|---------|--------------------
1093 * 1 | DC | 1 | DC | E1000_fc_full
1094 *
1095 */
1096 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1097 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1098 /*
1099 			 * Now we need to check if the user selected reception
1100 			 * of pause frames ONLY.  In this case, we had to advertise
1101 * FULL flow control because we could not advertise Rx
1102 * ONLY. Hence, we must now check to see if we need to
1103 * turn OFF the TRANSMISSION of PAUSE frames.
1104 */
1105 if (hw->fc.original_type == e1000_fc_full) {
1106 hw->fc.type = e1000_fc_full;
1107 hw_dbg(hw, "Flow Control = FULL.\r\n");
1108 } else {
1109 hw->fc.type = e1000_fc_rx_pause;
1110 hw_dbg(hw, "Flow Control = "
1111 "RX PAUSE frames only.\r\n");
1112 }
1113 }
1114 /*
1115 * For receiving PAUSE frames ONLY.
1116 *
1117 * LOCAL DEVICE | LINK PARTNER
1118 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1119 *-------|---------|-------|---------|--------------------
1120 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1121 *
1122 */
1123 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1124 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1125 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1126 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1127 hw->fc.type = e1000_fc_tx_pause;
1128 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
1129 }
1130 /*
1131 * For transmitting PAUSE frames ONLY.
1132 *
1133 * LOCAL DEVICE | LINK PARTNER
1134 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1135 *-------|---------|-------|---------|--------------------
1136 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1137 *
1138 */
1139 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1140 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1141 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1142 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1143 hw->fc.type = e1000_fc_rx_pause;
1144 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
1145 } else {
1146 /*
1147 * Per the IEEE spec, at this point flow control
1148 * should be disabled.
1149 */
1150 hw->fc.type = e1000_fc_none;
1151 hw_dbg(hw, "Flow Control = NONE.\r\n");
1152 }
1153
1154 /*
1155 * Now we need to do one last check... If we auto-
1156 * negotiated to HALF DUPLEX, flow control should not be
1157 * enabled per IEEE 802.3 spec.
1158 */
1159 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1160 if (ret_val) {
1161 hw_dbg(hw, "Error getting link speed and duplex\n");
1162 return ret_val;
1163 }
1164
1165 if (duplex == HALF_DUPLEX)
1166 hw->fc.type = e1000_fc_none;
1167
1168 /*
1169 * Now we call a subroutine to actually force the MAC
1170 * controller to use the correct flow control settings.
1171 */
1172 ret_val = e1000e_force_mac_fc(hw);
1173 if (ret_val) {
1174 hw_dbg(hw, "Error forcing flow control settings\n");
1175 return ret_val;
1176 }
1177 }
1178
1179 return 0;
1180 }
1181
1182 /**
1183 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1184 * @hw: pointer to the HW structure
1185 * @speed: stores the current speed
1186 * @duplex: stores the current duplex
1187 *
1188 * Read the status register for the current speed/duplex and store the current
1189 * speed and duplex for copper connections.
1190 **/
1191 s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1192 {
1193 u32 status;
1194
1195 status = er32(STATUS);
1196 if (status & E1000_STATUS_SPEED_1000) {
1197 *speed = SPEED_1000;
1198 hw_dbg(hw, "1000 Mbs, ");
1199 } else if (status & E1000_STATUS_SPEED_100) {
1200 *speed = SPEED_100;
1201 hw_dbg(hw, "100 Mbs, ");
1202 } else {
1203 *speed = SPEED_10;
1204 hw_dbg(hw, "10 Mbs, ");
1205 }
1206
1207 if (status & E1000_STATUS_FD) {
1208 *duplex = FULL_DUPLEX;
1209 hw_dbg(hw, "Full Duplex\n");
1210 } else {
1211 *duplex = HALF_DUPLEX;
1212 hw_dbg(hw, "Half Duplex\n");
1213 }
1214
1215 return 0;
1216 }
1217
1218 /**
1219 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1220 * @hw: pointer to the HW structure
1221 * @speed: stores the current speed
1222 * @duplex: stores the current duplex
1223 *
1224 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1225 * for fiber/serdes links.
1226 **/
1227 s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1228 {
1229 *speed = SPEED_1000;
1230 *duplex = FULL_DUPLEX;
1231
1232 return 0;
1233 }
1234
1235 /**
1236 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1237 * @hw: pointer to the HW structure
1238 *
1239 * Acquire the HW semaphore to access the PHY or NVM
1240 **/
1241 s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1242 {
1243 u32 swsm;
1244 s32 timeout = hw->nvm.word_size + 1;
1245 s32 i = 0;
1246
1247 /* Get the SW semaphore */
1248 while (i < timeout) {
1249 swsm = er32(SWSM);
1250 if (!(swsm & E1000_SWSM_SMBI))
1251 break;
1252
1253 udelay(50);
1254 i++;
1255 }
1256
1257 if (i == timeout) {
1258 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1259 return -E1000_ERR_NVM;
1260 }
1261
1262 /* Get the FW semaphore. */
1263 for (i = 0; i < timeout; i++) {
1264 swsm = er32(SWSM);
1265 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1266
1267 /* Semaphore acquired if bit latched */
1268 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1269 break;
1270
1271 udelay(50);
1272 }
1273
1274 if (i == timeout) {
1275 /* Release semaphores */
1276 e1000e_put_hw_semaphore(hw);
1277 hw_dbg(hw, "Driver can't access the NVM\n");
1278 return -E1000_ERR_NVM;
1279 }
1280
1281 return 0;
1282 }
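/*
 * Summary note (illustrative): the SW semaphore (SMBI) arbitrates the
 * resource among software agents, while the FW semaphore (SWESMBI)
 * arbitrates between software and firmware.  Both bits are owned after
 * a successful return and both are cleared again by
 * e1000e_put_hw_semaphore() below.
 */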
1283
1284 /**
1285 * e1000e_put_hw_semaphore - Release hardware semaphore
1286 * @hw: pointer to the HW structure
1287 *
1288 * Release hardware semaphore used to access the PHY or NVM
1289 **/
1290 void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1291 {
1292 u32 swsm;
1293
1294 swsm = er32(SWSM);
1295 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1296 ew32(SWSM, swsm);
1297 }
1298
1299 /**
1300 * e1000e_get_auto_rd_done - Check for auto read completion
1301 * @hw: pointer to the HW structure
1302 *
1303 * Check EEPROM for Auto Read done bit.
1304 **/
1305 s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1306 {
1307 s32 i = 0;
1308
1309 while (i < AUTO_READ_DONE_TIMEOUT) {
1310 if (er32(EECD) & E1000_EECD_AUTO_RD)
1311 break;
1312 msleep(1);
1313 i++;
1314 }
1315
1316 if (i == AUTO_READ_DONE_TIMEOUT) {
1317 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1318 return -E1000_ERR_RESET;
1319 }
1320
1321 return 0;
1322 }
1323
1324 /**
1325 * e1000e_valid_led_default - Verify a valid default LED config
1326 * @hw: pointer to the HW structure
1327 * @data: pointer to the NVM (EEPROM)
1328 *
1329 * Read the EEPROM for the current default LED configuration. If the
1330 * LED configuration is not valid, set to a valid LED configuration.
1331 **/
1332 s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1333 {
1334 s32 ret_val;
1335
1336 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1337 if (ret_val) {
1338 hw_dbg(hw, "NVM Read Error\n");
1339 return ret_val;
1340 }
1341
1342 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1343 *data = ID_LED_DEFAULT;
1344
1345 return 0;
1346 }
1347
1348 /**
1349  * e1000e_id_led_init - Initialize ID LED settings from the NVM
1350 * @hw: pointer to the HW structure
1351 *
1352 **/
1353 s32 e1000e_id_led_init(struct e1000_hw *hw)
1354 {
1355 struct e1000_mac_info *mac = &hw->mac;
1356 s32 ret_val;
1357 const u32 ledctl_mask = 0x000000FF;
1358 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1359 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1360 u16 data, i, temp;
1361 const u16 led_mask = 0x0F;
1362
1363 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1364 if (ret_val)
1365 return ret_val;
1366
1367 mac->ledctl_default = er32(LEDCTL);
1368 mac->ledctl_mode1 = mac->ledctl_default;
1369 mac->ledctl_mode2 = mac->ledctl_default;
1370
1371 for (i = 0; i < 4; i++) {
1372 temp = (data >> (i << 2)) & led_mask;
1373 switch (temp) {
1374 case ID_LED_ON1_DEF2:
1375 case ID_LED_ON1_ON2:
1376 case ID_LED_ON1_OFF2:
1377 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1378 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1379 break;
1380 case ID_LED_OFF1_DEF2:
1381 case ID_LED_OFF1_ON2:
1382 case ID_LED_OFF1_OFF2:
1383 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1384 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1385 break;
1386 default:
1387 /* Do nothing */
1388 break;
1389 }
1390 switch (temp) {
1391 case ID_LED_DEF1_ON2:
1392 case ID_LED_ON1_ON2:
1393 case ID_LED_OFF1_ON2:
1394 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1395 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1396 break;
1397 case ID_LED_DEF1_OFF2:
1398 case ID_LED_ON1_OFF2:
1399 case ID_LED_OFF1_OFF2:
1400 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1401 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1402 break;
1403 default:
1404 /* Do nothing */
1405 break;
1406 }
1407 }
1408
1409 return 0;
1410 }
1411
1412 /**
1413 * e1000e_cleanup_led_generic - Set LED config to default operation
1414 * @hw: pointer to the HW structure
1415 *
1416 * Remove the current LED configuration and set the LED configuration
1417 * to the default value, saved from the EEPROM.
1418 **/
1419 s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1420 {
1421 ew32(LEDCTL, hw->mac.ledctl_default);
1422 return 0;
1423 }
1424
1425 /**
1426 * e1000e_blink_led - Blink LED
1427 * @hw: pointer to the HW structure
1428 *
1429 * Blink the LEDs which are set to be on.
1430 **/
1431 s32 e1000e_blink_led(struct e1000_hw *hw)
1432 {
1433 u32 ledctl_blink = 0;
1434 u32 i;
1435
1436 if (hw->phy.media_type == e1000_media_type_fiber) {
1437 /* always blink LED0 for PCI-E fiber */
1438 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1439 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1440 } else {
1441 /*
1442 * set the blink bit for each LED that's "on" (0x0E)
1443 * in ledctl_mode2
1444 */
1445 ledctl_blink = hw->mac.ledctl_mode2;
1446 for (i = 0; i < 4; i++)
1447 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1448 E1000_LEDCTL_MODE_LED_ON)
1449 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1450 (i * 8));
1451 }
1452
1453 ew32(LEDCTL, ledctl_blink);
1454
1455 return 0;
1456 }
1457
1458 /**
1459 * e1000e_led_on_generic - Turn LED on
1460 * @hw: pointer to the HW structure
1461 *
1462 * Turn LED on.
1463 **/
1464 s32 e1000e_led_on_generic(struct e1000_hw *hw)
1465 {
1466 u32 ctrl;
1467
1468 switch (hw->phy.media_type) {
1469 case e1000_media_type_fiber:
1470 ctrl = er32(CTRL);
1471 ctrl &= ~E1000_CTRL_SWDPIN0;
1472 ctrl |= E1000_CTRL_SWDPIO0;
1473 ew32(CTRL, ctrl);
1474 break;
1475 case e1000_media_type_copper:
1476 ew32(LEDCTL, hw->mac.ledctl_mode2);
1477 break;
1478 default:
1479 break;
1480 }
1481
1482 return 0;
1483 }
1484
1485 /**
1486 * e1000e_led_off_generic - Turn LED off
1487 * @hw: pointer to the HW structure
1488 *
1489 * Turn LED off.
1490 **/
1491 s32 e1000e_led_off_generic(struct e1000_hw *hw)
1492 {
1493 u32 ctrl;
1494
1495 switch (hw->phy.media_type) {
1496 case e1000_media_type_fiber:
1497 ctrl = er32(CTRL);
1498 ctrl |= E1000_CTRL_SWDPIN0;
1499 ctrl |= E1000_CTRL_SWDPIO0;
1500 ew32(CTRL, ctrl);
1501 break;
1502 case e1000_media_type_copper:
1503 ew32(LEDCTL, hw->mac.ledctl_mode1);
1504 break;
1505 default:
1506 break;
1507 }
1508
1509 return 0;
1510 }
1511
1512 /**
1513 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1514 * @hw: pointer to the HW structure
1515 * @no_snoop: bitmap of snoop events
1516 *
1517  * Set the PCI-Express no-snoop bits (GCR) for the events enabled in 'no_snoop'.
1518 **/
1519 void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1520 {
1521 u32 gcr;
1522
1523 if (no_snoop) {
1524 gcr = er32(GCR);
1525 gcr &= ~(PCIE_NO_SNOOP_ALL);
1526 gcr |= no_snoop;
1527 ew32(GCR, gcr);
1528 }
1529 }
1530
1531 /**
1532 * e1000e_disable_pcie_master - Disables PCI-express master access
1533 * @hw: pointer to the HW structure
1534 *
1535 * Returns 0 if successful, else returns -10
1536 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1537 * the master requests to be disabled.
1538 *
1539 * Disables PCI-Express master access and verifies there are no pending
1540 * requests.
1541 **/
1542 s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1543 {
1544 u32 ctrl;
1545 s32 timeout = MASTER_DISABLE_TIMEOUT;
1546
1547 ctrl = er32(CTRL);
1548 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1549 ew32(CTRL, ctrl);
1550
1551 while (timeout) {
1552 if (!(er32(STATUS) &
1553 E1000_STATUS_GIO_MASTER_ENABLE))
1554 break;
1555 udelay(100);
1556 timeout--;
1557 }
1558
1559 if (!timeout) {
1560 hw_dbg(hw, "Master requests are pending.\n");
1561 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1562 }
1563
1564 return 0;
1565 }
1566
1567 /**
1568 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1569 * @hw: pointer to the HW structure
1570 *
1571 * Reset the Adaptive Interframe Spacing throttle to default values.
1572 **/
1573 void e1000e_reset_adaptive(struct e1000_hw *hw)
1574 {
1575 struct e1000_mac_info *mac = &hw->mac;
1576
1577 mac->current_ifs_val = 0;
1578 mac->ifs_min_val = IFS_MIN;
1579 mac->ifs_max_val = IFS_MAX;
1580 mac->ifs_step_size = IFS_STEP;
1581 mac->ifs_ratio = IFS_RATIO;
1582
1583 mac->in_ifs_mode = 0;
1584 ew32(AIT, 0);
1585 }
1586
1587 /**
1588 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1589 * @hw: pointer to the HW structure
1590 *
1591 * Update the Adaptive Interframe Spacing Throttle value based on the
1592 * time between transmitted packets and time between collisions.
1593 **/
1594 void e1000e_update_adaptive(struct e1000_hw *hw)
1595 {
1596 struct e1000_mac_info *mac = &hw->mac;
1597
1598 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1599 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1600 mac->in_ifs_mode = 1;
1601 if (mac->current_ifs_val < mac->ifs_max_val) {
1602 if (!mac->current_ifs_val)
1603 mac->current_ifs_val = mac->ifs_min_val;
1604 else
1605 mac->current_ifs_val +=
1606 mac->ifs_step_size;
1607 ew32(AIT, mac->current_ifs_val);
1608 }
1609 }
1610 } else {
1611 if (mac->in_ifs_mode &&
1612 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1613 mac->current_ifs_val = 0;
1614 mac->in_ifs_mode = 0;
1615 ew32(AIT, 0);
1616 }
1617 }
1618 }
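/*
 * Worked example (illustration only, assuming ifs_ratio = 4): if a
 * sample interval shows collision_delta = 400 and tx_packet_delta =
 * 1200, then 400 * 4 = 1600 > 1200, so provided tx_packet_delta also
 * exceeds MIN_NUM_XMITS the throttle engages and AIT is stepped from
 * ifs_min_val toward ifs_max_val by ifs_step_size on successive calls.
 */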
1619
1620 /**
1621 * e1000_raise_eec_clk - Raise EEPROM clock
1622 * @hw: pointer to the HW structure
1623  * @eecd: pointer to the EECD register value
1624 *
1625 * Enable/Raise the EEPROM clock bit.
1626 **/
1627 static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1628 {
1629 *eecd = *eecd | E1000_EECD_SK;
1630 ew32(EECD, *eecd);
1631 e1e_flush();
1632 udelay(hw->nvm.delay_usec);
1633 }
1634
1635 /**
1636 * e1000_lower_eec_clk - Lower EEPROM clock
1637 * @hw: pointer to the HW structure
1638  * @eecd: pointer to the EECD register value
1639 *
1640 * Clear/Lower the EEPROM clock bit.
1641 **/
1642 static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1643 {
1644 *eecd = *eecd & ~E1000_EECD_SK;
1645 ew32(EECD, *eecd);
1646 e1e_flush();
1647 udelay(hw->nvm.delay_usec);
1648 }
1649
1650 /**
1651  * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
1652 * @hw: pointer to the HW structure
1653 * @data: data to send to the EEPROM
1654 * @count: number of bits to shift out
1655 *
1656 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1657 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1658 * In order to do this, "data" must be broken down into bits.
1659 **/
1660 static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1661 {
1662 struct e1000_nvm_info *nvm = &hw->nvm;
1663 u32 eecd = er32(EECD);
1664 u32 mask;
1665
1666 mask = 0x01 << (count - 1);
1667 if (nvm->type == e1000_nvm_eeprom_spi)
1668 eecd |= E1000_EECD_DO;
1669
1670 do {
1671 eecd &= ~E1000_EECD_DI;
1672
1673 if (data & mask)
1674 eecd |= E1000_EECD_DI;
1675
1676 ew32(EECD, eecd);
1677 e1e_flush();
1678
1679 udelay(nvm->delay_usec);
1680
1681 e1000_raise_eec_clk(hw, &eecd);
1682 e1000_lower_eec_clk(hw, &eecd);
1683
1684 mask >>= 1;
1685 } while (mask);
1686
1687 eecd &= ~E1000_EECD_DI;
1688 ew32(EECD, eecd);
1689 }
1690
1691 /**
1692 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1693 * @hw: pointer to the HW structure
1694 * @count: number of bits to shift in
1695 *
1696 * In order to read a register from the EEPROM, we need to shift 'count' bits
1697 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1698 * the EEPROM (setting the SK bit), and then reading the value of the data out
1699 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1700 * always be clear.
1701 **/
1702 static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1703 {
1704 u32 eecd;
1705 u32 i;
1706 u16 data;
1707
1708 eecd = er32(EECD);
1709
1710 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1711 data = 0;
1712
1713 for (i = 0; i < count; i++) {
1714 data <<= 1;
1715 e1000_raise_eec_clk(hw, &eecd);
1716
1717 eecd = er32(EECD);
1718
1719 eecd &= ~E1000_EECD_DI;
1720 if (eecd & E1000_EECD_DO)
1721 data |= 1;
1722
1723 e1000_lower_eec_clk(hw, &eecd);
1724 }
1725
1726 return data;
1727 }
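/*
 * Illustration only: a raw SPI status poll is composed from the two
 * helpers above exactly as e1000_ready_nvm_eeprom() does below:
 *
 *	e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
 *				 hw->nvm.opcode_bits);
 *	spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
 *
 * i.e. clock the RDSR opcode out on DI, then clock eight status bits
 * back in from DO.
 */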
1728
1729 /**
1730 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1731 * @hw: pointer to the HW structure
1732 * @ee_reg: EEPROM flag for polling
1733 *
1734 * Polls the EEPROM status bit for either read or write completion based
1735 * upon the value of 'ee_reg'.
1736 **/
1737 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1738 {
1739 u32 attempts = 100000;
1740 u32 i, reg = 0;
1741
1742 for (i = 0; i < attempts; i++) {
1743 if (ee_reg == E1000_NVM_POLL_READ)
1744 reg = er32(EERD);
1745 else
1746 reg = er32(EEWR);
1747
1748 if (reg & E1000_NVM_RW_REG_DONE)
1749 return 0;
1750
1751 udelay(5);
1752 }
1753
1754 return -E1000_ERR_NVM;
1755 }
1756
1757 /**
1758 * e1000e_acquire_nvm - Generic request for access to EEPROM
1759 * @hw: pointer to the HW structure
1760 *
1761 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1762 * Return successful if access grant bit set, else clear the request for
1763 * EEPROM access and return -E1000_ERR_NVM (-1).
1764 **/
1765 s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1766 {
1767 u32 eecd = er32(EECD);
1768 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1769
1770 ew32(EECD, eecd | E1000_EECD_REQ);
1771 eecd = er32(EECD);
1772
1773 while (timeout) {
1774 if (eecd & E1000_EECD_GNT)
1775 break;
1776 udelay(5);
1777 eecd = er32(EECD);
1778 timeout--;
1779 }
1780
1781 if (!timeout) {
1782 eecd &= ~E1000_EECD_REQ;
1783 ew32(EECD, eecd);
1784 hw_dbg(hw, "Could not acquire NVM grant\n");
1785 return -E1000_ERR_NVM;
1786 }
1787
1788 return 0;
1789 }
1790
1791 /**
1792 * e1000_standby_nvm - Return EEPROM to standby state
1793 * @hw: pointer to the HW structure
1794 *
1795 * Return the EEPROM to a standby state.
1796 **/
1797 static void e1000_standby_nvm(struct e1000_hw *hw)
1798 {
1799 struct e1000_nvm_info *nvm = &hw->nvm;
1800 u32 eecd = er32(EECD);
1801
1802 if (nvm->type == e1000_nvm_eeprom_spi) {
1803 /* Toggle CS to flush commands */
1804 eecd |= E1000_EECD_CS;
1805 ew32(EECD, eecd);
1806 e1e_flush();
1807 udelay(nvm->delay_usec);
1808 eecd &= ~E1000_EECD_CS;
1809 ew32(EECD, eecd);
1810 e1e_flush();
1811 udelay(nvm->delay_usec);
1812 }
1813 }
1814
1815 /**
1816 * e1000_stop_nvm - Terminate EEPROM command
1817 * @hw: pointer to the HW structure
1818 *
1819 * Terminates the current command by inverting the EEPROM's chip select pin.
1820 **/
1821 static void e1000_stop_nvm(struct e1000_hw *hw)
1822 {
1823 u32 eecd;
1824
1825 eecd = er32(EECD);
1826 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1827 /* Pull CS high */
1828 eecd |= E1000_EECD_CS;
1829 e1000_lower_eec_clk(hw, &eecd);
1830 }
1831 }
1832
1833 /**
1834 * e1000e_release_nvm - Release exclusive access to EEPROM
1835 * @hw: pointer to the HW structure
1836 *
1837 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1838 **/
1839 void e1000e_release_nvm(struct e1000_hw *hw)
1840 {
1841 u32 eecd;
1842
1843 e1000_stop_nvm(hw);
1844
1845 eecd = er32(EECD);
1846 eecd &= ~E1000_EECD_REQ;
1847 ew32(EECD, eecd);
1848 }
1849
1850 /**
1851 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1852 * @hw: pointer to the HW structure
1853 *
1854  * Sets up the EEPROM for reading and writing.
1855 **/
1856 static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1857 {
1858 struct e1000_nvm_info *nvm = &hw->nvm;
1859 u32 eecd = er32(EECD);
1860 u16 timeout = 0;
1861 u8 spi_stat_reg;
1862
1863 if (nvm->type == e1000_nvm_eeprom_spi) {
1864 /* Clear SK and CS */
1865 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1866 ew32(EECD, eecd);
1867 udelay(1);
1868 timeout = NVM_MAX_RETRY_SPI;
1869
1870 /*
1871 * Read "Status Register" repeatedly until the LSB is cleared.
1872 * The EEPROM will signal that the command has been completed
1873 * by clearing bit 0 of the internal status register. If it's
1874 * not cleared within 'timeout', then error out.
1875 */
1876 while (timeout) {
1877 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1878 hw->nvm.opcode_bits);
1879 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1880 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1881 break;
1882
1883 udelay(5);
1884 e1000_standby_nvm(hw);
1885 timeout--;
1886 }
1887
1888 if (!timeout) {
1889 hw_dbg(hw, "SPI NVM Status error\n");
1890 return -E1000_ERR_NVM;
1891 }
1892 }
1893
1894 return 0;
1895 }
1896
1897 /**
1898 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1899 * @hw: pointer to the HW structure
1900 * @offset: offset of word in the EEPROM to read
1901 * @words: number of words to read
1902 * @data: word read from the EEPROM
1903 *
1904 * Reads a 16 bit word from the EEPROM using the EERD register.
1905 **/
1906 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1907 {
1908 struct e1000_nvm_info *nvm = &hw->nvm;
1909 u32 i, eerd = 0;
1910 s32 ret_val = 0;
1911
1912 /*
1913 * A check for invalid values: offset too large, too many words
1914 * for the given offset, and zero words.
1915 */
1916 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1917 (words == 0)) {
1918 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1919 return -E1000_ERR_NVM;
1920 }
1921
1922 for (i = 0; i < words; i++) {
1923 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
1924 E1000_NVM_RW_REG_START;
1925
1926 ew32(EERD, eerd);
1927 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
1928 if (ret_val)
1929 break;
1930
1931 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
1932 }
1933
1934 return ret_val;
1935 }
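/*
 * Illustrative sketch, not part of the driver: for a single word the EERD
 * handshake above reduces to composing the address, setting the START bit,
 * polling for DONE and taking the data from the upper half of the register.
 * The helper name is an assumption; the constants are the E1000_NVM_RW_*
 * values already used above.
 */
static s32 example_read_one_word_eerd(struct e1000_hw *hw, u16 offset,
				      u16 *data)
{
	s32 ret_val;

	ew32(EERD, ((u32)offset << E1000_NVM_RW_ADDR_SHIFT) |
		   E1000_NVM_RW_REG_START);
	ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
	if (!ret_val)
		*data = (u16)(er32(EERD) >> E1000_NVM_RW_REG_DATA);

	return ret_val;
}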
1936
1937 /**
1938 * e1000e_write_nvm_spi - Write to EEPROM using SPI
1939 * @hw: pointer to the HW structure
1940 * @offset: offset within the EEPROM to be written to
1941 * @words: number of words to write
1942 * @data: 16 bit word(s) to be written to the EEPROM
1943 *
1944 * Writes data to EEPROM at offset using SPI interface.
1945 *
1946 * If e1000e_update_nvm_checksum is not called after this function, the
1947 * EEPROM will most likely contain an invalid checksum.
1948 **/
1949 s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1950 {
1951 struct e1000_nvm_info *nvm = &hw->nvm;
1952 s32 ret_val;
1953 u16 widx = 0;
1954
1955 /*
1956 * A check for invalid values: offset too large, too many words,
1957 * and not enough words.
1958 */
1959 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1960 (words == 0)) {
1961 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1962 return -E1000_ERR_NVM;
1963 }
1964
1965 ret_val = nvm->ops.acquire_nvm(hw);
1966 if (ret_val)
1967 return ret_val;
1968
1969 msleep(10);
1970
1971 while (widx < words) {
1972 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
1973
1974 ret_val = e1000_ready_nvm_eeprom(hw);
1975 if (ret_val) {
1976 nvm->ops.release_nvm(hw);
1977 return ret_val;
1978 }
1979
1980 e1000_standby_nvm(hw);
1981
1982 /* Send the WRITE ENABLE command (8 bit opcode) */
1983 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
1984 nvm->opcode_bits);
1985
1986 e1000_standby_nvm(hw);
1987
1988 /*
1989 * Some SPI eeproms use the 8th address bit embedded in the
1990 * opcode
1991 */
1992 if ((nvm->address_bits == 8) && (offset >= 128))
1993 write_opcode |= NVM_A8_OPCODE_SPI;
1994
1995 /* Send the Write command (8-bit opcode + addr) */
1996 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
1997 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
1998 nvm->address_bits);
1999
2000 /* Loop to allow for up to a whole page write of the EEPROM */
2001 while (widx < words) {
2002 u16 word_out = data[widx];
2003 word_out = (word_out >> 8) | (word_out << 8);
2004 e1000_shift_out_eec_bits(hw, word_out, 16);
2005 widx++;
2006
2007 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2008 e1000_standby_nvm(hw);
2009 break;
2010 }
2011 }
2012 }
2013
2014 msleep(10);
2015 return 0;
2016 }
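/*
 * Illustrative sketch, not part of the driver: as the comment above warns,
 * an SPI write must be followed by a checksum update or the EEPROM is left
 * with an invalid checksum.  A hypothetical caller (name assumed) would pair
 * the two generic routines from this file (the checksum helper is defined
 * further below):
 */
static s32 example_write_word_spi(struct e1000_hw *hw, u16 offset, u16 word)
{
	s32 ret_val;

	ret_val = e1000e_write_nvm_spi(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;

	/* recompute and store the checksum word so validation still passes */
	return e1000e_update_nvm_checksum_generic(hw);
}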
2017
2018 /**
2019 * e1000e_read_mac_addr - Read device MAC address
2020 * @hw: pointer to the HW structure
2021 *
2022 * Reads the device MAC address from the EEPROM and stores the value.
2023 * Since devices with two ports use the same EEPROM, we flip the last
2024 * bit of the MAC address for the second port.
2025 **/
2026 s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2027 {
2028 s32 ret_val;
2029 u16 offset, nvm_data, i;
2030 u16 mac_addr_offset = 0;
2031
2032 if (hw->mac.type == e1000_82571) {
2033 /* Check for an alternate MAC address. An alternate MAC
2034 * address can be set up by pre-boot software and must be
2035 * treated like a permanent address, overriding the
2036 * actual permanent MAC address. */
2037 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2038 &mac_addr_offset);
2039 if (ret_val) {
2040 hw_dbg(hw, "NVM Read Error\n");
2041 return ret_val;
2042 }
2043 if (mac_addr_offset == 0xFFFF)
2044 mac_addr_offset = 0;
2045
2046 if (mac_addr_offset) {
2047 if (hw->bus.func == E1000_FUNC_1)
2048 mac_addr_offset += ETH_ALEN/sizeof(u16);
2049
2050 /* make sure we have a valid MAC address here
2051 * before using it */
2052 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2053 &nvm_data);
2054 if (ret_val) {
2055 hw_dbg(hw, "NVM Read Error\n");
2056 return ret_val;
2057 }
2058 if (nvm_data & 0x0001)
2059 mac_addr_offset = 0;
2060 }
2061
2062 if (mac_addr_offset)
2063 hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
2064 }
2065
2066 for (i = 0; i < ETH_ALEN; i += 2) {
2067 offset = mac_addr_offset + (i >> 1);
2068 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2069 if (ret_val) {
2070 hw_dbg(hw, "NVM Read Error\n");
2071 return ret_val;
2072 }
2073 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2074 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2075 }
2076
2077 /* Flip last bit of mac address if we're on second port */
2078 if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
2079 hw->mac.perm_addr[5] ^= 1;
2080
2081 for (i = 0; i < ETH_ALEN; i++)
2082 hw->mac.addr[i] = hw->mac.perm_addr[i];
2083
2084 return 0;
2085 }
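/*
 * Worked example, illustrative only: each NVM word holds two address bytes,
 * low byte first, so words 0x1122, 0x3344, 0x5566 yield the MAC address
 * 22:11:44:33:66:55.  For the second port (bus.func == 1) with no alternate
 * address offset, only bit 0 of the last byte is flipped, e.g. ...:55
 * becomes ...:54.
 */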
2086
2087 /**
2088 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2089 * @hw: pointer to the HW structure
2090 *
2091 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2092 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2093 **/
2094 s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2095 {
2096 s32 ret_val;
2097 u16 checksum = 0;
2098 u16 i, nvm_data;
2099
2100 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2101 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2102 if (ret_val) {
2103 hw_dbg(hw, "NVM Read Error\n");
2104 return ret_val;
2105 }
2106 checksum += nvm_data;
2107 }
2108
2109 if (checksum != (u16) NVM_SUM) {
2110 hw_dbg(hw, "NVM Checksum Invalid\n");
2111 return -E1000_ERR_NVM;
2112 }
2113
2114 return 0;
2115 }
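/*
 * Worked example, illustrative only: the first NVM_CHECKSUM_REG words plus
 * the checksum word itself must sum to NVM_SUM (0xBABA), with the addition
 * wrapping at 16 bits.  If the data words sum to 0x1234, the stored checksum
 * word has to be 0xBABA - 0x1234 = 0xA886 for validation to succeed.
 */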
2116
2117 /**
2118 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2119 * @hw: pointer to the HW structure
2120 *
2121 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2122 * up to the checksum. Then calculates the EEPROM checksum and writes the
2123 * value to the EEPROM.
2124 **/
2125 s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2126 {
2127 s32 ret_val;
2128 u16 checksum = 0;
2129 u16 i, nvm_data;
2130
2131 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2132 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2133 if (ret_val) {
2134 hw_dbg(hw, "NVM Read Error while updating checksum.\n");
2135 return ret_val;
2136 }
2137 checksum += nvm_data;
2138 }
2139 checksum = (u16) NVM_SUM - checksum;
2140 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2141 if (ret_val)
2142 hw_dbg(hw, "NVM Write Error while updating checksum.\n");
2143
2144 return ret_val;
2145 }
2146
2147 /**
2148 * e1000e_reload_nvm - Reloads EEPROM
2149 * @hw: pointer to the HW structure
2150 *
2151 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2152 * extended control register.
2153 **/
2154 void e1000e_reload_nvm(struct e1000_hw *hw)
2155 {
2156 u32 ctrl_ext;
2157
2158 udelay(10);
2159 ctrl_ext = er32(CTRL_EXT);
2160 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2161 ew32(CTRL_EXT, ctrl_ext);
2162 e1e_flush();
2163 }
2164
2165 /**
2166 * e1000_calculate_checksum - Calculate checksum for buffer
2167 * @buffer: pointer to EEPROM
2168 * @length: size of EEPROM to calculate a checksum for
2169 *
2170 * Calculates the checksum of a buffer over the specified length. The
2171 * calculated checksum is returned.
2172 **/
2173 static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2174 {
2175 u32 i;
2176 u8 sum = 0;
2177
2178 if (!buffer)
2179 return 0;
2180
2181 for (i = 0; i < length; i++)
2182 sum += buffer[i];
2183
2184 return (u8) (0 - sum);
2185 }
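/*
 * Worked example, illustrative only: the value returned above is the two's
 * complement of the byte sum, so the buffer plus its checksum wraps to zero.
 * For bytes {0x10, 0x20, 0x30} the sum is 0x60, the checksum is
 * (u8)(0 - 0x60) = 0xA0, and 0x60 + 0xA0 == 0x100, i.e. 0 modulo 256.
 */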
2186
2187 /**
2188 * e1000_mng_enable_host_if - Checks whether the host interface is enabled
2189 * @hw: pointer to the HW structure
2190 *
2191 * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
2192 *
2193 * This function checks whether the host interface is enabled for command
2194 * operation and also checks whether the previous command has completed.
2195 * It busy-waits if the previous command has not completed.
2196 **/
2197 static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2198 {
2199 u32 hicr;
2200 u8 i;
2201
2202 /* Check that the host interface is enabled. */
2203 hicr = er32(HICR);
2204 if ((hicr & E1000_HICR_EN) == 0) {
2205 hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
2206 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2207 }
2208 /* check that the previous command has completed */
2209 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
2210 hicr = er32(HICR);
2211 if (!(hicr & E1000_HICR_C))
2212 break;
2213 mdelay(1);
2214 }
2215
2216 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2217 hw_dbg(hw, "Previous command timeout failed .\n");
2218 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2219 }
2220
2221 return 0;
2222 }
2223
2224 /**
2225 * e1000e_check_mng_mode_generic - check management mode
2226 * @hw: pointer to the HW structure
2227 *
2228 * Reads the firmware semaphore register and returns true (>0) if
2229 * manageability is enabled, else false (0).
2230 **/
2231 bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
2232 {
2233 u32 fwsm = er32(FWSM);
2234
2235 return (fwsm & E1000_FWSM_MODE_MASK) ==
2236 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
2237 }
2238
2239 /**
2240 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
2241 * @hw: pointer to the HW structure
2242 *
2243 * Enables packet filtering on transmit packets if manageability is enabled
2244 * and host interface is enabled.
2245 **/
2246 bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2247 {
2248 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
2249 u32 *buffer = (u32 *)&hw->mng_cookie;
2250 u32 offset;
2251 s32 ret_val, hdr_csum, csum;
2252 u8 i, len;
2253
2254 /* No manageability, no filtering */
2255 if (!e1000e_check_mng_mode(hw)) {
2256 hw->mac.tx_pkt_filtering = 0;
2257 return 0;
2258 }
2259
2260 /*
2261 * If we can't read from the host interface for whatever
2262 * reason, disable filtering.
2263 */
2264 ret_val = e1000_mng_enable_host_if(hw);
2265 if (ret_val != 0) {
2266 hw->mac.tx_pkt_filtering = 0;
2267 return ret_val;
2268 }
2269
2270 /* Read in the header. Length and offset are in dwords. */
2271 len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
2272 offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
2273 for (i = 0; i < len; i++)
2274 *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
2275 hdr_csum = hdr->checksum;
2276 hdr->checksum = 0;
2277 csum = e1000_calculate_checksum((u8 *)hdr,
2278 E1000_MNG_DHCP_COOKIE_LENGTH);
2279 /*
2280 * If either the checksums or signature don't match, then
2281 * the cookie area isn't considered valid, in which case we
2282 * take the safe route of assuming Tx filtering is enabled.
2283 */
2284 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2285 hw->mac.tx_pkt_filtering = 1;
2286 return 1;
2287 }
2288
2289 /* Cookie area is valid, make the final check for filtering. */
2290 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2291 hw->mac.tx_pkt_filtering = 0;
2292 return 0;
2293 }
2294
2295 hw->mac.tx_pkt_filtering = 1;
2296 return 1;
2297 }
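/*
 * Note, illustrative only: the signature compared above is the ASCII string
 * "IAMT" read as a little-endian 32-bit value (0x544D4149), and the checksum
 * is recomputed over the cookie with its checksum field zeroed out.  A
 * mismatch in either simply leaves Tx packet filtering enabled as the safe
 * default.
 */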
2298
2299 /**
2300 * e1000_mng_write_cmd_header - Writes manageability command header
2301 * @hw: pointer to the HW structure
2302 * @hdr: pointer to the host interface command header
2303 *
2304 * Writes the command header after computing its checksum.
2305 **/
2306 static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2307 struct e1000_host_mng_command_header *hdr)
2308 {
2309 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2310
2311 /* Write the whole command header structure with new checksum. */
2312
2313 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2314
2315 length >>= 2;
2316 /* Write the relevant command block into the ram area. */
2317 for (i = 0; i < length; i++) {
2318 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2319 *((u32 *) hdr + i));
2320 e1e_flush();
2321 }
2322
2323 return 0;
2324 }
2325
2326 /**
2327 * e1000_mng_host_if_write - Writes to the manageability host interface
2328 * @hw: pointer to the HW structure
2329 * @buffer: pointer to the host interface buffer
2330 * @length: size of the buffer
2331 * @offset: location in the buffer to write to
2332 * @sum: sum of the data (not checksum)
2333 *
2334 * This function writes the buffer content at the given offset on the host
2335 * interface.  It handles alignment so the writes are done in the most
2336 * efficient way, and it accumulates the sum of the data in the *sum parameter.
2337 **/
2338 static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2339 u16 length, u16 offset, u8 *sum)
2340 {
2341 u8 *tmp;
2342 u8 *bufptr = buffer;
2343 u32 data = 0;
2344 u16 remaining, i, j, prev_bytes;
2345
2346 /* sum is only the sum of the data; it is not a checksum */
2347
2348 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
2349 return -E1000_ERR_PARAM;
2350
2351 tmp = (u8 *)&data;
2352 prev_bytes = offset & 0x3;
2353 offset >>= 2;
2354
2355 if (prev_bytes) {
2356 data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
2357 for (j = prev_bytes; j < sizeof(u32); j++) {
2358 *(tmp + j) = *bufptr++;
2359 *sum += *(tmp + j);
2360 }
2361 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
2362 length -= j - prev_bytes;
2363 offset++;
2364 }
2365
2366 remaining = length & 0x3;
2367 length -= remaining;
2368
2369 /* Calculate length in DWORDs */
2370 length >>= 2;
2371
2372 /*
2373 * The device driver writes the relevant command block into the
2374 * ram area.
2375 */
2376 for (i = 0; i < length; i++) {
2377 for (j = 0; j < sizeof(u32); j++) {
2378 *(tmp + j) = *bufptr++;
2379 *sum += *(tmp + j);
2380 }
2381
2382 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2383 }
2384 if (remaining) {
2385 for (j = 0; j < sizeof(u32); j++) {
2386 if (j < remaining)
2387 *(tmp + j) = *bufptr++;
2388 else
2389 *(tmp + j) = 0;
2390
2391 *sum += *(tmp + j);
2392 }
2393 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2394 }
2395
2396 return 0;
2397 }
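/*
 * Worked example, illustrative only: a 10-byte write at byte offset 6 is
 * split as follows by the code above: prev_bytes = 2, so the upper two bytes
 * of dword 1 are read-modified-written first, leaving 8 bytes; these are then
 * written as two full dwords at dword offsets 2 and 3, with no zero-padded
 * remainder.  A 7-byte write at offset 0 would instead be one full dword
 * followed by a final dword carrying 3 data bytes and 1 pad byte of zero.
 */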
2398
2399 /**
2400 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2401 * @hw: pointer to the HW structure
2402 * @buffer: pointer to the host interface
2403 * @length: size of the buffer
2404 *
2405 * Writes the DHCP information to the host interface.
2406 **/
2407 s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2408 {
2409 struct e1000_host_mng_command_header hdr;
2410 s32 ret_val;
2411 u32 hicr;
2412
2413 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2414 hdr.command_length = length;
2415 hdr.reserved1 = 0;
2416 hdr.reserved2 = 0;
2417 hdr.checksum = 0;
2418
2419 /* Enable the host interface */
2420 ret_val = e1000_mng_enable_host_if(hw);
2421 if (ret_val)
2422 return ret_val;
2423
2424 /* Populate the host interface with the contents of "buffer". */
2425 ret_val = e1000_mng_host_if_write(hw, buffer, length,
2426 sizeof(hdr), &(hdr.checksum));
2427 if (ret_val)
2428 return ret_val;
2429
2430 /* Write the manageability command header */
2431 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2432 if (ret_val)
2433 return ret_val;
2434
2435 /* Tell the ARC a new command is pending. */
2436 hicr = er32(HICR);
2437 ew32(HICR, hicr | E1000_HICR_C);
2438
2439 return 0;
2440 }
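/*
 * Illustrative sketch, not part of the driver: a hypothetical caller (name
 * and parameters assumed) hands the raw DHCP payload to the firmware through
 * the routine above; the command header, its checksum and the
 * command-pending bit are all handled internally.
 */
static s32 example_pass_dhcp_to_fw(struct e1000_hw *hw, u8 *pkt, u16 pkt_len)
{
	/* pkt/pkt_len are assumed to describe the DHCP portion of a frame */
	return e1000e_mng_write_dhcp_info(hw, pkt, pkt_len);
}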
2441
2442 /**
2443 * e1000e_enable_mng_pass_thru - Enable processing of ARPs
2444 * @hw: pointer to the HW structure
2445 *
2446 * Verifies whether the hardware needs to allow ARPs to be processed by the host.
2447 **/
2448 bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2449 {
2450 u32 manc;
2451 u32 fwsm, factps;
2452 bool ret_val = 0;
2453
2454 manc = er32(MANC);
2455
2456 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
2457 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
2458 return ret_val;
2459
2460 if (hw->mac.arc_subsystem_valid) {
2461 fwsm = er32(FWSM);
2462 factps = er32(FACTPS);
2463
2464 if (!(factps & E1000_FACTPS_MNGCG) &&
2465 ((fwsm & E1000_FWSM_MODE_MASK) ==
2466 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2467 ret_val = 1;
2468 return ret_val;
2469 }
2470 } else {
2471 if ((manc & E1000_MANC_SMBUS_EN) &&
2472 !(manc & E1000_MANC_ASF_EN)) {
2473 ret_val = 1;
2474 return ret_val;
2475 }
2476 }
2477
2478 return ret_val;
2479 }
2480
2481 s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2482 {
2483 s32 ret_val;
2484 u16 nvm_data;
2485
2486 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2487 if (ret_val) {
2488 hw_dbg(hw, "NVM Read Error\n");
2489 return ret_val;
2490 }
2491 *pba_num = (u32)(nvm_data << 16);
2492
2493 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2494 if (ret_val) {
2495 hw_dbg(hw, "NVM Read Error\n");
2496 return ret_val;
2497 }
2498 *pba_num |= nvm_data;
2499
2500 return 0;
2501 }
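/*
 * Worked example, illustrative only: the PBA number assembled above is the
 * word at NVM_PBA_OFFSET_0 in the upper 16 bits and the word at
 * NVM_PBA_OFFSET_1 in the lower 16 bits, so words 0x1234 and 0xABCD yield a
 * pba_num of 0x1234ABCD.
 */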