drivers/net/ethernet/intel/igb/e1000_mac.c
1 /*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2014 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 *******************************************************************************/
26
27 #include <linux/if_ether.h>
28 #include <linux/delay.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32
33 #include "e1000_mac.h"
34
35 #include "igb.h"
36
37 static s32 igb_set_default_fc(struct e1000_hw *hw);
38 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39
40 /**
41 * igb_get_bus_info_pcie - Get PCIe bus information
42 * @hw: pointer to the HW structure
43 *
44 * Determines and stores the system bus information for a particular
45 * network interface. The following bus information is determined and stored:
46 * bus speed, bus width, type (PCIe), and PCIe function.
47 **/
48 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
49 {
50 struct e1000_bus_info *bus = &hw->bus;
51 s32 ret_val;
52 u32 reg;
53 u16 pcie_link_status;
54
55 bus->type = e1000_bus_type_pci_express;
56
57 ret_val = igb_read_pcie_cap_reg(hw,
58 PCI_EXP_LNKSTA,
59 &pcie_link_status);
60 if (ret_val) {
61 bus->width = e1000_bus_width_unknown;
62 bus->speed = e1000_bus_speed_unknown;
63 } else {
64 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
65 case PCI_EXP_LNKSTA_CLS_2_5GB:
66 bus->speed = e1000_bus_speed_2500;
67 break;
68 case PCI_EXP_LNKSTA_CLS_5_0GB:
69 bus->speed = e1000_bus_speed_5000;
70 break;
71 default:
72 bus->speed = e1000_bus_speed_unknown;
73 break;
74 }
75
76 bus->width = (enum e1000_bus_width)((pcie_link_status &
77 PCI_EXP_LNKSTA_NLW) >>
78 PCI_EXP_LNKSTA_NLW_SHIFT);
79 }
80
81 reg = rd32(E1000_STATUS);
82 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
83
84 return 0;
85 }
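/* Illustrative note (not from the original source): the cast above relies on
 * the e1000_bus_width enumeration lining up with the raw negotiated link
 * width (NLW) field of PCI_EXP_LNKSTA, e.g. an x4 link encodes a lane count
 * of 4, so the shifted field value can be stored directly.
 */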
86
87 /**
88 * igb_clear_vfta - Clear VLAN filter table
89 * @hw: pointer to the HW structure
90 *
91 * Clears the register array which contains the VLAN filter table by
92 * setting all the values to 0.
93 **/
94 void igb_clear_vfta(struct e1000_hw *hw)
95 {
96 u32 offset;
97
98 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
99 array_wr32(E1000_VFTA, offset, 0);
100 wrfl();
101 }
102 }
103
104 /**
105 * igb_write_vfta - Write value to VLAN filter table
106 * @hw: pointer to the HW structure
107 * @offset: register offset in VLAN filter table
108 * @value: register value written to VLAN filter table
109 *
110 * Writes value at the given offset in the register array which stores
111 * the VLAN filter table.
112 **/
113 static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
114 {
115 array_wr32(E1000_VFTA, offset, value);
116 wrfl();
117 }
118
119 /* Due to a hw erratum, if the host tries to configure the VFTA register
120 * while performing queries from the BMC or DMA, then the VFTA in some
121 * cases won't be written.
122 */
123
124 /**
125 * igb_clear_vfta_i350 - Clear VLAN filter table
126 * @hw: pointer to the HW structure
127 *
128 * Clears the register array which contains the VLAN filter table by
129 * setting all the values to 0.
130 **/
131 void igb_clear_vfta_i350(struct e1000_hw *hw)
132 {
133 u32 offset;
134 int i;
135
136 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
137 for (i = 0; i < 10; i++)
138 array_wr32(E1000_VFTA, offset, 0);
139
140 wrfl();
141 }
142 }
143
144 /**
145 * igb_write_vfta_i350 - Write value to VLAN filter table
146 * @hw: pointer to the HW structure
147 * @offset: register offset in VLAN filter table
148 * @value: register value written to VLAN filter table
149 *
150 * Writes value at the given offset in the register array which stores
151 * the VLAN filter table.
152 **/
153 static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
154 {
155 int i;
156
157 for (i = 0; i < 10; i++)
158 array_wr32(E1000_VFTA, offset, value);
159
160 wrfl();
161 }
162
163 /**
164 * igb_init_rx_addrs - Initialize receive addresses
165 * @hw: pointer to the HW structure
166 * @rar_count: receive address registers
167 *
168 * Sets up the receive address registers by setting the base receive address
169 * register to the device's MAC address and clearing all the other receive
170 * address registers to 0.
171 **/
172 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
173 {
174 u32 i;
175 u8 mac_addr[ETH_ALEN] = {0};
176
177 /* Setup the receive address */
178 hw_dbg("Programming MAC Address into RAR[0]\n");
179
180 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
181
182 /* Zero out the other (rar_entry_count - 1) receive addresses */
183 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
184 for (i = 1; i < rar_count; i++)
185 hw->mac.ops.rar_set(hw, mac_addr, i);
186 }
187
188 /**
189 * igb_vfta_set - enable or disable vlan in VLAN filter table
190 * @hw: pointer to the HW structure
191 * @vid: VLAN id to add or remove
192 * @add: if true add filter, if false remove
193 *
194 * Sets or clears a bit in the VLAN filter table array based on VLAN id
195 * and if we are adding or removing the filter
196 **/
197 s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
198 {
199 u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
200 u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
201 u32 vfta;
202 struct igb_adapter *adapter = hw->back;
203 s32 ret_val = 0;
204
205 vfta = adapter->shadow_vfta[index];
206
207 /* bit was set/cleared before we started */
208 if ((!!(vfta & mask)) == add) {
209 ret_val = -E1000_ERR_CONFIG;
210 } else {
211 if (add)
212 vfta |= mask;
213 else
214 vfta &= ~mask;
215 }
216 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
217 igb_write_vfta_i350(hw, index, vfta);
218 else
219 igb_write_vfta(hw, index, vfta);
220 adapter->shadow_vfta[index] = vfta;
221
222 return ret_val;
223 }
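/* Worked example (illustrative, not from the original source): assuming the
 * usual 32-bit-word / 5-bit split used above, VLAN ID 100 selects VFTA word 3
 * (100 >> 5) and bit 4 (100 & 0x1F), so adding it ORs 0x10 into that word of
 * the shadow copy before the word is written to hardware, e.g.:
 *
 *	igb_vfta_set(hw, 100, true);
 */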
224
225 /**
226 * igb_check_alt_mac_addr - Check for alternate MAC addr
227 * @hw: pointer to the HW structure
228 *
229 * Checks the nvm for an alternate MAC address. An alternate MAC address
230 * can be setup by pre-boot software and must be treated like a permanent
231 * address and must override the actual permanent MAC address. If an
232 * alternate MAC address is found it is saved in the hw struct and
233 * programmed into RAR0 and the function returns success, otherwise the
234 * function returns an error.
235 **/
236 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
237 {
238 u32 i;
239 s32 ret_val = 0;
240 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
241 u8 alt_mac_addr[ETH_ALEN];
242
243 /* Alternate MAC address is handled by the option ROM for 82580
244 * and newer. SW support not required.
245 */
246 if (hw->mac.type >= e1000_82580)
247 goto out;
248
249 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
250 &nvm_alt_mac_addr_offset);
251 if (ret_val) {
252 hw_dbg("NVM Read Error\n");
253 goto out;
254 }
255
256 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
257 (nvm_alt_mac_addr_offset == 0x0000))
258 /* There is no Alternate MAC Address */
259 goto out;
260
261 if (hw->bus.func == E1000_FUNC_1)
262 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
263 if (hw->bus.func == E1000_FUNC_2)
264 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
265
266 if (hw->bus.func == E1000_FUNC_3)
267 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
268 for (i = 0; i < ETH_ALEN; i += 2) {
269 offset = nvm_alt_mac_addr_offset + (i >> 1);
270 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
271 if (ret_val) {
272 hw_dbg("NVM Read Error\n");
273 goto out;
274 }
275
276 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
277 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
278 }
279
280 /* if multicast bit is set, the alternate address will not be used */
281 if (is_multicast_ether_addr(alt_mac_addr)) {
282 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
283 goto out;
284 }
285
286 /* We have a valid alternate MAC address, and we want to treat it the
287 * same as the normal permanent MAC address stored by the HW into the
288 * RAR. Do this by mapping this address into RAR0.
289 */
290 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
291
292 out:
293 return ret_val;
294 }
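/* Illustrative note (not from the original source): each NVM word read in
 * the loop above carries two address bytes with the low byte first, so a
 * word value of 0x2211 contributes the bytes 0x11, 0x22 to alt_mac_addr.
 */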
295
296 /**
297 * igb_rar_set - Set receive address register
298 * @hw: pointer to the HW structure
299 * @addr: pointer to the receive address
300 * @index: receive address array register
301 *
302 * Sets the receive address array register at index to the address passed
303 * in by addr.
304 **/
305 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
306 {
307 u32 rar_low, rar_high;
308
309 /* HW expects these in little endian so we reverse the byte order
310 * from network order (big endian) to little endian
311 */
312 rar_low = ((u32) addr[0] |
313 ((u32) addr[1] << 8) |
314 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
315
316 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
317
318 /* If MAC address zero, no need to set the AV bit */
319 if (rar_low || rar_high)
320 rar_high |= E1000_RAH_AV;
321
322 /* Some bridges will combine consecutive 32-bit writes into
323 * a single burst write, which will malfunction on some parts.
324 * The flushes avoid this.
325 */
326 wr32(E1000_RAL(index), rar_low);
327 wrfl();
328 wr32(E1000_RAH(index), rar_high);
329 wrfl();
330 }
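/* Illustrative example (not from the original source): for the address
 * 00:11:22:33:44:55 the packing above yields RAL = 0x33221100 and
 * RAH = 0x00005544 (plus the AV bit), i.e. the first four bytes land in RAL
 * in little-endian order and the last two in the low half of RAH.
 */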
331
332 /**
333 * igb_mta_set - Set multicast filter table address
334 * @hw: pointer to the HW structure
335 * @hash_value: determines the MTA register and bit to set
336 *
337 * The multicast table address is a register array of 32-bit registers.
338 * The hash_value is used to determine what register the bit is in, the
339 * current value is read, the new bit is OR'd in and the new value is
340 * written back into the register.
341 **/
342 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
343 {
344 u32 hash_bit, hash_reg, mta;
345
346 /* The MTA is a register array of 32-bit registers. It is
347 * treated like an array of (32*mta_reg_count) bits. We want to
348 * set bit BitArray[hash_value]. So we figure out what register
349 * the bit is in, read it, OR in the new bit, then write
350 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
351 * mask to bits 31:5 of the hash value which gives us the
352 * register we're modifying. The hash bit within that register
353 * is determined by the lower 5 bits of the hash value.
354 */
355 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
356 hash_bit = hash_value & 0x1F;
357
358 mta = array_rd32(E1000_MTA, hash_reg);
359
360 mta |= (1 << hash_bit);
361
362 array_wr32(E1000_MTA, hash_reg, mta);
363 wrfl();
364 }
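/* Illustrative follow-on (not from the original source) to the worked example
 * in igb_hash_mc_addr() below: a hash value of 0x563 with 128 MTA registers
 * selects register 43 (0x563 >> 5, masked to 7 bits) and bit 3 (0x563 & 0x1F).
 */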
365
366 /**
367 * igb_hash_mc_addr - Generate a multicast hash value
368 * @hw: pointer to the HW structure
369 * @mc_addr: pointer to a multicast address
370 *
371 * Generates a multicast address hash value which is used to determine
372 * the multicast filter table array address and new table value. See
373 * igb_mta_set()
374 **/
375 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
376 {
377 u32 hash_value, hash_mask;
378 u8 bit_shift = 0;
379
380 /* Register count multiplied by bits per register */
381 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
382
383 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
384 * where 0xFF would still fall within the hash mask.
385 */
386 while (hash_mask >> bit_shift != 0xFF)
387 bit_shift++;
388
389 /* The portion of the address that is used for the hash table
390 * is determined by the mc_filter_type setting.
391 * The algorithm is such that there is a total of 8 bits of shifting.
392 * The bit_shift for a mc_filter_type of 0 represents the number of
393 * left-shifts where the MSB of mc_addr[5] would still fall within
394 * the hash_mask. Case 0 does this exactly. Since there are a total
395 * of 8 bits of shifting, then mc_addr[4] will shift right the
396 * remaining number of bits. Thus 8 - bit_shift. The rest of the
397 * cases are a variation of this algorithm...essentially raising the
398 * number of bits to shift mc_addr[5] left, while still keeping the
399 * 8-bit shifting total.
400 *
401 * For example, given the following Destination MAC Address and an
402 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
403 * we can see that the bit_shift for case 0 is 4. These are the hash
404 * values resulting from each mc_filter_type...
405 * [0] [1] [2] [3] [4] [5]
406 * 01 AA 00 12 34 56
407 * LSB MSB
408 *
409 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
410 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
411 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
412 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
413 */
414 switch (hw->mac.mc_filter_type) {
415 default:
416 case 0:
417 break;
418 case 1:
419 bit_shift += 1;
420 break;
421 case 2:
422 bit_shift += 2;
423 break;
424 case 3:
425 bit_shift += 4;
426 break;
427 }
428
429 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
430 (((u16) mc_addr[5]) << bit_shift)));
431
432 return hash_value;
433 }
434
435 /**
436 * igb_update_mc_addr_list - Update Multicast addresses
437 * @hw: pointer to the HW structure
438 * @mc_addr_list: array of multicast addresses to program
439 * @mc_addr_count: number of multicast addresses to program
440 *
441 * Updates entire Multicast Table Array.
442 * The caller must have a packed mc_addr_list of multicast addresses.
443 **/
444 void igb_update_mc_addr_list(struct e1000_hw *hw,
445 u8 *mc_addr_list, u32 mc_addr_count)
446 {
447 u32 hash_value, hash_bit, hash_reg;
448 int i;
449
450 /* clear mta_shadow */
451 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
452
453 /* update mta_shadow from mc_addr_list */
454 for (i = 0; (u32) i < mc_addr_count; i++) {
455 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
456
457 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
458 hash_bit = hash_value & 0x1F;
459
460 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
461 mc_addr_list += (ETH_ALEN);
462 }
463
464 /* replace the entire MTA table */
465 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
466 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
467 wrfl();
468 }
469
470 /**
471 * igb_clear_hw_cntrs_base - Clear base hardware counters
472 * @hw: pointer to the HW structure
473 *
474 * Clears the base hardware counters by reading the counter registers.
475 **/
476 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
477 {
478 rd32(E1000_CRCERRS);
479 rd32(E1000_SYMERRS);
480 rd32(E1000_MPC);
481 rd32(E1000_SCC);
482 rd32(E1000_ECOL);
483 rd32(E1000_MCC);
484 rd32(E1000_LATECOL);
485 rd32(E1000_COLC);
486 rd32(E1000_DC);
487 rd32(E1000_SEC);
488 rd32(E1000_RLEC);
489 rd32(E1000_XONRXC);
490 rd32(E1000_XONTXC);
491 rd32(E1000_XOFFRXC);
492 rd32(E1000_XOFFTXC);
493 rd32(E1000_FCRUC);
494 rd32(E1000_GPRC);
495 rd32(E1000_BPRC);
496 rd32(E1000_MPRC);
497 rd32(E1000_GPTC);
498 rd32(E1000_GORCL);
499 rd32(E1000_GORCH);
500 rd32(E1000_GOTCL);
501 rd32(E1000_GOTCH);
502 rd32(E1000_RNBC);
503 rd32(E1000_RUC);
504 rd32(E1000_RFC);
505 rd32(E1000_ROC);
506 rd32(E1000_RJC);
507 rd32(E1000_TORL);
508 rd32(E1000_TORH);
509 rd32(E1000_TOTL);
510 rd32(E1000_TOTH);
511 rd32(E1000_TPR);
512 rd32(E1000_TPT);
513 rd32(E1000_MPTC);
514 rd32(E1000_BPTC);
515 }
516
517 /**
518 * igb_check_for_copper_link - Check for link (Copper)
519 * @hw: pointer to the HW structure
520 *
521 * Checks to see if the link status of the hardware has changed. If a
522 * change in link status has been detected, then we read the PHY registers
523 * to get the current speed/duplex if link exists.
524 **/
525 s32 igb_check_for_copper_link(struct e1000_hw *hw)
526 {
527 struct e1000_mac_info *mac = &hw->mac;
528 s32 ret_val;
529 bool link;
530
531 /* We only want to go out to the PHY registers to see if Auto-Neg
532 * has completed and/or if our link status has changed. The
533 * get_link_status flag is set upon receiving a Link Status
534 * Change or Rx Sequence Error interrupt.
535 */
536 if (!mac->get_link_status) {
537 ret_val = 0;
538 goto out;
539 }
540
541 /* First we want to see if the MII Status Register reports
542 * link. If so, then we want to get the current speed/duplex
543 * of the PHY.
544 */
545 ret_val = igb_phy_has_link(hw, 1, 0, &link);
546 if (ret_val)
547 goto out;
548
549 if (!link)
550 goto out; /* No link detected */
551
552 mac->get_link_status = false;
553
554 /* Check if there was DownShift, must be checked
555 * immediately after link-up
556 */
557 igb_check_downshift(hw);
558
559 /* If we are forcing speed/duplex, then we simply return since
560 * we have already determined whether we have link or not.
561 */
562 if (!mac->autoneg) {
563 ret_val = -E1000_ERR_CONFIG;
564 goto out;
565 }
566
567 /* Auto-Neg is enabled. Auto Speed Detection takes care
568 * of MAC speed/duplex configuration. So we only need to
569 * configure Collision Distance in the MAC.
570 */
571 igb_config_collision_dist(hw);
572
573 /* Configure Flow Control now that Auto-Neg has completed.
574 * First, we need to restore the desired flow control
575 * settings because we may have had to re-autoneg with a
576 * different link partner.
577 */
578 ret_val = igb_config_fc_after_link_up(hw);
579 if (ret_val)
580 hw_dbg("Error configuring flow control\n");
581
582 out:
583 return ret_val;
584 }
585
586 /**
587 * igb_setup_link - Setup flow control and link settings
588 * @hw: pointer to the HW structure
589 *
590 * Determines which flow control settings to use, then configures flow
591 * control. Calls the appropriate media-specific link configuration
592 * function. Assuming the adapter has a valid link partner, a valid link
593 * should be established. Assumes the hardware has previously been reset
594 * and the transmitter and receiver are not enabled.
595 **/
596 s32 igb_setup_link(struct e1000_hw *hw)
597 {
598 s32 ret_val = 0;
599
600 /* In the case of the phy reset being blocked, we already have a link.
601 * We do not need to set it up again.
602 */
603 if (igb_check_reset_block(hw))
604 goto out;
605
606 /* If requested flow control is set to default, set flow control
607 * based on the EEPROM flow control settings.
608 */
609 if (hw->fc.requested_mode == e1000_fc_default) {
610 ret_val = igb_set_default_fc(hw);
611 if (ret_val)
612 goto out;
613 }
614
615 /* We want to save off the original Flow Control configuration just
616 * in case we get disconnected and then reconnected into a different
617 * hub or switch with different Flow Control capabilities.
618 */
619 hw->fc.current_mode = hw->fc.requested_mode;
620
621 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
622
623 /* Call the necessary media_type subroutine to configure the link. */
624 ret_val = hw->mac.ops.setup_physical_interface(hw);
625 if (ret_val)
626 goto out;
627
628 /* Initialize the flow control address, type, and PAUSE timer
629 * registers to their default values. This is done even if flow
630 * control is disabled, because it does not hurt anything to
631 * initialize these registers.
632 */
633 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
634 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
635 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
636 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
637
638 wr32(E1000_FCTTV, hw->fc.pause_time);
639
640 ret_val = igb_set_fc_watermarks(hw);
641
642 out:
643
644 return ret_val;
645 }
646
647 /**
648 * igb_config_collision_dist - Configure collision distance
649 * @hw: pointer to the HW structure
650 *
651 * Configures the collision distance to the default value and is used
652 * during link setup. Currently no func pointer exists and all
653 * implementations are handled in the generic version of this function.
654 **/
655 void igb_config_collision_dist(struct e1000_hw *hw)
656 {
657 u32 tctl;
658
659 tctl = rd32(E1000_TCTL);
660
661 tctl &= ~E1000_TCTL_COLD;
662 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
663
664 wr32(E1000_TCTL, tctl);
665 wrfl();
666 }
667
668 /**
669 * igb_set_fc_watermarks - Set flow control high/low watermarks
670 * @hw: pointer to the HW structure
671 *
672 * Sets the flow control high/low threshold (watermark) registers. If
673 * flow control XON frame transmission is enabled, then set XON frame
674 * transmission as well.
675 **/
676 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
677 {
678 s32 ret_val = 0;
679 u32 fcrtl = 0, fcrth = 0;
680
681 /* Set the flow control receive threshold registers. Normally,
682 * these registers will be set to a default threshold that may be
683 * adjusted later by the driver's runtime code. However, if the
684 * ability to transmit pause frames is not enabled, then these
685 * registers will be set to 0.
686 */
687 if (hw->fc.current_mode & e1000_fc_tx_pause) {
688 /* We need to set up the Receive Threshold high and low water
689 * marks as well as (optionally) enabling the transmission of
690 * XON frames.
691 */
692 fcrtl = hw->fc.low_water;
693 if (hw->fc.send_xon)
694 fcrtl |= E1000_FCRTL_XONE;
695
696 fcrth = hw->fc.high_water;
697 }
698 wr32(E1000_FCRTL, fcrtl);
699 wr32(E1000_FCRTH, fcrth);
700
701 return ret_val;
702 }
703
704 /**
705 * igb_set_default_fc - Set flow control default values
706 * @hw: pointer to the HW structure
707 *
708 * Read the EEPROM for the default values for flow control and store the
709 * values.
710 **/
711 static s32 igb_set_default_fc(struct e1000_hw *hw)
712 {
713 s32 ret_val = 0;
714 u16 lan_offset;
715 u16 nvm_data;
716
717 /* Read and store word 0x0F of the EEPROM. This word contains bits
718 * that determine the hardware's default PAUSE (flow control) mode,
719 * a bit that determines whether the HW defaults to enabling or
720 * disabling auto-negotiation, and the direction of the
721 * SW defined pins. If there is no SW over-ride of the flow
722 * control setting, then the variable hw->fc will
723 * be initialized based on a value in the EEPROM.
724 */
725 if (hw->mac.type == e1000_i350) {
726 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
727 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
728 + lan_offset, 1, &nvm_data);
729 } else {
730 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
731 1, &nvm_data);
732 }
733
734 if (ret_val) {
735 hw_dbg("NVM Read Error\n");
736 goto out;
737 }
738
739 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
740 hw->fc.requested_mode = e1000_fc_none;
741 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
742 NVM_WORD0F_ASM_DIR)
743 hw->fc.requested_mode = e1000_fc_tx_pause;
744 else
745 hw->fc.requested_mode = e1000_fc_full;
746
747 out:
748 return ret_val;
749 }
750
751 /**
752 * igb_force_mac_fc - Force the MAC's flow control settings
753 * @hw: pointer to the HW structure
754 *
755 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
756 * device control register to reflect the adapter settings. TFCE and RFCE
757 * need to be explicitly set by software when a copper PHY is used because
758 * autonegotiation is managed by the PHY rather than the MAC. Software must
759 * also configure these bits when link is forced on a fiber connection.
760 **/
761 s32 igb_force_mac_fc(struct e1000_hw *hw)
762 {
763 u32 ctrl;
764 s32 ret_val = 0;
765
766 ctrl = rd32(E1000_CTRL);
767
768 /* Because we didn't get link via the internal auto-negotiation
769 * mechanism (we either forced link or we got link via PHY
770 * auto-neg), we have to manually enable/disable transmit and
771 * receive flow control.
772 *
773 * The "Case" statement below enables/disable flow control
774 * according to the "hw->fc.current_mode" parameter.
775 *
776 * The possible values of the "fc" parameter are:
777 * 0: Flow control is completely disabled
778 * 1: Rx flow control is enabled (we can receive pause
779 * frames but not send pause frames).
780 * 2: Tx flow control is enabled (we can send pause frames
781 * but we do not receive pause frames).
782 * 3: Both Rx and Tx flow control (symmetric) is enabled.
783 * other: No other values should be possible at this point.
784 */
785 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
786
787 switch (hw->fc.current_mode) {
788 case e1000_fc_none:
789 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
790 break;
791 case e1000_fc_rx_pause:
792 ctrl &= (~E1000_CTRL_TFCE);
793 ctrl |= E1000_CTRL_RFCE;
794 break;
795 case e1000_fc_tx_pause:
796 ctrl &= (~E1000_CTRL_RFCE);
797 ctrl |= E1000_CTRL_TFCE;
798 break;
799 case e1000_fc_full:
800 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
801 break;
802 default:
803 hw_dbg("Flow control param set incorrectly\n");
804 ret_val = -E1000_ERR_CONFIG;
805 goto out;
806 }
807
808 wr32(E1000_CTRL, ctrl);
809
810 out:
811 return ret_val;
812 }
813
814 /**
815 * igb_config_fc_after_link_up - Configures flow control after link
816 * @hw: pointer to the HW structure
817 *
818 * Checks the status of auto-negotiation after link up to ensure that the
819 * speed and duplex were not forced. If the link needed to be forced, then
820 * flow control needs to be forced also. If auto-negotiation is enabled
821 * and did not fail, then we configure flow control based on our link
822 * partner.
823 **/
824 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
825 {
826 struct e1000_mac_info *mac = &hw->mac;
827 s32 ret_val = 0;
828 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
829 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
830 u16 speed, duplex;
831
832 /* Check for the case where we have fiber media and auto-neg failed
833 * so we had to force link. In this case, we need to force the
834 * configuration of the MAC to match the "fc" parameter.
835 */
836 if (mac->autoneg_failed) {
837 if (hw->phy.media_type == e1000_media_type_internal_serdes)
838 ret_val = igb_force_mac_fc(hw);
839 } else {
840 if (hw->phy.media_type == e1000_media_type_copper)
841 ret_val = igb_force_mac_fc(hw);
842 }
843
844 if (ret_val) {
845 hw_dbg("Error forcing flow control settings\n");
846 goto out;
847 }
848
849 /* Check for the case where we have copper media and auto-neg is
850 * enabled. In this case, we need to check and see if Auto-Neg
851 * has completed, and if so, how the PHY and link partner has
852 * flow control configured.
853 */
854 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
855 /* Read the MII Status Register and check to see if AutoNeg
856 * has completed. We read this twice because this reg has
857 * some "sticky" (latched) bits.
858 */
859 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
860 &mii_status_reg);
861 if (ret_val)
862 goto out;
863 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
864 &mii_status_reg);
865 if (ret_val)
866 goto out;
867
868 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
869 hw_dbg("Copper PHY and Auto Neg has not completed.\n");
870 goto out;
871 }
872
873 /* The AutoNeg process has completed, so we now need to
874 * read both the Auto Negotiation Advertisement
875 * Register (Address 4) and the Auto_Negotiation Base
876 * Page Ability Register (Address 5) to determine how
877 * flow control was negotiated.
878 */
879 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
880 &mii_nway_adv_reg);
881 if (ret_val)
882 goto out;
883 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
884 &mii_nway_lp_ability_reg);
885 if (ret_val)
886 goto out;
887
888 /* Two bits in the Auto Negotiation Advertisement Register
889 * (Address 4) and two bits in the Auto Negotiation Base
890 * Page Ability Register (Address 5) determine flow control
891 * for both the PHY and the link partner. The following
892 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
893 * 1999, describes these PAUSE resolution bits and how flow
894 * control is determined based upon these settings.
895 * NOTE: DC = Don't Care
896 *
897 * LOCAL DEVICE | LINK PARTNER
898 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
899 *-------|---------|-------|---------|--------------------
900 * 0 | 0 | DC | DC | e1000_fc_none
901 * 0 | 1 | 0 | DC | e1000_fc_none
902 * 0 | 1 | 1 | 0 | e1000_fc_none
903 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
904 * 1 | 0 | 0 | DC | e1000_fc_none
905 * 1 | DC | 1 | DC | e1000_fc_full
906 * 1 | 1 | 0 | 0 | e1000_fc_none
907 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
908 *
909 * Are both PAUSE bits set to 1? If so, this implies
910 * Symmetric Flow Control is enabled at both ends. The
911 * ASM_DIR bits are irrelevant per the spec.
912 *
913 * For Symmetric Flow Control:
914 *
915 * LOCAL DEVICE | LINK PARTNER
916 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
917 *-------|---------|-------|---------|--------------------
918 * 1 | DC | 1 | DC | e1000_fc_full
919 *
920 */
921 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
922 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
923 /* Now we need to check if the user selected RX ONLY
924 * of pause frames. In this case, we had to advertise
925 * FULL flow control because we could not advertise RX
926 * ONLY. Hence, we must now check to see if we need to
927 * turn OFF the TRANSMISSION of PAUSE frames.
928 */
929 if (hw->fc.requested_mode == e1000_fc_full) {
930 hw->fc.current_mode = e1000_fc_full;
931 hw_dbg("Flow Control = FULL.\r\n");
932 } else {
933 hw->fc.current_mode = e1000_fc_rx_pause;
934 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
935 }
936 }
937 /* For receiving PAUSE frames ONLY.
938 *
939 * LOCAL DEVICE | LINK PARTNER
940 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
941 *-------|---------|-------|---------|--------------------
942 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
943 */
944 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
945 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
946 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
947 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
948 hw->fc.current_mode = e1000_fc_tx_pause;
949 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
950 }
951 /* For transmitting PAUSE frames ONLY.
952 *
953 * LOCAL DEVICE | LINK PARTNER
954 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
955 *-------|---------|-------|---------|--------------------
956 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
957 */
958 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
959 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
960 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
961 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
962 hw->fc.current_mode = e1000_fc_rx_pause;
963 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
964 }
965 /* Per the IEEE spec, at this point flow control should be
966 * disabled. However, we want to consider that we could
967 * be connected to a legacy switch that doesn't advertise
968 * desired flow control, but can be forced on the link
969 * partner. So if we advertised no flow control, that is
970 * what we will resolve to. If we advertised some kind of
971 * receive capability (Rx Pause Only or Full Flow Control)
972 * and the link partner advertised none, we will configure
973 * ourselves to enable Rx Flow Control only. We can do
974 * this safely for two reasons: If the link partner really
975 * didn't want flow control enabled, and we enable Rx, no
976 * harm done since we won't be receiving any PAUSE frames
977 * anyway. If the intent on the link partner was to have
978 * flow control enabled, then by us enabling RX only, we
979 * can at least receive pause frames and process them.
980 * This is a good idea because in most cases, since we are
981 * predominantly a server NIC, more times than not we will
982 * be asked to delay transmission of packets than asking
983 * our link partner to pause transmission of frames.
984 */
985 else if ((hw->fc.requested_mode == e1000_fc_none) ||
986 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
987 (hw->fc.strict_ieee)) {
988 hw->fc.current_mode = e1000_fc_none;
989 hw_dbg("Flow Control = NONE.\r\n");
990 } else {
991 hw->fc.current_mode = e1000_fc_rx_pause;
992 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
993 }
994
995 /* Now we need to do one last check... If we auto-
996 * negotiated to HALF DUPLEX, flow control should not be
997 * enabled per IEEE 802.3 spec.
998 */
999 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
1000 if (ret_val) {
1001 hw_dbg("Error getting link speed and duplex\n");
1002 goto out;
1003 }
1004
1005 if (duplex == HALF_DUPLEX)
1006 hw->fc.current_mode = e1000_fc_none;
1007
1008 /* Now we call a subroutine to actually force the MAC
1009 * controller to use the correct flow control settings.
1010 */
1011 ret_val = igb_force_mac_fc(hw);
1012 if (ret_val) {
1013 hw_dbg("Error forcing flow control settings\n");
1014 goto out;
1015 }
1016 }
1017 /* Check for the case where we have SerDes media and auto-neg is
1018 * enabled. In this case, we need to check and see if Auto-Neg
1019 * has completed, and if so, how the PHY and link partner has
1020 * flow control configured.
1021 */
1022 if ((hw->phy.media_type == e1000_media_type_internal_serdes)
1023 && mac->autoneg) {
1024 /* Read the PCS_LSTS and check to see if AutoNeg
1025 * has completed.
1026 */
1027 pcs_status_reg = rd32(E1000_PCS_LSTAT);
1028
1029 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1030 hw_dbg("PCS Auto Neg has not completed.\n");
1031 return ret_val;
1032 }
1033
1034 /* The AutoNeg process has completed, so we now need to
1035 * read both the Auto Negotiation Advertisement
1036 * Register (PCS_ANADV) and the Auto_Negotiation Base
1037 * Page Ability Register (PCS_LPAB) to determine how
1038 * flow control was negotiated.
1039 */
1040 pcs_adv_reg = rd32(E1000_PCS_ANADV);
1041 pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
1042
1043 /* Two bits in the Auto Negotiation Advertisement Register
1044 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1045 * Page Ability Register (PCS_LPAB) determine flow control
1046 * for both the PHY and the link partner. The following
1047 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1048 * 1999, describes these PAUSE resolution bits and how flow
1049 * control is determined based upon these settings.
1050 * NOTE: DC = Don't Care
1051 *
1052 * LOCAL DEVICE | LINK PARTNER
1053 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1054 *-------|---------|-------|---------|--------------------
1055 * 0 | 0 | DC | DC | e1000_fc_none
1056 * 0 | 1 | 0 | DC | e1000_fc_none
1057 * 0 | 1 | 1 | 0 | e1000_fc_none
1058 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1059 * 1 | 0 | 0 | DC | e1000_fc_none
1060 * 1 | DC | 1 | DC | e1000_fc_full
1061 * 1 | 1 | 0 | 0 | e1000_fc_none
1062 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1063 *
1064 * Are both PAUSE bits set to 1? If so, this implies
1065 * Symmetric Flow Control is enabled at both ends. The
1066 * ASM_DIR bits are irrelevant per the spec.
1067 *
1068 * For Symmetric Flow Control:
1069 *
1070 * LOCAL DEVICE | LINK PARTNER
1071 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1072 *-------|---------|-------|---------|--------------------
1073 * 1 | DC | 1 | DC | e1000_fc_full
1074 *
1075 */
1076 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1077 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1078 /* Now we need to check if the user selected Rx ONLY
1079 * of pause frames. In this case, we had to advertise
1080 * FULL flow control because we could not advertise Rx
1081 * ONLY. Hence, we must now check to see if we need to
1082 * turn OFF the TRANSMISSION of PAUSE frames.
1083 */
1084 if (hw->fc.requested_mode == e1000_fc_full) {
1085 hw->fc.current_mode = e1000_fc_full;
1086 hw_dbg("Flow Control = FULL.\n");
1087 } else {
1088 hw->fc.current_mode = e1000_fc_rx_pause;
1089 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1090 }
1091 }
1092 /* For receiving PAUSE frames ONLY.
1093 *
1094 * LOCAL DEVICE | LINK PARTNER
1095 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1096 *-------|---------|-------|---------|--------------------
1097 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1098 */
1099 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1100 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1101 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1102 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1103 hw->fc.current_mode = e1000_fc_tx_pause;
1104 hw_dbg("Flow Control = Tx PAUSE frames only.\n");
1105 }
1106 /* For transmitting PAUSE frames ONLY.
1107 *
1108 * LOCAL DEVICE | LINK PARTNER
1109 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1110 *-------|---------|-------|---------|--------------------
1111 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1112 */
1113 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1114 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1115 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1116 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1117 hw->fc.current_mode = e1000_fc_rx_pause;
1118 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1119 } else {
1120 /* Per the IEEE spec, at this point flow control
1121 * should be disabled.
1122 */
1123 hw->fc.current_mode = e1000_fc_none;
1124 hw_dbg("Flow Control = NONE.\n");
1125 }
1126
1127 /* Now we call a subroutine to actually force the MAC
1128 * controller to use the correct flow control settings.
1129 */
1130 pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
1131 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1132 wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
1133
1134 ret_val = igb_force_mac_fc(hw);
1135 if (ret_val) {
1136 hw_dbg("Error forcing flow control settings\n");
1137 return ret_val;
1138 }
1139 }
1140
1141 out:
1142 return ret_val;
1143 }
1144
1145 /**
1146 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
1147 * @hw: pointer to the HW structure
1148 * @speed: stores the current speed
1149 * @duplex: stores the current duplex
1150 *
1151 * Read the status register for the current speed/duplex and store the current
1152 * speed and duplex for copper connections.
1153 **/
1154 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1155 u16 *duplex)
1156 {
1157 u32 status;
1158
1159 status = rd32(E1000_STATUS);
1160 if (status & E1000_STATUS_SPEED_1000) {
1161 *speed = SPEED_1000;
1162 hw_dbg("1000 Mbs, ");
1163 } else if (status & E1000_STATUS_SPEED_100) {
1164 *speed = SPEED_100;
1165 hw_dbg("100 Mbs, ");
1166 } else {
1167 *speed = SPEED_10;
1168 hw_dbg("10 Mbs, ");
1169 }
1170
1171 if (status & E1000_STATUS_FD) {
1172 *duplex = FULL_DUPLEX;
1173 hw_dbg("Full Duplex\n");
1174 } else {
1175 *duplex = HALF_DUPLEX;
1176 hw_dbg("Half Duplex\n");
1177 }
1178
1179 return 0;
1180 }
1181
1182 /**
1183 * igb_get_hw_semaphore - Acquire hardware semaphore
1184 * @hw: pointer to the HW structure
1185 *
1186 * Acquire the HW semaphore to access the PHY or NVM
1187 **/
1188 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1189 {
1190 u32 swsm;
1191 s32 ret_val = 0;
1192 s32 timeout = hw->nvm.word_size + 1;
1193 s32 i = 0;
1194
1195 /* Get the SW semaphore */
1196 while (i < timeout) {
1197 swsm = rd32(E1000_SWSM);
1198 if (!(swsm & E1000_SWSM_SMBI))
1199 break;
1200
1201 udelay(50);
1202 i++;
1203 }
1204
1205 if (i == timeout) {
1206 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1207 ret_val = -E1000_ERR_NVM;
1208 goto out;
1209 }
1210
1211 /* Get the FW semaphore. */
1212 for (i = 0; i < timeout; i++) {
1213 swsm = rd32(E1000_SWSM);
1214 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1215
1216 /* Semaphore acquired if bit latched */
1217 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1218 break;
1219
1220 udelay(50);
1221 }
1222
1223 if (i == timeout) {
1224 /* Release semaphores */
1225 igb_put_hw_semaphore(hw);
1226 hw_dbg("Driver can't access the NVM\n");
1227 ret_val = -E1000_ERR_NVM;
1228 goto out;
1229 }
1230
1231 out:
1232 return ret_val;
1233 }
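/* Usage sketch (illustrative, not from the original source): callers are
 * expected to pair a successful acquire with igb_put_hw_semaphore() once the
 * PHY/NVM access is done, e.g.:
 *
 *	if (!igb_get_hw_semaphore(hw)) {
 *		... access NVM or PHY registers here ...
 *		igb_put_hw_semaphore(hw);
 *	}
 */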
1234
1235 /**
1236 * igb_put_hw_semaphore - Release hardware semaphore
1237 * @hw: pointer to the HW structure
1238 *
1239 * Release hardware semaphore used to access the PHY or NVM
1240 **/
1241 void igb_put_hw_semaphore(struct e1000_hw *hw)
1242 {
1243 u32 swsm;
1244
1245 swsm = rd32(E1000_SWSM);
1246
1247 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1248
1249 wr32(E1000_SWSM, swsm);
1250 }
1251
1252 /**
1253 * igb_get_auto_rd_done - Check for auto read completion
1254 * @hw: pointer to the HW structure
1255 *
1256 * Check EEPROM for Auto Read done bit.
1257 **/
1258 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1259 {
1260 s32 i = 0;
1261 s32 ret_val = 0;
1262
1263
1264 while (i < AUTO_READ_DONE_TIMEOUT) {
1265 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1266 break;
1267 msleep(1);
1268 i++;
1269 }
1270
1271 if (i == AUTO_READ_DONE_TIMEOUT) {
1272 hw_dbg("Auto read by HW from NVM has not completed.\n");
1273 ret_val = -E1000_ERR_RESET;
1274 goto out;
1275 }
1276
1277 out:
1278 return ret_val;
1279 }
1280
1281 /**
1282 * igb_valid_led_default - Verify a valid default LED config
1283 * @hw: pointer to the HW structure
1284 * @data: pointer to the NVM (EEPROM)
1285 *
1286 * Read the EEPROM for the current default LED configuration. If the
1287 * LED configuration is not valid, set to a valid LED configuration.
1288 **/
1289 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1290 {
1291 s32 ret_val;
1292
1293 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1294 if (ret_val) {
1295 hw_dbg("NVM Read Error\n");
1296 goto out;
1297 }
1298
1299 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1300 switch (hw->phy.media_type) {
1301 case e1000_media_type_internal_serdes:
1302 *data = ID_LED_DEFAULT_82575_SERDES;
1303 break;
1304 case e1000_media_type_copper:
1305 default:
1306 *data = ID_LED_DEFAULT;
1307 break;
1308 }
1309 }
1310 out:
1311 return ret_val;
1312 }
1313
1314 /**
1315 * igb_id_led_init - Initialize the ID LED configuration
1316 * @hw: pointer to the HW structure
1317 *
1318 **/
1319 s32 igb_id_led_init(struct e1000_hw *hw)
1320 {
1321 struct e1000_mac_info *mac = &hw->mac;
1322 s32 ret_val;
1323 const u32 ledctl_mask = 0x000000FF;
1324 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1325 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1326 u16 data, i, temp;
1327 const u16 led_mask = 0x0F;
1328
1329 /* i210 and i211 devices have a different LED mechanism */
1330 if ((hw->mac.type == e1000_i210) ||
1331 (hw->mac.type == e1000_i211))
1332 ret_val = igb_valid_led_default_i210(hw, &data);
1333 else
1334 ret_val = igb_valid_led_default(hw, &data);
1335
1336 if (ret_val)
1337 goto out;
1338
1339 mac->ledctl_default = rd32(E1000_LEDCTL);
1340 mac->ledctl_mode1 = mac->ledctl_default;
1341 mac->ledctl_mode2 = mac->ledctl_default;
1342
1343 for (i = 0; i < 4; i++) {
1344 temp = (data >> (i << 2)) & led_mask;
1345 switch (temp) {
1346 case ID_LED_ON1_DEF2:
1347 case ID_LED_ON1_ON2:
1348 case ID_LED_ON1_OFF2:
1349 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1350 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1351 break;
1352 case ID_LED_OFF1_DEF2:
1353 case ID_LED_OFF1_ON2:
1354 case ID_LED_OFF1_OFF2:
1355 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1356 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1357 break;
1358 default:
1359 /* Do nothing */
1360 break;
1361 }
1362 switch (temp) {
1363 case ID_LED_DEF1_ON2:
1364 case ID_LED_ON1_ON2:
1365 case ID_LED_OFF1_ON2:
1366 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1367 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1368 break;
1369 case ID_LED_DEF1_OFF2:
1370 case ID_LED_ON1_OFF2:
1371 case ID_LED_OFF1_OFF2:
1372 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1373 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1374 break;
1375 default:
1376 /* Do nothing */
1377 break;
1378 }
1379 }
1380
1381 out:
1382 return ret_val;
1383 }
1384
1385 /**
1386 * igb_cleanup_led - Set LED config to default operation
1387 * @hw: pointer to the HW structure
1388 *
1389 * Remove the current LED configuration and set the LED configuration
1390 * to the default value, saved from the EEPROM.
1391 **/
1392 s32 igb_cleanup_led(struct e1000_hw *hw)
1393 {
1394 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1395 return 0;
1396 }
1397
1398 /**
1399 * igb_blink_led - Blink LED
1400 * @hw: pointer to the HW structure
1401 *
1402 * Blink the LEDs which are set to be on.
1403 **/
1404 s32 igb_blink_led(struct e1000_hw *hw)
1405 {
1406 u32 ledctl_blink = 0;
1407 u32 i;
1408
1409 if (hw->phy.media_type == e1000_media_type_fiber) {
1410 /* always blink LED0 for PCI-E fiber */
1411 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1412 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1413 } else {
1414 /* Set the blink bit for each LED that's "on" (0x0E)
1415 * (or "off" if inverted) in ledctl_mode2. The blink
1416 * logic in hardware only works when mode is set to "on"
1417 * so it must be changed accordingly when the mode is
1418 * "off" and inverted.
1419 */
1420 ledctl_blink = hw->mac.ledctl_mode2;
1421 for (i = 0; i < 32; i += 8) {
1422 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1423 E1000_LEDCTL_LED0_MODE_MASK;
1424 u32 led_default = hw->mac.ledctl_default >> i;
1425
1426 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1427 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1428 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1429 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1430 ledctl_blink &=
1431 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1432 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1433 E1000_LEDCTL_MODE_LED_ON) << i;
1434 }
1435 }
1436 }
1437
1438 wr32(E1000_LEDCTL, ledctl_blink);
1439
1440 return 0;
1441 }
1442
1443 /**
1444 * igb_led_off - Turn LED off
1445 * @hw: pointer to the HW structure
1446 *
1447 * Turn LED off.
1448 **/
1449 s32 igb_led_off(struct e1000_hw *hw)
1450 {
1451 switch (hw->phy.media_type) {
1452 case e1000_media_type_copper:
1453 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1454 break;
1455 default:
1456 break;
1457 }
1458
1459 return 0;
1460 }
1461
1462 /**
1463 * igb_disable_pcie_master - Disables PCI-express master access
1464 * @hw: pointer to the HW structure
1465 *
1466 * Returns 0 if successful, else returns -10
1467 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1468 * the master requests to be disabled.
1469 *
1470 * Disables PCI-Express master access and verifies there are no pending
1471 * requests.
1472 **/
1473 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1474 {
1475 u32 ctrl;
1476 s32 timeout = MASTER_DISABLE_TIMEOUT;
1477 s32 ret_val = 0;
1478
1479 if (hw->bus.type != e1000_bus_type_pci_express)
1480 goto out;
1481
1482 ctrl = rd32(E1000_CTRL);
1483 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1484 wr32(E1000_CTRL, ctrl);
1485
1486 while (timeout) {
1487 if (!(rd32(E1000_STATUS) &
1488 E1000_STATUS_GIO_MASTER_ENABLE))
1489 break;
1490 udelay(100);
1491 timeout--;
1492 }
1493
1494 if (!timeout) {
1495 hw_dbg("Master requests are pending.\n");
1496 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1497 goto out;
1498 }
1499
1500 out:
1501 return ret_val;
1502 }
1503
1504 /**
1505 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1506 * @hw: pointer to the HW structure
1507 *
1508 * Verify that when not using auto-negotiation, MDI/MDIx is correctly set;
1509 * without auto-negotiation it must be forced to MDI mode only.
1510 **/
1511 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1512 {
1513 s32 ret_val = 0;
1514
1515 /* All MDI settings are supported on 82580 and newer. */
1516 if (hw->mac.type >= e1000_82580)
1517 goto out;
1518
1519 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1520 hw_dbg("Invalid MDI setting detected\n");
1521 hw->phy.mdix = 1;
1522 ret_val = -E1000_ERR_CONFIG;
1523 goto out;
1524 }
1525
1526 out:
1527 return ret_val;
1528 }
1529
1530 /**
1531 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1532 * @hw: pointer to the HW structure
1533 * @reg: 32bit register offset such as E1000_SCTL
1534 * @offset: register offset to write to
1535 * @data: data to write at register offset
1536 *
1537 * Writes an address/data control type register. There are several of these
1538 * and they all have the format address << 8 | data and bit 31 is polled for
1539 * completion.
1540 **/
1541 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1542 u32 offset, u8 data)
1543 {
1544 u32 i, regvalue = 0;
1545 s32 ret_val = 0;
1546
1547 /* Set up the address and data */
1548 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1549 wr32(reg, regvalue);
1550
1551 /* Poll the ready bit to see if the write completed */
1552 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1553 udelay(5);
1554 regvalue = rd32(reg);
1555 if (regvalue & E1000_GEN_CTL_READY)
1556 break;
1557 }
1558 if (!(regvalue & E1000_GEN_CTL_READY)) {
1559 hw_dbg("Reg %08x did not indicate ready\n", reg);
1560 ret_val = -E1000_ERR_PHY;
1561 goto out;
1562 }
1563
1564 out:
1565 return ret_val;
1566 }
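/* Usage sketch (illustrative; the offset/data values here are hypothetical
 * placeholders, not from the original source): a caller passes one of the
 * address/data style registers named in the kernel-doc above, e.g.
 *
 *	ret_val = igb_write_8bit_ctrl_reg(hw, E1000_SCTL, ctrl_offset, ctrl_data);
 *
 * where ctrl_offset and ctrl_data stand in for the device-specific address
 * and payload.
 */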
1567
1568 /**
1569 * igb_enable_mng_pass_thru - Enable processing of ARPs
1570 * @hw: pointer to the HW structure
1571 *
1572 * Verifies that the hardware needs to leave the interface enabled so that
1573 * frames can be directed to and from the management interface.
1574 **/
1575 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1576 {
1577 u32 manc;
1578 u32 fwsm, factps;
1579 bool ret_val = false;
1580
1581 if (!hw->mac.asf_firmware_present)
1582 goto out;
1583
1584 manc = rd32(E1000_MANC);
1585
1586 if (!(manc & E1000_MANC_RCV_TCO_EN))
1587 goto out;
1588
1589 if (hw->mac.arc_subsystem_valid) {
1590 fwsm = rd32(E1000_FWSM);
1591 factps = rd32(E1000_FACTPS);
1592
1593 if (!(factps & E1000_FACTPS_MNGCG) &&
1594 ((fwsm & E1000_FWSM_MODE_MASK) ==
1595 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1596 ret_val = true;
1597 goto out;
1598 }
1599 } else {
1600 if ((manc & E1000_MANC_SMBUS_EN) &&
1601 !(manc & E1000_MANC_ASF_EN)) {
1602 ret_val = true;
1603 goto out;
1604 }
1605 }
1606
1607 out:
1608 return ret_val;
1609 }