igb: Merge VLVF configuration into igb_vfta_set
drivers/net/ethernet/intel/igb/e1000_mac.c
1 /* Intel(R) Gigabit Ethernet Linux driver
2 * Copyright(c) 2007-2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23
24 #include <linux/if_ether.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29
30 #include "e1000_mac.h"
31
32 #include "igb.h"
33
34 static s32 igb_set_default_fc(struct e1000_hw *hw);
35 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
36
37 /**
38 * igb_get_bus_info_pcie - Get PCIe bus information
39 * @hw: pointer to the HW structure
40 *
41 * Determines and stores the system bus information for a particular
42 * network interface. The following bus information is determined and stored:
43 * bus speed, bus width, type (PCIe), and PCIe function.
44 **/
45 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
46 {
47 struct e1000_bus_info *bus = &hw->bus;
48 s32 ret_val;
49 u32 reg;
50 u16 pcie_link_status;
51
52 bus->type = e1000_bus_type_pci_express;
53
54 ret_val = igb_read_pcie_cap_reg(hw,
55 PCI_EXP_LNKSTA,
56 &pcie_link_status);
57 if (ret_val) {
58 bus->width = e1000_bus_width_unknown;
59 bus->speed = e1000_bus_speed_unknown;
60 } else {
61 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
62 case PCI_EXP_LNKSTA_CLS_2_5GB:
63 bus->speed = e1000_bus_speed_2500;
64 break;
65 case PCI_EXP_LNKSTA_CLS_5_0GB:
66 bus->speed = e1000_bus_speed_5000;
67 break;
68 default:
69 bus->speed = e1000_bus_speed_unknown;
70 break;
71 }
72
73 bus->width = (enum e1000_bus_width)((pcie_link_status &
74 PCI_EXP_LNKSTA_NLW) >>
75 PCI_EXP_LNKSTA_NLW_SHIFT);
76 }
77
78 reg = rd32(E1000_STATUS);
79 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
80
81 return 0;
82 }
83
84 /**
85 * igb_clear_vfta - Clear VLAN filter table
86 * @hw: pointer to the HW structure
87 *
88 * Clears the register array which contains the VLAN filter table by
89 * setting all the values to 0.
90 **/
91 void igb_clear_vfta(struct e1000_hw *hw)
92 {
93 u32 offset;
94
95 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
96 hw->mac.ops.write_vfta(hw, offset, 0);
97 }
98
99 /**
100 * igb_write_vfta - Write value to VLAN filter table
101 * @hw: pointer to the HW structure
102 * @offset: register offset in VLAN filter table
103 * @value: register value written to VLAN filter table
104 *
105 * Writes value at the given offset in the register array which stores
106 * the VLAN filter table.
107 **/
108 void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
109 {
110 struct igb_adapter *adapter = hw->back;
111
112 array_wr32(E1000_VFTA, offset, value);
113 wrfl();
114
115 adapter->shadow_vfta[offset] = value;
116 }
117
118 /**
119 * igb_init_rx_addrs - Initialize receive addresses
120 * @hw: pointer to the HW structure
121 * @rar_count: number of receive address registers
122 *
123 * Sets up the receive address registers by setting the base receive address
124 * register to the device's MAC address and clearing all the other receive
125 * address registers to 0.
126 **/
127 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
128 {
129 u32 i;
130 u8 mac_addr[ETH_ALEN] = {0};
131
132 /* Setup the receive address */
133 hw_dbg("Programming MAC Address into RAR[0]\n");
134
135 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
136
137 /* Zero out the other (rar_entry_count - 1) receive addresses */
138 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
139 for (i = 1; i < rar_count; i++)
140 hw->mac.ops.rar_set(hw, mac_addr, i);
141 }
142
143 /**
144 * igb_find_vlvf_slot - find the VLAN id or the first empty slot
145 * @hw: pointer to hardware structure
146 * @vlan: VLAN id to write to VLAN filter
147 * @vlvf_bypass: skip VLVF if no match is found
148 *
149 * return the VLVF index where this VLAN id should be placed
150 *
151 **/
152 static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
153 {
154 s32 regindex, first_empty_slot;
155 u32 bits;
156
157 /* short cut the special case */
158 if (vlan == 0)
159 return 0;
160
161 /* if vlvf_bypass is set we don't want to use an empty slot, we
162 * will simply bypass the VLVF if there are no entries present in the
163 * VLVF that contain our VLAN
164 */
165 first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;
166
167 /* Search for the VLAN id in the VLVF entries. Save off the first empty
168 * slot found along the way.
169 *
170 * pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1
171 */
172 for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
173 bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
174 if (bits == vlan)
175 return regindex;
176 if (!first_empty_slot && !bits)
177 first_empty_slot = regindex;
178 }
179
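/* Note: the "?:" form is the GCC shorthand for x ? x : y, so this returns
 * first_empty_slot unless it is still 0 (no matching entry and no free
 * slot were found), in which case -E1000_ERR_NO_SPACE is reported.
 */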
180 return first_empty_slot ? : -E1000_ERR_NO_SPACE;
181 }
182
183 /**
184 * igb_vfta_set - enable or disable vlan in VLAN filter table
185 * @hw: pointer to the HW structure
186 * @vlan: VLAN id to add or remove
187 * @vind: VMDq output index that maps queue to VLAN id
188 * @vlan_on: if true add filter, if false remove
 * @vlvf_bypass: skip VLVF if no match is found
189 *
190 * Sets or clears a bit in the VLAN filter table array based on VLAN id
191 * and if we are adding or removing the filter
192 **/
193 s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
194 bool vlan_on, bool vlvf_bypass)
195 {
196 struct igb_adapter *adapter = hw->back;
197 u32 regidx, vfta_delta, vfta, bits;
198 s32 vlvf_index;
199
200 if ((vlan > 4095) || (vind > 7))
201 return -E1000_ERR_PARAM;
202
203 /* this is a 2 part operation - first the VFTA, then the
204 * VLVF and VLVFB if VT Mode is set
205 * We don't write the VFTA until we know the VLVF part succeeded.
206 */
207
208 /* Part 1
209 * The VFTA is a bitstring made up of 128 32-bit registers
210 * that enable the particular VLAN id, much like the MTA:
211 * bits[11-5]: which register
212 * bits[4-0]: which bit in the register
213 */
214 regidx = vlan / 32;
215 vfta_delta = 1 << (vlan % 32);
216 vfta = adapter->shadow_vfta[regidx];
217
218 /* vfta_delta represents the difference between the current value
219 * of vfta and the value we want in the register. Since the diff
220 * is an XOR mask we can just update vfta using an XOR.
221 */
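/* Worked example: VLAN id 1234 selects shadow_vfta[38] (1234 / 32) and
 * bit 18 (1234 % 32), so vfta_delta starts as 1 << 18. If that bit is
 * already in the requested state, the mask below clears vfta_delta to 0
 * and the VFTA write at the end of the function is skipped.
 */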
222 vfta_delta &= vlan_on ? ~vfta : vfta;
223 vfta ^= vfta_delta;
224
225 /* Part 2
226 * If VT Mode is set
227 * Either vlan_on
228 * make sure the VLAN is in VLVF
229 * set the vind bit in the matching VLVFB
230 * Or !vlan_on
231 * clear the pool bit and possibly the vind
232 */
233 if (!adapter->vfs_allocated_count)
234 goto vfta_update;
235
236 vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
237 if (vlvf_index < 0) {
238 if (vlvf_bypass)
239 goto vfta_update;
240 return vlvf_index;
241 }
242
243 bits = rd32(E1000_VLVF(vlvf_index));
244
245 /* set the pool bit */
246 bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
247 if (vlan_on)
248 goto vlvf_update;
249
250 /* clear the pool bit */
251 bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
252
253 if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
254 /* Clear VFTA first, then disable VLVF. Otherwise
255 * we run the risk of stray packets leaking into
256 * the PF via the default pool
257 */
258 if (vfta_delta)
259 hw->mac.ops.write_vfta(hw, regidx, vfta);
260
261 /* disable VLVF and clear remaining bit from pool */
262 wr32(E1000_VLVF(vlvf_index), 0);
263
264 return 0;
265 }
266
267 /* If there are still bits set in the VLVFB registers
268 * for the VLAN ID indicated we need to see if the
269 * caller is requesting that we clear the VFTA entry bit.
270 * If the caller has requested that we clear the VFTA
271 * entry bit but there are still pools/VFs using this VLAN
272 * ID entry then ignore the request. We're not worried
273 * about the case where we're turning the VFTA VLAN ID
274 * entry bit on, only when requested to turn it off as
275 * there may be multiple pools and/or VFs using the
276 * VLAN ID entry. In that case we cannot clear the
277 * VFTA bit until all pools/VFs using that VLAN ID have also
278 * been cleared. This will be indicated by "bits" being
279 * zero.
280 */
281 vfta_delta = 0;
282
283 vlvf_update:
284 /* record pool change and enable VLAN ID if not already enabled */
285 wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);
286
287 vfta_update:
288 /* bit was set/cleared before we started */
289 if (vfta_delta)
290 hw->mac.ops.write_vfta(hw, regidx, vfta);
291
292 return 0;
293 }
294
295 /**
296 * igb_check_alt_mac_addr - Check for alternate MAC addr
297 * @hw: pointer to the HW structure
298 *
299 * Checks the nvm for an alternate MAC address. An alternate MAC address
300 * can be set up by pre-boot software and must be treated like a permanent
301 * address, overriding the actual permanent MAC address. If an
302 * alternate MAC address is found it is saved in the hw struct and
303 * programmed into RAR0 and the function returns success, otherwise the
304 * function returns an error.
305 **/
306 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
307 {
308 u32 i;
309 s32 ret_val = 0;
310 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
311 u8 alt_mac_addr[ETH_ALEN];
312
313 /* Alternate MAC address is handled by the option ROM for 82580
314 * and newer. SW support not required.
315 */
316 if (hw->mac.type >= e1000_82580)
317 goto out;
318
319 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
320 &nvm_alt_mac_addr_offset);
321 if (ret_val) {
322 hw_dbg("NVM Read Error\n");
323 goto out;
324 }
325
326 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
327 (nvm_alt_mac_addr_offset == 0x0000))
328 /* There is no Alternate MAC Address */
329 goto out;
330
331 if (hw->bus.func == E1000_FUNC_1)
332 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
333 if (hw->bus.func == E1000_FUNC_2)
334 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
335
336 if (hw->bus.func == E1000_FUNC_3)
337 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
338 for (i = 0; i < ETH_ALEN; i += 2) {
339 offset = nvm_alt_mac_addr_offset + (i >> 1);
340 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
341 if (ret_val) {
342 hw_dbg("NVM Read Error\n");
343 goto out;
344 }
345
346 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
347 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
348 }
349
350 /* if multicast bit is set, the alternate address will not be used */
351 if (is_multicast_ether_addr(alt_mac_addr)) {
352 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
353 goto out;
354 }
355
356 /* We have a valid alternate MAC address, and we want to treat it the
357 * same as the normal permanent MAC address stored by the HW into the
358 * RAR. Do this by mapping this address into RAR0.
359 */
360 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
361
362 out:
363 return ret_val;
364 }
365
366 /**
367 * igb_rar_set - Set receive address register
368 * @hw: pointer to the HW structure
369 * @addr: pointer to the receive address
370 * @index: receive address array register
371 *
372 * Sets the receive address array register at index to the address passed
373 * in by addr.
374 **/
375 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
376 {
377 u32 rar_low, rar_high;
378
379 /* HW expects these in little endian so we reverse the byte order
380 * from network order (big endian) to little endian
381 */
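/* e.g. the address 00:11:22:33:44:55 becomes rar_low = 0x33221100 and
 * rar_high = 0x00005544 (before the AV bit is OR'd in below).
 */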
382 rar_low = ((u32) addr[0] |
383 ((u32) addr[1] << 8) |
384 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
385
386 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
387
388 /* If MAC address zero, no need to set the AV bit */
389 if (rar_low || rar_high)
390 rar_high |= E1000_RAH_AV;
391
392 /* Some bridges will combine consecutive 32-bit writes into
393 * a single burst write, which will malfunction on some parts.
394 * The flushes avoid this.
395 */
396 wr32(E1000_RAL(index), rar_low);
397 wrfl();
398 wr32(E1000_RAH(index), rar_high);
399 wrfl();
400 }
401
402 /**
403 * igb_mta_set - Set multicast filter table address
404 * @hw: pointer to the HW structure
405 * @hash_value: determines the MTA register and bit to set
406 *
407 * The multicast table address is a register array of 32-bit registers.
408 * The hash_value is used to determine what register the bit is in, the
409 * current value is read, the new bit is OR'd in and the new value is
410 * written back into the register.
411 **/
412 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
413 {
414 u32 hash_bit, hash_reg, mta;
415
416 /* The MTA is a register array of 32-bit registers. It is
417 * treated like an array of (32*mta_reg_count) bits. We want to
418 * set bit BitArray[hash_value]. So we figure out what register
419 * the bit is in, read it, OR in the new bit, then write
420 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
421 * mask to bits 31:5 of the hash value which gives us the
422 * register we're modifying. The hash bit within that register
423 * is determined by the lower 5 bits of the hash value.
424 */
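/* e.g. with mta_reg_count = 128 and hash_value = 0x563 (the case 0 example
 * in igb_hash_mc_addr): hash_reg = (0x563 >> 5) & 0x7F = 0x2B and
 * hash_bit = 0x563 & 0x1F = 3, so bit 3 of MTA[43] is set.
 */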
425 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
426 hash_bit = hash_value & 0x1F;
427
428 mta = array_rd32(E1000_MTA, hash_reg);
429
430 mta |= (1 << hash_bit);
431
432 array_wr32(E1000_MTA, hash_reg, mta);
433 wrfl();
434 }
435
436 /**
437 * igb_hash_mc_addr - Generate a multicast hash value
438 * @hw: pointer to the HW structure
439 * @mc_addr: pointer to a multicast address
440 *
441 * Generates a multicast address hash value which is used to determine
442 * the multicast filter table array address and new table value. See
443 * igb_mta_set()
444 **/
445 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
446 {
447 u32 hash_value, hash_mask;
448 u8 bit_shift = 0;
449
450 /* Register count multiplied by bits per register */
451 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
452
453 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
454 * where 0xFF would still fall within the hash mask.
455 */
456 while (hash_mask >> bit_shift != 0xFF)
457 bit_shift++;
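/* e.g. mta_reg_count = 128 gives hash_mask = 0xFFF, and the loop above
 * stops at bit_shift = 4 since 0xFFF >> 4 == 0xFF.
 */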
458
459 /* The portion of the address that is used for the hash table
460 * is determined by the mc_filter_type setting.
461 * The algorithm is such that there is a total of 8 bits of shifting.
462 * The bit_shift for a mc_filter_type of 0 represents the number of
463 * left-shifts where the MSB of mc_addr[5] would still fall within
464 * the hash_mask. Case 0 does this exactly. Since there are a total
465 * of 8 bits of shifting, then mc_addr[4] will shift right the
466 * remaining number of bits. Thus 8 - bit_shift. The rest of the
467 * cases are a variation of this algorithm...essentially raising the
468 * number of bits to shift mc_addr[5] left, while still keeping the
469 * 8-bit shifting total.
470 *
471 * For example, given the following Destination MAC Address and an
472 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
473 * we can see that the bit_shift for case 0 is 4. These are the hash
474 * values resulting from each mc_filter_type...
475 * [0] [1] [2] [3] [4] [5]
476 * 01 AA 00 12 34 56
477 * LSB MSB
478 *
479 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
480 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
481 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
482 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
483 */
484 switch (hw->mac.mc_filter_type) {
485 default:
486 case 0:
487 break;
488 case 1:
489 bit_shift += 1;
490 break;
491 case 2:
492 bit_shift += 2;
493 break;
494 case 3:
495 bit_shift += 4;
496 break;
497 }
498
499 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
500 (((u16) mc_addr[5]) << bit_shift)));
501
502 return hash_value;
503 }
504
505 /**
506 * igb_update_mc_addr_list - Update Multicast addresses
507 * @hw: pointer to the HW structure
508 * @mc_addr_list: array of multicast addresses to program
509 * @mc_addr_count: number of multicast addresses to program
510 *
511 * Updates entire Multicast Table Array.
512 * The caller must have a packed mc_addr_list of multicast addresses.
513 **/
514 void igb_update_mc_addr_list(struct e1000_hw *hw,
515 u8 *mc_addr_list, u32 mc_addr_count)
516 {
517 u32 hash_value, hash_bit, hash_reg;
518 int i;
519
520 /* clear mta_shadow */
521 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
522
523 /* update mta_shadow from mc_addr_list */
524 for (i = 0; (u32) i < mc_addr_count; i++) {
525 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
526
527 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
528 hash_bit = hash_value & 0x1F;
529
530 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
531 mc_addr_list += (ETH_ALEN);
532 }
533
534 /* replace the entire MTA table */
535 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
536 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
537 wrfl();
538 }
539
540 /**
541 * igb_clear_hw_cntrs_base - Clear base hardware counters
542 * @hw: pointer to the HW structure
543 *
544 * Clears the base hardware counters by reading the counter registers.
545 **/
546 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
547 {
548 rd32(E1000_CRCERRS);
549 rd32(E1000_SYMERRS);
550 rd32(E1000_MPC);
551 rd32(E1000_SCC);
552 rd32(E1000_ECOL);
553 rd32(E1000_MCC);
554 rd32(E1000_LATECOL);
555 rd32(E1000_COLC);
556 rd32(E1000_DC);
557 rd32(E1000_SEC);
558 rd32(E1000_RLEC);
559 rd32(E1000_XONRXC);
560 rd32(E1000_XONTXC);
561 rd32(E1000_XOFFRXC);
562 rd32(E1000_XOFFTXC);
563 rd32(E1000_FCRUC);
564 rd32(E1000_GPRC);
565 rd32(E1000_BPRC);
566 rd32(E1000_MPRC);
567 rd32(E1000_GPTC);
568 rd32(E1000_GORCL);
569 rd32(E1000_GORCH);
570 rd32(E1000_GOTCL);
571 rd32(E1000_GOTCH);
572 rd32(E1000_RNBC);
573 rd32(E1000_RUC);
574 rd32(E1000_RFC);
575 rd32(E1000_ROC);
576 rd32(E1000_RJC);
577 rd32(E1000_TORL);
578 rd32(E1000_TORH);
579 rd32(E1000_TOTL);
580 rd32(E1000_TOTH);
581 rd32(E1000_TPR);
582 rd32(E1000_TPT);
583 rd32(E1000_MPTC);
584 rd32(E1000_BPTC);
585 }
586
587 /**
588 * igb_check_for_copper_link - Check for link (Copper)
589 * @hw: pointer to the HW structure
590 *
591 * Checks to see if the link status of the hardware has changed. If a
592 * change in link status has been detected, then we read the PHY registers
593 * to get the current speed/duplex if link exists.
594 **/
595 s32 igb_check_for_copper_link(struct e1000_hw *hw)
596 {
597 struct e1000_mac_info *mac = &hw->mac;
598 s32 ret_val;
599 bool link;
600
601 /* We only want to go out to the PHY registers to see if Auto-Neg
602 * has completed and/or if our link status has changed. The
603 * get_link_status flag is set upon receiving a Link Status
604 * Change or Rx Sequence Error interrupt.
605 */
606 if (!mac->get_link_status) {
607 ret_val = 0;
608 goto out;
609 }
610
611 /* First we want to see if the MII Status Register reports
612 * link. If so, then we want to get the current speed/duplex
613 * of the PHY.
614 */
615 ret_val = igb_phy_has_link(hw, 1, 0, &link);
616 if (ret_val)
617 goto out;
618
619 if (!link)
620 goto out; /* No link detected */
621
622 mac->get_link_status = false;
623
624 /* Check if there was DownShift, must be checked
625 * immediately after link-up
626 */
627 igb_check_downshift(hw);
628
629 /* If we are forcing speed/duplex, then we simply return since
630 * we have already determined whether we have link or not.
631 */
632 if (!mac->autoneg) {
633 ret_val = -E1000_ERR_CONFIG;
634 goto out;
635 }
636
637 /* Auto-Neg is enabled. Auto Speed Detection takes care
638 * of MAC speed/duplex configuration. So we only need to
639 * configure Collision Distance in the MAC.
640 */
641 igb_config_collision_dist(hw);
642
643 /* Configure Flow Control now that Auto-Neg has completed.
644 * First, we need to restore the desired flow control
645 * settings because we may have had to re-autoneg with a
646 * different link partner.
647 */
648 ret_val = igb_config_fc_after_link_up(hw);
649 if (ret_val)
650 hw_dbg("Error configuring flow control\n");
651
652 out:
653 return ret_val;
654 }
655
656 /**
657 * igb_setup_link - Setup flow control and link settings
658 * @hw: pointer to the HW structure
659 *
660 * Determines which flow control settings to use, then configures flow
661 * control. Calls the appropriate media-specific link configuration
662 * function. Assuming the adapter has a valid link partner, a valid link
663 * should be established. Assumes the hardware has previously been reset
664 * and the transmitter and receiver are not enabled.
665 **/
666 s32 igb_setup_link(struct e1000_hw *hw)
667 {
668 s32 ret_val = 0;
669
670 /* In the case of the phy reset being blocked, we already have a link.
671 * We do not need to set it up again.
672 */
673 if (igb_check_reset_block(hw))
674 goto out;
675
676 /* If requested flow control is set to default, set flow control
677 * based on the EEPROM flow control settings.
678 */
679 if (hw->fc.requested_mode == e1000_fc_default) {
680 ret_val = igb_set_default_fc(hw);
681 if (ret_val)
682 goto out;
683 }
684
685 /* We want to save off the original Flow Control configuration just
686 * in case we get disconnected and then reconnected into a different
687 * hub or switch with different Flow Control capabilities.
688 */
689 hw->fc.current_mode = hw->fc.requested_mode;
690
691 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
692
693 /* Call the necessary media_type subroutine to configure the link. */
694 ret_val = hw->mac.ops.setup_physical_interface(hw);
695 if (ret_val)
696 goto out;
697
698 /* Initialize the flow control address, type, and PAUSE timer
699 * registers to their default values. This is done even if flow
700 * control is disabled, because it does not hurt anything to
701 * initialize these registers.
702 */
703 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
704 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
705 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
706 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
707
708 wr32(E1000_FCTTV, hw->fc.pause_time);
709
710 ret_val = igb_set_fc_watermarks(hw);
711
712 out:
713
714 return ret_val;
715 }
716
717 /**
718 * igb_config_collision_dist - Configure collision distance
719 * @hw: pointer to the HW structure
720 *
721 * Configures the collision distance to the default value and is used
722 * during link setup. Currently no func pointer exists and all
723 * implementations are handled in the generic version of this function.
724 **/
725 void igb_config_collision_dist(struct e1000_hw *hw)
726 {
727 u32 tctl;
728
729 tctl = rd32(E1000_TCTL);
730
731 tctl &= ~E1000_TCTL_COLD;
732 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
733
734 wr32(E1000_TCTL, tctl);
735 wrfl();
736 }
737
738 /**
739 * igb_set_fc_watermarks - Set flow control high/low watermarks
740 * @hw: pointer to the HW structure
741 *
742 * Sets the flow control high/low threshold (watermark) registers. If
743 * flow control XON frame transmission is enabled, then XON frame
744 * transmission is enabled as well.
745 **/
746 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
747 {
748 s32 ret_val = 0;
749 u32 fcrtl = 0, fcrth = 0;
750
751 /* Set the flow control receive threshold registers. Normally,
752 * these registers will be set to a default threshold that may be
753 * adjusted later by the driver's runtime code. However, if the
754 * ability to transmit pause frames is not enabled, then these
755 * registers will be set to 0.
756 */
757 if (hw->fc.current_mode & e1000_fc_tx_pause) {
758 /* We need to set up the Receive Threshold high and low water
759 * marks as well as (optionally) enabling the transmission of
760 * XON frames.
761 */
762 fcrtl = hw->fc.low_water;
763 if (hw->fc.send_xon)
764 fcrtl |= E1000_FCRTL_XONE;
765
766 fcrth = hw->fc.high_water;
767 }
768 wr32(E1000_FCRTL, fcrtl);
769 wr32(E1000_FCRTH, fcrth);
770
771 return ret_val;
772 }
773
774 /**
775 * igb_set_default_fc - Set flow control default values
776 * @hw: pointer to the HW structure
777 *
778 * Read the EEPROM for the default values for flow control and store the
779 * values.
780 **/
781 static s32 igb_set_default_fc(struct e1000_hw *hw)
782 {
783 s32 ret_val = 0;
784 u16 lan_offset;
785 u16 nvm_data;
786
787 /* Read and store word 0x0F of the EEPROM. This word contains bits
788 * that determine the hardware's default PAUSE (flow control) mode,
789 * a bit that determines whether the HW defaults to enabling or
790 * disabling auto-negotiation, and the direction of the
791 * SW defined pins. If there is no SW over-ride of the flow
792 * control setting, then the variable hw->fc will
793 * be initialized based on a value in the EEPROM.
794 */
795 if (hw->mac.type == e1000_i350) {
796 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
797 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
798 + lan_offset, 1, &nvm_data);
799 } else {
800 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
801 1, &nvm_data);
802 }
803
804 if (ret_val) {
805 hw_dbg("NVM Read Error\n");
806 goto out;
807 }
808
809 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
810 hw->fc.requested_mode = e1000_fc_none;
811 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
812 NVM_WORD0F_ASM_DIR)
813 hw->fc.requested_mode = e1000_fc_tx_pause;
814 else
815 hw->fc.requested_mode = e1000_fc_full;
816
817 out:
818 return ret_val;
819 }
820
821 /**
822 * igb_force_mac_fc - Force the MAC's flow control settings
823 * @hw: pointer to the HW structure
824 *
825 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
826 * device control register to reflect the adapter settings. TFCE and RFCE
827 * need to be explicitly set by software when a copper PHY is used because
828 * autonegotiation is managed by the PHY rather than the MAC. Software must
829 * also configure these bits when link is forced on a fiber connection.
830 **/
831 s32 igb_force_mac_fc(struct e1000_hw *hw)
832 {
833 u32 ctrl;
834 s32 ret_val = 0;
835
836 ctrl = rd32(E1000_CTRL);
837
838 /* Because we didn't get link via the internal auto-negotiation
839 * mechanism (we either forced link or we got link via PHY
840 * auto-neg), we have to manually enable/disable transmit and
841 * receive flow control.
842 *
843 * The "Case" statement below enables/disables flow control
844 * according to the "hw->fc.current_mode" parameter.
845 *
846 * The possible values of the "fc" parameter are:
847 * 0: Flow control is completely disabled
848 * 1: Rx flow control is enabled (we can receive pause
849 * frames but not send pause frames).
850 * 2: Tx flow control is enabled (we can send pause frames
851 * but we do not receive pause frames).
852 * 3: Both Rx and Tx flow control (symmetric) is enabled.
853 * other: No other values should be possible at this point.
854 */
855 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
856
857 switch (hw->fc.current_mode) {
858 case e1000_fc_none:
859 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
860 break;
861 case e1000_fc_rx_pause:
862 ctrl &= (~E1000_CTRL_TFCE);
863 ctrl |= E1000_CTRL_RFCE;
864 break;
865 case e1000_fc_tx_pause:
866 ctrl &= (~E1000_CTRL_RFCE);
867 ctrl |= E1000_CTRL_TFCE;
868 break;
869 case e1000_fc_full:
870 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
871 break;
872 default:
873 hw_dbg("Flow control param set incorrectly\n");
874 ret_val = -E1000_ERR_CONFIG;
875 goto out;
876 }
877
878 wr32(E1000_CTRL, ctrl);
879
880 out:
881 return ret_val;
882 }
883
884 /**
885 * igb_config_fc_after_link_up - Configures flow control after link
886 * @hw: pointer to the HW structure
887 *
888 * Checks the status of auto-negotiation after link up to ensure that the
889 * speed and duplex were not forced. If the link needed to be forced, then
890 * flow control needs to be forced also. If auto-negotiation is enabled
891 * and did not fail, then we configure flow control based on our link
892 * partner.
893 **/
894 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
895 {
896 struct e1000_mac_info *mac = &hw->mac;
897 s32 ret_val = 0;
898 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
899 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
900 u16 speed, duplex;
901
902 /* Check for the case where we have fiber media and auto-neg failed
903 * so we had to force link. In this case, we need to force the
904 * configuration of the MAC to match the "fc" parameter.
905 */
906 if (mac->autoneg_failed) {
907 if (hw->phy.media_type == e1000_media_type_internal_serdes)
908 ret_val = igb_force_mac_fc(hw);
909 } else {
910 if (hw->phy.media_type == e1000_media_type_copper)
911 ret_val = igb_force_mac_fc(hw);
912 }
913
914 if (ret_val) {
915 hw_dbg("Error forcing flow control settings\n");
916 goto out;
917 }
918
919 /* Check for the case where we have copper media and auto-neg is
920 * enabled. In this case, we need to check and see if Auto-Neg
921 * has completed, and if so, how the PHY and link partner has
922 * flow control configured.
923 */
924 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
925 /* Read the MII Status Register and check to see if AutoNeg
926 * has completed. We read this twice because this reg has
927 * some "sticky" (latched) bits.
928 */
929 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
930 &mii_status_reg);
931 if (ret_val)
932 goto out;
933 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
934 &mii_status_reg);
935 if (ret_val)
936 goto out;
937
938 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
939 hw_dbg("Copper PHY and Auto Neg has not completed.\n");
940 goto out;
941 }
942
943 /* The AutoNeg process has completed, so we now need to
944 * read both the Auto Negotiation Advertisement
945 * Register (Address 4) and the Auto_Negotiation Base
946 * Page Ability Register (Address 5) to determine how
947 * flow control was negotiated.
948 */
949 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
950 &mii_nway_adv_reg);
951 if (ret_val)
952 goto out;
953 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
954 &mii_nway_lp_ability_reg);
955 if (ret_val)
956 goto out;
957
958 /* Two bits in the Auto Negotiation Advertisement Register
959 * (Address 4) and two bits in the Auto Negotiation Base
960 * Page Ability Register (Address 5) determine flow control
961 * for both the PHY and the link partner. The following
962 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
963 * 1999, describes these PAUSE resolution bits and how flow
964 * control is determined based upon these settings.
965 * NOTE: DC = Don't Care
966 *
967 * LOCAL DEVICE | LINK PARTNER
968 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
969 *-------|---------|-------|---------|--------------------
970 * 0 | 0 | DC | DC | e1000_fc_none
971 * 0 | 1 | 0 | DC | e1000_fc_none
972 * 0 | 1 | 1 | 0 | e1000_fc_none
973 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
974 * 1 | 0 | 0 | DC | e1000_fc_none
975 * 1 | DC | 1 | DC | e1000_fc_full
976 * 1 | 1 | 0 | 0 | e1000_fc_none
977 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
978 *
979 * Are both PAUSE bits set to 1? If so, this implies
980 * Symmetric Flow Control is enabled at both ends. The
981 * ASM_DIR bits are irrelevant per the spec.
982 *
983 * For Symmetric Flow Control:
984 *
985 * LOCAL DEVICE | LINK PARTNER
986 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
987 *-------|---------|-------|---------|--------------------
988 * 1 | DC | 1 | DC | e1000_fc_full
989 *
990 */
991 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
992 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
993 /* Now we need to check if the user selected RX ONLY
994 * of pause frames. In this case, we had to advertise
995 * FULL flow control because we could not advertise RX
996 * ONLY. Hence, we must now check to see if we need to
997 * turn OFF the TRANSMISSION of PAUSE frames.
998 */
999 if (hw->fc.requested_mode == e1000_fc_full) {
1000 hw->fc.current_mode = e1000_fc_full;
1001 hw_dbg("Flow Control = FULL.\n");
1002 } else {
1003 hw->fc.current_mode = e1000_fc_rx_pause;
1004 hw_dbg("Flow Control = RX PAUSE frames only.\n");
1005 }
1006 }
1007 /* For receiving PAUSE frames ONLY.
1008 *
1009 * LOCAL DEVICE | LINK PARTNER
1010 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1011 *-------|---------|-------|---------|--------------------
1012 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1013 */
1014 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1015 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1016 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1017 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1018 hw->fc.current_mode = e1000_fc_tx_pause;
1019 hw_dbg("Flow Control = TX PAUSE frames only.\n");
1020 }
1021 /* For transmitting PAUSE frames ONLY.
1022 *
1023 * LOCAL DEVICE | LINK PARTNER
1024 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1025 *-------|---------|-------|---------|--------------------
1026 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1027 */
1028 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1029 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1030 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1031 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1032 hw->fc.current_mode = e1000_fc_rx_pause;
1033 hw_dbg("Flow Control = RX PAUSE frames only.\n");
1034 }
1035 /* Per the IEEE spec, at this point flow control should be
1036 * disabled. However, we want to consider that we could
1037 * be connected to a legacy switch that doesn't advertise
1038 * desired flow control, but can be forced on the link
1039 * partner. So if we advertised no flow control, that is
1040 * what we will resolve to. If we advertised some kind of
1041 * receive capability (Rx Pause Only or Full Flow Control)
1042 * and the link partner advertised none, we will configure
1043 * ourselves to enable Rx Flow Control only. We can do
1044 * this safely for two reasons: If the link partner really
1045 * didn't want flow control enabled, and we enable Rx, no
1046 * harm done since we won't be receiving any PAUSE frames
1047 * anyway. If the intent on the link partner was to have
1048 * flow control enabled, then by us enabling RX only, we
1049 * can at least receive pause frames and process them.
1050 * This is a good idea because in most cases, since we are
1051 * predominantly a server NIC, more times than not we will
1052 * be asked to delay transmission of packets than asking
1053 * our link partner to pause transmission of frames.
1054 */
1055 else if ((hw->fc.requested_mode == e1000_fc_none) ||
1056 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
1057 (hw->fc.strict_ieee)) {
1058 hw->fc.current_mode = e1000_fc_none;
1059 hw_dbg("Flow Control = NONE.\n");
1060 } else {
1061 hw->fc.current_mode = e1000_fc_rx_pause;
1062 hw_dbg("Flow Control = RX PAUSE frames only.\n");
1063 }
1064
1065 /* Now we need to do one last check... If we auto-
1066 * negotiated to HALF DUPLEX, flow control should not be
1067 * enabled per IEEE 802.3 spec.
1068 */
1069 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
1070 if (ret_val) {
1071 hw_dbg("Error getting link speed and duplex\n");
1072 goto out;
1073 }
1074
1075 if (duplex == HALF_DUPLEX)
1076 hw->fc.current_mode = e1000_fc_none;
1077
1078 /* Now we call a subroutine to actually force the MAC
1079 * controller to use the correct flow control settings.
1080 */
1081 ret_val = igb_force_mac_fc(hw);
1082 if (ret_val) {
1083 hw_dbg("Error forcing flow control settings\n");
1084 goto out;
1085 }
1086 }
1087 /* Check for the case where we have SerDes media and auto-neg is
1088 * enabled. In this case, we need to check and see if Auto-Neg
1089 * has completed, and if so, how the PHY and link partner has
1090 * flow control configured.
1091 */
1092 if ((hw->phy.media_type == e1000_media_type_internal_serdes)
1093 && mac->autoneg) {
1094 /* Read the PCS_LSTS and check to see if AutoNeg
1095 * has completed.
1096 */
1097 pcs_status_reg = rd32(E1000_PCS_LSTAT);
1098
1099 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1100 hw_dbg("PCS Auto Neg has not completed.\n");
1101 return ret_val;
1102 }
1103
1104 /* The AutoNeg process has completed, so we now need to
1105 * read both the Auto Negotiation Advertisement
1106 * Register (PCS_ANADV) and the Auto_Negotiation Base
1107 * Page Ability Register (PCS_LPAB) to determine how
1108 * flow control was negotiated.
1109 */
1110 pcs_adv_reg = rd32(E1000_PCS_ANADV);
1111 pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
1112
1113 /* Two bits in the Auto Negotiation Advertisement Register
1114 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1115 * Page Ability Register (PCS_LPAB) determine flow control
1116 * for both the PHY and the link partner. The following
1117 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1118 * 1999, describes these PAUSE resolution bits and how flow
1119 * control is determined based upon these settings.
1120 * NOTE: DC = Don't Care
1121 *
1122 * LOCAL DEVICE | LINK PARTNER
1123 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1124 *-------|---------|-------|---------|--------------------
1125 * 0 | 0 | DC | DC | e1000_fc_none
1126 * 0 | 1 | 0 | DC | e1000_fc_none
1127 * 0 | 1 | 1 | 0 | e1000_fc_none
1128 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1129 * 1 | 0 | 0 | DC | e1000_fc_none
1130 * 1 | DC | 1 | DC | e1000_fc_full
1131 * 1 | 1 | 0 | 0 | e1000_fc_none
1132 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1133 *
1134 * Are both PAUSE bits set to 1? If so, this implies
1135 * Symmetric Flow Control is enabled at both ends. The
1136 * ASM_DIR bits are irrelevant per the spec.
1137 *
1138 * For Symmetric Flow Control:
1139 *
1140 * LOCAL DEVICE | LINK PARTNER
1141 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1142 *-------|---------|-------|---------|--------------------
1143 * 1 | DC | 1 | DC | e1000_fc_full
1144 *
1145 */
1146 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1147 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1148 /* Now we need to check if the user selected Rx ONLY
1149 * of pause frames. In this case, we had to advertise
1150 * FULL flow control because we could not advertise Rx
1151 * ONLY. Hence, we must now check to see if we need to
1152 * turn OFF the TRANSMISSION of PAUSE frames.
1153 */
1154 if (hw->fc.requested_mode == e1000_fc_full) {
1155 hw->fc.current_mode = e1000_fc_full;
1156 hw_dbg("Flow Control = FULL.\n");
1157 } else {
1158 hw->fc.current_mode = e1000_fc_rx_pause;
1159 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1160 }
1161 }
1162 /* For receiving PAUSE frames ONLY.
1163 *
1164 * LOCAL DEVICE | LINK PARTNER
1165 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1166 *-------|---------|-------|---------|--------------------
1167 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1168 */
1169 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1170 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1171 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1172 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1173 hw->fc.current_mode = e1000_fc_tx_pause;
1174 hw_dbg("Flow Control = Tx PAUSE frames only.\n");
1175 }
1176 /* For transmitting PAUSE frames ONLY.
1177 *
1178 * LOCAL DEVICE | LINK PARTNER
1179 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1180 *-------|---------|-------|---------|--------------------
1181 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1182 */
1183 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1184 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1185 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1186 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1187 hw->fc.current_mode = e1000_fc_rx_pause;
1188 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1189 } else {
1190 /* Per the IEEE spec, at this point flow control
1191 * should be disabled.
1192 */
1193 hw->fc.current_mode = e1000_fc_none;
1194 hw_dbg("Flow Control = NONE.\n");
1195 }
1196
1197 /* Now we call a subroutine to actually force the MAC
1198 * controller to use the correct flow control settings.
1199 */
1200 pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
1201 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1202 wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
1203
1204 ret_val = igb_force_mac_fc(hw);
1205 if (ret_val) {
1206 hw_dbg("Error forcing flow control settings\n");
1207 return ret_val;
1208 }
1209 }
1210
1211 out:
1212 return ret_val;
1213 }
1214
1215 /**
1216 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
1217 * @hw: pointer to the HW structure
1218 * @speed: stores the current speed
1219 * @duplex: stores the current duplex
1220 *
1221 * Read the status register for the current speed/duplex and store the current
1222 * speed and duplex for copper connections.
1223 **/
1224 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1225 u16 *duplex)
1226 {
1227 u32 status;
1228
1229 status = rd32(E1000_STATUS);
1230 if (status & E1000_STATUS_SPEED_1000) {
1231 *speed = SPEED_1000;
1232 hw_dbg("1000 Mbs, ");
1233 } else if (status & E1000_STATUS_SPEED_100) {
1234 *speed = SPEED_100;
1235 hw_dbg("100 Mbs, ");
1236 } else {
1237 *speed = SPEED_10;
1238 hw_dbg("10 Mbs, ");
1239 }
1240
1241 if (status & E1000_STATUS_FD) {
1242 *duplex = FULL_DUPLEX;
1243 hw_dbg("Full Duplex\n");
1244 } else {
1245 *duplex = HALF_DUPLEX;
1246 hw_dbg("Half Duplex\n");
1247 }
1248
1249 return 0;
1250 }
1251
1252 /**
1253 * igb_get_hw_semaphore - Acquire hardware semaphore
1254 * @hw: pointer to the HW structure
1255 *
1256 * Acquire the HW semaphore to access the PHY or NVM
1257 **/
1258 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1259 {
1260 u32 swsm;
1261 s32 ret_val = 0;
1262 s32 timeout = hw->nvm.word_size + 1;
1263 s32 i = 0;
1264
1265 /* Get the SW semaphore */
1266 while (i < timeout) {
1267 swsm = rd32(E1000_SWSM);
1268 if (!(swsm & E1000_SWSM_SMBI))
1269 break;
1270
1271 udelay(50);
1272 i++;
1273 }
1274
1275 if (i == timeout) {
1276 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1277 ret_val = -E1000_ERR_NVM;
1278 goto out;
1279 }
1280
1281 /* Get the FW semaphore. */
1282 for (i = 0; i < timeout; i++) {
1283 swsm = rd32(E1000_SWSM);
1284 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1285
1286 /* Semaphore acquired if bit latched */
1287 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1288 break;
1289
1290 udelay(50);
1291 }
1292
1293 if (i == timeout) {
1294 /* Release semaphores */
1295 igb_put_hw_semaphore(hw);
1296 hw_dbg("Driver can't access the NVM\n");
1297 ret_val = -E1000_ERR_NVM;
1298 goto out;
1299 }
1300
1301 out:
1302 return ret_val;
1303 }
1304
1305 /**
1306 * igb_put_hw_semaphore - Release hardware semaphore
1307 * @hw: pointer to the HW structure
1308 *
1309 * Release hardware semaphore used to access the PHY or NVM
1310 **/
1311 void igb_put_hw_semaphore(struct e1000_hw *hw)
1312 {
1313 u32 swsm;
1314
1315 swsm = rd32(E1000_SWSM);
1316
1317 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1318
1319 wr32(E1000_SWSM, swsm);
1320 }
1321
1322 /**
1323 * igb_get_auto_rd_done - Check for auto read completion
1324 * @hw: pointer to the HW structure
1325 *
1326 * Check EEPROM for Auto Read done bit.
1327 **/
1328 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1329 {
1330 s32 i = 0;
1331 s32 ret_val = 0;
1332
1333
1334 while (i < AUTO_READ_DONE_TIMEOUT) {
1335 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1336 break;
1337 usleep_range(1000, 2000);
1338 i++;
1339 }
1340
1341 if (i == AUTO_READ_DONE_TIMEOUT) {
1342 hw_dbg("Auto read by HW from NVM has not completed.\n");
1343 ret_val = -E1000_ERR_RESET;
1344 goto out;
1345 }
1346
1347 out:
1348 return ret_val;
1349 }
1350
1351 /**
1352 * igb_valid_led_default - Verify a valid default LED config
1353 * @hw: pointer to the HW structure
1354 * @data: pointer to the location where the default LED configuration word is returned
1355 *
1356 * Read the EEPROM for the current default LED configuration. If the
1357 * LED configuration is not valid, set to a valid LED configuration.
1358 **/
1359 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1360 {
1361 s32 ret_val;
1362
1363 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1364 if (ret_val) {
1365 hw_dbg("NVM Read Error\n");
1366 goto out;
1367 }
1368
1369 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1370 switch (hw->phy.media_type) {
1371 case e1000_media_type_internal_serdes:
1372 *data = ID_LED_DEFAULT_82575_SERDES;
1373 break;
1374 case e1000_media_type_copper:
1375 default:
1376 *data = ID_LED_DEFAULT;
1377 break;
1378 }
1379 }
1380 out:
1381 return ret_val;
1382 }
1383
1384 /**
1385 * igb_id_led_init - Initialize LED identification settings
1386 * @hw: pointer to the HW structure
1387 *
1388 **/
1389 s32 igb_id_led_init(struct e1000_hw *hw)
1390 {
1391 struct e1000_mac_info *mac = &hw->mac;
1392 s32 ret_val;
1393 const u32 ledctl_mask = 0x000000FF;
1394 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1395 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1396 u16 data, i, temp;
1397 const u16 led_mask = 0x0F;
1398
1399 /* i210 and i211 devices have different LED mechanism */
1400 if ((hw->mac.type == e1000_i210) ||
1401 (hw->mac.type == e1000_i211))
1402 ret_val = igb_valid_led_default_i210(hw, &data);
1403 else
1404 ret_val = igb_valid_led_default(hw, &data);
1405
1406 if (ret_val)
1407 goto out;
1408
1409 mac->ledctl_default = rd32(E1000_LEDCTL);
1410 mac->ledctl_mode1 = mac->ledctl_default;
1411 mac->ledctl_mode2 = mac->ledctl_default;
1412
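/* The NVM word is consumed as four 4-bit ID LED fields (hence the i << 2
 * shift below); each field selects the behaviour of the corresponding
 * 8-bit LED mode field in LEDCTL (hence the i << 3 shift).
 */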
1413 for (i = 0; i < 4; i++) {
1414 temp = (data >> (i << 2)) & led_mask;
1415 switch (temp) {
1416 case ID_LED_ON1_DEF2:
1417 case ID_LED_ON1_ON2:
1418 case ID_LED_ON1_OFF2:
1419 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1420 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1421 break;
1422 case ID_LED_OFF1_DEF2:
1423 case ID_LED_OFF1_ON2:
1424 case ID_LED_OFF1_OFF2:
1425 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1426 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1427 break;
1428 default:
1429 /* Do nothing */
1430 break;
1431 }
1432 switch (temp) {
1433 case ID_LED_DEF1_ON2:
1434 case ID_LED_ON1_ON2:
1435 case ID_LED_OFF1_ON2:
1436 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1437 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1438 break;
1439 case ID_LED_DEF1_OFF2:
1440 case ID_LED_ON1_OFF2:
1441 case ID_LED_OFF1_OFF2:
1442 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1443 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1444 break;
1445 default:
1446 /* Do nothing */
1447 break;
1448 }
1449 }
1450
1451 out:
1452 return ret_val;
1453 }
1454
1455 /**
1456 * igb_cleanup_led - Set LED config to default operation
1457 * @hw: pointer to the HW structure
1458 *
1459 * Remove the current LED configuration and set the LED configuration
1460 * to the default value, saved from the EEPROM.
1461 **/
1462 s32 igb_cleanup_led(struct e1000_hw *hw)
1463 {
1464 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1465 return 0;
1466 }
1467
1468 /**
1469 * igb_blink_led - Blink LED
1470 * @hw: pointer to the HW structure
1471 *
1472 * Blink the led's which are set to be on.
1473 **/
1474 s32 igb_blink_led(struct e1000_hw *hw)
1475 {
1476 u32 ledctl_blink = 0;
1477 u32 i;
1478
1479 if (hw->phy.media_type == e1000_media_type_fiber) {
1480 /* always blink LED0 for PCI-E fiber */
1481 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1482 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1483 } else {
1484 /* Set the blink bit for each LED that's "on" (0x0E)
1485 * (or "off" if inverted) in ledctl_mode2. The blink
1486 * logic in hardware only works when mode is set to "on"
1487 * so it must be changed accordingly when the mode is
1488 * "off" and inverted.
1489 */
1490 ledctl_blink = hw->mac.ledctl_mode2;
1491 for (i = 0; i < 32; i += 8) {
1492 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1493 E1000_LEDCTL_LED0_MODE_MASK;
1494 u32 led_default = hw->mac.ledctl_default >> i;
1495
1496 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1497 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1498 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1499 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1500 ledctl_blink &=
1501 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1502 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1503 E1000_LEDCTL_MODE_LED_ON) << i;
1504 }
1505 }
1506 }
1507
1508 wr32(E1000_LEDCTL, ledctl_blink);
1509
1510 return 0;
1511 }
1512
1513 /**
1514 * igb_led_off - Turn LED off
1515 * @hw: pointer to the HW structure
1516 *
1517 * Turn LED off.
1518 **/
1519 s32 igb_led_off(struct e1000_hw *hw)
1520 {
1521 switch (hw->phy.media_type) {
1522 case e1000_media_type_copper:
1523 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1524 break;
1525 default:
1526 break;
1527 }
1528
1529 return 0;
1530 }
1531
1532 /**
1533 * igb_disable_pcie_master - Disables PCI-express master access
1534 * @hw: pointer to the HW structure
1535 *
1536 * Returns 0 if successful, else returns -10
1537 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1538 * the master requests to be disabled.
1539 *
1540 * Disables PCI-Express master access and verifies there are no pending
1541 * requests.
1542 **/
1543 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1544 {
1545 u32 ctrl;
1546 s32 timeout = MASTER_DISABLE_TIMEOUT;
1547 s32 ret_val = 0;
1548
1549 if (hw->bus.type != e1000_bus_type_pci_express)
1550 goto out;
1551
1552 ctrl = rd32(E1000_CTRL);
1553 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1554 wr32(E1000_CTRL, ctrl);
1555
1556 while (timeout) {
1557 if (!(rd32(E1000_STATUS) &
1558 E1000_STATUS_GIO_MASTER_ENABLE))
1559 break;
1560 udelay(100);
1561 timeout--;
1562 }
1563
1564 if (!timeout) {
1565 hw_dbg("Master requests are pending.\n");
1566 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1567 goto out;
1568 }
1569
1570 out:
1571 return ret_val;
1572 }
1573
1574 /**
1575 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1576 * @hw: pointer to the HW structure
1577 *
1578 * Verify that when auto-negotiation is not used, MDI/MDIx is correctly
1579 * set; if it is not, the setting is forced to MDI mode only.
1580 **/
1581 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1582 {
1583 s32 ret_val = 0;
1584
1585 /* All MDI settings are supported on 82580 and newer. */
1586 if (hw->mac.type >= e1000_82580)
1587 goto out;
1588
1589 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1590 hw_dbg("Invalid MDI setting detected\n");
1591 hw->phy.mdix = 1;
1592 ret_val = -E1000_ERR_CONFIG;
1593 goto out;
1594 }
1595
1596 out:
1597 return ret_val;
1598 }
1599
1600 /**
1601 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1602 * @hw: pointer to the HW structure
1603 * @reg: 32bit register offset such as E1000_SCTL
1604 * @offset: register offset to write to
1605 * @data: data to write at register offset
1606 *
1607 * Writes an address/data control type register. There are several of these
1608 * and they all have the format (address << 8) | data, with bit 31 polled for
1609 * completion.
1610 **/
1611 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1612 u32 offset, u8 data)
1613 {
1614 u32 i, regvalue = 0;
1615 s32 ret_val = 0;
1616
1617 /* Set up the address and data */
1618 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1619 wr32(reg, regvalue);
1620
1621 /* Poll the ready bit to see if the write completed */
1622 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1623 udelay(5);
1624 regvalue = rd32(reg);
1625 if (regvalue & E1000_GEN_CTL_READY)
1626 break;
1627 }
1628 if (!(regvalue & E1000_GEN_CTL_READY)) {
1629 hw_dbg("Reg %08x did not indicate ready\n", reg);
1630 ret_val = -E1000_ERR_PHY;
1631 goto out;
1632 }
1633
1634 out:
1635 return ret_val;
1636 }
1637
1638 /**
1639 * igb_enable_mng_pass_thru - Enable processing of ARPs
1640 * @hw: pointer to the HW structure
1641 *
1642 * Verifies that the hardware needs to leave the interface enabled so that
1643 * frames can be directed to and from the management interface.
1644 **/
1645 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1646 {
1647 u32 manc;
1648 u32 fwsm, factps;
1649 bool ret_val = false;
1650
1651 if (!hw->mac.asf_firmware_present)
1652 goto out;
1653
1654 manc = rd32(E1000_MANC);
1655
1656 if (!(manc & E1000_MANC_RCV_TCO_EN))
1657 goto out;
1658
1659 if (hw->mac.arc_subsystem_valid) {
1660 fwsm = rd32(E1000_FWSM);
1661 factps = rd32(E1000_FACTPS);
1662
1663 if (!(factps & E1000_FACTPS_MNGCG) &&
1664 ((fwsm & E1000_FWSM_MODE_MASK) ==
1665 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1666 ret_val = true;
1667 goto out;
1668 }
1669 } else {
1670 if ((manc & E1000_MANC_SMBUS_EN) &&
1671 !(manc & E1000_MANC_ASF_EN)) {
1672 ret_val = true;
1673 goto out;
1674 }
1675 }
1676
1677 out:
1678 return ret_val;
1679 }