igb: add 82576 MAC support
deliverable/linux.git: drivers/net/igb/e1000_mac.c
1 /*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 #include <linux/if_ether.h>
29 #include <linux/delay.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32
33 #include "e1000_mac.h"
34
35 #include "igb.h"
36
37 static s32 igb_set_default_fc(struct e1000_hw *hw);
38 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39
40 /**
41 * igb_remove_device - Free device specific structure
42 * @hw: pointer to the HW structure
43 *
44 * If a device specific structure was allocated, this function will
45 * free it.
46 **/
47 void igb_remove_device(struct e1000_hw *hw)
48 {
49 /* Free the dev_spec member of the e1000_hw structure */
50 kfree(hw->dev_spec);
51 }
52
53 static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
54 {
55 struct igb_adapter *adapter = hw->back;
56
57 pci_read_config_word(adapter->pdev, reg, value);
58 }
59
60 static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
61 {
62 struct igb_adapter *adapter = hw->back;
63 u16 cap_offset;
64
65 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
66 if (!cap_offset)
67 return -E1000_ERR_CONFIG;
68
69 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
70
71 return 0;
72 }
73
74 /**
75 * igb_get_bus_info_pcie - Get PCIe bus information
76 * @hw: pointer to the HW structure
77 *
78 * Determines and stores the system bus information for a particular
79 * network interface. The following bus information is determined and stored:
80 * bus speed, bus width, type (PCIe), and PCIe function.
81 **/
82 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
83 {
84 struct e1000_bus_info *bus = &hw->bus;
85 s32 ret_val;
86 u32 status;
87 u16 pcie_link_status, pci_header_type;
88
89 bus->type = e1000_bus_type_pci_express;
90 bus->speed = e1000_bus_speed_2500;
91
92 ret_val = igb_read_pcie_cap_reg(hw,
93 PCIE_LINK_STATUS,
94 &pcie_link_status);
95 if (ret_val)
96 bus->width = e1000_bus_width_unknown;
97 else
98 bus->width = (enum e1000_bus_width)((pcie_link_status &
99 PCIE_LINK_WIDTH_MASK) >>
100 PCIE_LINK_WIDTH_SHIFT);
101
102 igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
103 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
104 status = rd32(E1000_STATUS);
105 bus->func = (status & E1000_STATUS_FUNC_MASK)
106 >> E1000_STATUS_FUNC_SHIFT;
107 } else {
108 bus->func = 0;
109 }
110
111 return 0;
112 }
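/*
 * Worked example (illustrative only, assuming the usual igb definitions
 * PCIE_LINK_WIDTH_MASK = 0x3F0 and PCIE_LINK_WIDTH_SHIFT = 4): a PCIe
 * Link Status value of 0x0041 decodes to
 *
 *	width = (0x0041 & 0x3F0) >> 4 = 4
 *
 * i.e. a x4 link, reported as e1000_bus_width_pcie_x4.
 */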
113
114 /**
115 * igb_clear_vfta - Clear VLAN filter table
116 * @hw: pointer to the HW structure
117 *
118 * Clears the register array which contains the VLAN filter table by
119 * setting all the values to 0.
120 **/
121 void igb_clear_vfta(struct e1000_hw *hw)
122 {
123 u32 offset;
124
125 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
126 array_wr32(E1000_VFTA, offset, 0);
127 wrfl();
128 }
129 }
130
131 /**
132 * igb_write_vfta - Write value to VLAN filter table
133 * @hw: pointer to the HW structure
134 * @offset: register offset in VLAN filter table
135 * @value: register value written to VLAN filter table
136 *
137 * Writes value at the given offset in the register array which stores
138 * the VLAN filter table.
139 **/
140 void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
141 {
142 array_wr32(E1000_VFTA, offset, value);
143 wrfl();
144 }
145
146 /**
147 * igb_init_rx_addrs - Initialize receive addresses
148 * @hw: pointer to the HW structure
149 * @rar_count: receive address registers
150 *
151 * Sets up the receive address registers by setting the base receive address
152 * register to the device's MAC address and clearing all the other receive
153 * address registers to 0.
154 **/
155 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
156 {
157 u32 i;
158
159 /* Setup the receive address */
160 hw_dbg("Programming MAC Address into RAR[0]\n");
161
162 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
163
164 /* Zero out the other (rar_entry_count - 1) receive addresses */
165 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
166 for (i = 1; i < rar_count; i++) {
167 array_wr32(E1000_RA, (i << 1), 0);
168 wrfl();
169 array_wr32(E1000_RA, ((i << 1) + 1), 0);
170 wrfl();
171 }
172 }
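/*
 * Note on the indexing above (explanatory, not driver code): each RAR
 * entry is a pair of 32-bit registers, the low word at array index
 * (i << 1) and the high word at (i << 1) + 1, so clearing an entry
 * means writing both words (see igb_rar_set() below for the packing).
 */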
173
174 /**
175 * igb_check_alt_mac_addr - Check for alternate MAC addr
176 * @hw: pointer to the HW structure
177 *
178 * Checks the NVM for an alternate MAC address. An alternate MAC address
179 * can be set up by pre-boot software and must be treated like a permanent
180 * address that overrides the actual permanent MAC address. If an
181 * alternate MAC address is found, it is saved in the hw struct and
182 * programmed into RAR0 and the function returns success; otherwise the
183 * function returns an error.
184 **/
185 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
186 {
187 u32 i;
188 s32 ret_val = 0;
189 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
190 u8 alt_mac_addr[ETH_ALEN];
191
192 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
193 &nvm_alt_mac_addr_offset);
194 if (ret_val) {
195 hw_dbg("NVM Read Error\n");
196 goto out;
197 }
198
199 if (nvm_alt_mac_addr_offset == 0xFFFF) {
200 ret_val = -(E1000_NOT_IMPLEMENTED);
201 goto out;
202 }
203
204 if (hw->bus.func == E1000_FUNC_1)
205 nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
206
207 for (i = 0; i < ETH_ALEN; i += 2) {
208 offset = nvm_alt_mac_addr_offset + (i >> 1);
209 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
210 if (ret_val) {
211 hw_dbg("NVM Read Error\n");
212 goto out;
213 }
214
215 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
216 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
217 }
218
219 /* if multicast bit is set, the alternate address will not be used */
220 if (alt_mac_addr[0] & 0x01) {
221 ret_val = -(E1000_NOT_IMPLEMENTED);
222 goto out;
223 }
224
225 for (i = 0; i < ETH_ALEN; i++)
226 hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
227
228 hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
229
230 out:
231 return ret_val;
232 }
233
234 /**
235 * igb_rar_set - Set receive address register
236 * @hw: pointer to the HW structure
237 * @addr: pointer to the receive address
238 * @index: receive address array register
239 *
240 * Sets the receive address array register at index to the address passed
241 * in by addr.
242 **/
243 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
244 {
245 u32 rar_low, rar_high;
246
247 /*
248 * HW expects these in little endian so we reverse the byte order
249 * from network order (big endian) to little endian
250 */
251 rar_low = ((u32) addr[0] |
252 ((u32) addr[1] << 8) |
253 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
254
255 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
256
257 if (!hw->mac.disable_av)
258 rar_high |= E1000_RAH_AV;
259
260 array_wr32(E1000_RA, (index << 1), rar_low);
261 array_wr32(E1000_RA, ((index << 1) + 1), rar_high);
262 }
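/*
 * Worked example (illustrative only): for a hypothetical address
 * 00:1B:21:AB:CD:EF the packing above yields
 *
 *	rar_low  = 0xAB211B00	(addr[3]..addr[0])
 *	rar_high = 0x0000EFCD	(addr[5]..addr[4], OR'd with E1000_RAH_AV
 *				 unless address validation is disabled)
 */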
263
264 /**
265 * igb_mta_set - Set multicast filter table address
266 * @hw: pointer to the HW structure
267 * @hash_value: determines the MTA register and bit to set
268 *
269 * The multicast table address is a register array of 32-bit registers.
270 * The hash_value is used to determine what register the bit is in, the
271 * current value is read, the new bit is OR'd in and the new value is
272 * written back into the register.
273 **/
274 static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
275 {
276 u32 hash_bit, hash_reg, mta;
277
278 /*
279 * The MTA is a register array of 32-bit registers. It is
280 * treated like an array of (32*mta_reg_count) bits. We want to
281 * set bit BitArray[hash_value]. So we figure out what register
282 * the bit is in, read it, OR in the new bit, then write
283 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
284 * mask to bits 31:5 of the hash value which gives us the
285 * register we're modifying. The hash bit within that register
286 * is determined by the lower 5 bits of the hash value.
287 */
288 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
289 hash_bit = hash_value & 0x1F;
290
291 mta = array_rd32(E1000_MTA, hash_reg);
292
293 mta |= (1 << hash_bit);
294
295 array_wr32(E1000_MTA, hash_reg, mta);
296 wrfl();
297 }
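/*
 * Worked example (illustrative only): with mta_reg_count = 128 and a
 * hash_value of 0x563 (see the case 0 example in igb_hash_mc_addr()
 * below),
 *
 *	hash_reg = (0x563 >> 5) & 0x7F = 43
 *	hash_bit = 0x563 & 0x1F        = 3
 *
 * so bit 3 of MTA[43] is OR'd in and written back.
 */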
298
299 /**
300 * igb_update_mc_addr_list - Update Multicast addresses
301 * @hw: pointer to the HW structure
302 * @mc_addr_list: array of multicast addresses to program
303 * @mc_addr_count: number of multicast addresses to program
304 * @rar_used_count: the first RAR register free to program
305 * @rar_count: total number of supported Receive Address Registers
306 *
307 * Updates the Receive Address Registers and Multicast Table Array.
308 * The caller must have a packed mc_addr_list of multicast addresses.
309 * The parameter rar_count will usually be hw->mac.rar_entry_count
310 * unless there are workarounds that change this.
311 **/
312 void igb_update_mc_addr_list(struct e1000_hw *hw,
313 u8 *mc_addr_list, u32 mc_addr_count,
314 u32 rar_used_count, u32 rar_count)
315 {
316 u32 hash_value;
317 u32 i;
318
319 /*
320 * Load the first set of multicast addresses into the exact
321 * filters (RAR). If there are not enough to fill the RAR
322 * array, clear the filters.
323 */
324 for (i = rar_used_count; i < rar_count; i++) {
325 if (mc_addr_count) {
326 hw->mac.ops.rar_set(hw, mc_addr_list, i);
327 mc_addr_count--;
328 mc_addr_list += ETH_ALEN;
329 } else {
330 array_wr32(E1000_RA, i << 1, 0);
331 wrfl();
332 array_wr32(E1000_RA, (i << 1) + 1, 0);
333 wrfl();
334 }
335 }
336
337 /* Clear the old settings from the MTA */
338 hw_dbg("Clearing MTA\n");
339 for (i = 0; i < hw->mac.mta_reg_count; i++) {
340 array_wr32(E1000_MTA, i, 0);
341 wrfl();
342 }
343
344 /* Load any remaining multicast addresses into the hash table. */
345 for (; mc_addr_count > 0; mc_addr_count--) {
346 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
347 hw_dbg("Hash value = 0x%03X\n", hash_value);
348 igb_mta_set(hw, hash_value);
349 mc_addr_list += ETH_ALEN;
350 }
351 }
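/*
 * Example of how a list is split (illustrative numbers only): with
 * rar_used_count = 1, rar_count = 16 and 20 multicast addresses, the
 * first 15 addresses are written to RAR[1]..RAR[15] as exact filters
 * and the remaining 5 are hashed into the MTA via igb_hash_mc_addr()
 * and igb_mta_set().
 */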
352
353 /**
354 * igb_hash_mc_addr - Generate a multicast hash value
355 * @hw: pointer to the HW structure
356 * @mc_addr: pointer to a multicast address
357 *
358 * Generates a multicast address hash value which is used to determine
359 * the multicast filter table array address and new table value. See
360 * igb_mta_set()
361 **/
362 u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
363 {
364 u32 hash_value, hash_mask;
365 u8 bit_shift = 0;
366
367 /* Register count multiplied by bits per register */
368 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
369
370 /*
371 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
372 * where 0xFF would still fall within the hash mask.
373 */
374 while (hash_mask >> bit_shift != 0xFF)
375 bit_shift++;
376
377 /*
378 * The portion of the address that is used for the hash table
379 * is determined by the mc_filter_type setting.
380 * The algorithm is such that there is a total of 8 bits of shifting.
381 * The bit_shift for a mc_filter_type of 0 represents the number of
382 * left-shifts where the MSB of mc_addr[5] would still fall within
383 * the hash_mask. Case 0 does this exactly. Since there are a total
384 * of 8 bits of shifting, then mc_addr[4] will shift right the
385 * remaining number of bits. Thus 8 - bit_shift. The rest of the
386 * cases are a variation of this algorithm...essentially raising the
387 * number of bits to shift mc_addr[5] left, while still keeping the
388 * 8-bit shifting total.
389 *
390 * For example, given the following Destination MAC Address and an
391 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
392 * we can see that the bit_shift for case 0 is 4. These are the hash
393 * values resulting from each mc_filter_type...
394 * [0] [1] [2] [3] [4] [5]
395 * 01 AA 00 12 34 56
396 * LSB MSB
397 *
398 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
399 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
400 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
401 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
402 */
403 switch (hw->mac.mc_filter_type) {
404 default:
405 case 0:
406 break;
407 case 1:
408 bit_shift += 1;
409 break;
410 case 2:
411 bit_shift += 2;
412 break;
413 case 3:
414 bit_shift += 4;
415 break;
416 }
417
418 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
419 (((u16) mc_addr[5]) << bit_shift)));
420
421 return hash_value;
422 }
423
424 /**
425 * igb_clear_hw_cntrs_base - Clear base hardware counters
426 * @hw: pointer to the HW structure
427 *
428 * Clears the base hardware counters by reading the counter registers.
429 **/
430 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
431 {
432 u32 temp;
433
434 temp = rd32(E1000_CRCERRS);
435 temp = rd32(E1000_SYMERRS);
436 temp = rd32(E1000_MPC);
437 temp = rd32(E1000_SCC);
438 temp = rd32(E1000_ECOL);
439 temp = rd32(E1000_MCC);
440 temp = rd32(E1000_LATECOL);
441 temp = rd32(E1000_COLC);
442 temp = rd32(E1000_DC);
443 temp = rd32(E1000_SEC);
444 temp = rd32(E1000_RLEC);
445 temp = rd32(E1000_XONRXC);
446 temp = rd32(E1000_XONTXC);
447 temp = rd32(E1000_XOFFRXC);
448 temp = rd32(E1000_XOFFTXC);
449 temp = rd32(E1000_FCRUC);
450 temp = rd32(E1000_GPRC);
451 temp = rd32(E1000_BPRC);
452 temp = rd32(E1000_MPRC);
453 temp = rd32(E1000_GPTC);
454 temp = rd32(E1000_GORCL);
455 temp = rd32(E1000_GORCH);
456 temp = rd32(E1000_GOTCL);
457 temp = rd32(E1000_GOTCH);
458 temp = rd32(E1000_RNBC);
459 temp = rd32(E1000_RUC);
460 temp = rd32(E1000_RFC);
461 temp = rd32(E1000_ROC);
462 temp = rd32(E1000_RJC);
463 temp = rd32(E1000_TORL);
464 temp = rd32(E1000_TORH);
465 temp = rd32(E1000_TOTL);
466 temp = rd32(E1000_TOTH);
467 temp = rd32(E1000_TPR);
468 temp = rd32(E1000_TPT);
469 temp = rd32(E1000_MPTC);
470 temp = rd32(E1000_BPTC);
471 }
472
473 /**
474 * igb_check_for_copper_link - Check for link (Copper)
475 * @hw: pointer to the HW structure
476 *
477 * Checks to see if the link status of the hardware has changed. If a
478 * change in link status has been detected, then we read the PHY registers
479 * to get the current speed/duplex if link exists.
480 **/
481 s32 igb_check_for_copper_link(struct e1000_hw *hw)
482 {
483 struct e1000_mac_info *mac = &hw->mac;
484 s32 ret_val;
485 bool link;
486
487 /*
488 * We only want to go out to the PHY registers to see if Auto-Neg
489 * has completed and/or if our link status has changed. The
490 * get_link_status flag is set upon receiving a Link Status
491 * Change or Rx Sequence Error interrupt.
492 */
493 if (!mac->get_link_status) {
494 ret_val = 0;
495 goto out;
496 }
497
498 /*
499 * First we want to see if the MII Status Register reports
500 * link. If so, then we want to get the current speed/duplex
501 * of the PHY.
502 */
503 ret_val = igb_phy_has_link(hw, 1, 0, &link);
504 if (ret_val)
505 goto out;
506
507 if (!link)
508 goto out; /* No link detected */
509
510 mac->get_link_status = false;
511
512 /*
513 * Check if there was DownShift, must be checked
514 * immediately after link-up
515 */
516 igb_check_downshift(hw);
517
518 /*
519 * If we are forcing speed/duplex, then we simply return since
520 * we have already determined whether we have link or not.
521 */
522 if (!mac->autoneg) {
523 ret_val = -E1000_ERR_CONFIG;
524 goto out;
525 }
526
527 /*
528 * Auto-Neg is enabled. Auto Speed Detection takes care
529 * of MAC speed/duplex configuration. So we only need to
530 * configure Collision Distance in the MAC.
531 */
532 igb_config_collision_dist(hw);
533
534 /*
535 * Configure Flow Control now that Auto-Neg has completed.
536 * First, we need to restore the desired flow control
537 * settings because we may have had to re-autoneg with a
538 * different link partner.
539 */
540 ret_val = igb_config_fc_after_link_up(hw);
541 if (ret_val)
542 hw_dbg("Error configuring flow control\n");
543
544 out:
545 return ret_val;
546 }
547
548 /**
549 * igb_setup_link - Setup flow control and link settings
550 * @hw: pointer to the HW structure
551 *
552 * Determines which flow control settings to use, then configures flow
553 * control. Calls the appropriate media-specific link configuration
554 * function. Assuming the adapter has a valid link partner, a valid link
555 * should be established. Assumes the hardware has previously been reset
556 * and the transmitter and receiver are not enabled.
557 **/
558 s32 igb_setup_link(struct e1000_hw *hw)
559 {
560 s32 ret_val = 0;
561
562 /*
563 * In the case of the phy reset being blocked, we already have a link.
564 * We do not need to set it up again.
565 */
566 if (igb_check_reset_block(hw))
567 goto out;
568
569 ret_val = igb_set_default_fc(hw);
570 if (ret_val)
571 goto out;
572
573 /*
574 * We want to save off the original Flow Control configuration just
575 * in case we get disconnected and then reconnected into a different
576 * hub or switch with different Flow Control capabilities.
577 */
578 hw->fc.original_type = hw->fc.type;
579
580 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.type);
581
582 /* Call the necessary media_type subroutine to configure the link. */
583 ret_val = hw->mac.ops.setup_physical_interface(hw);
584 if (ret_val)
585 goto out;
586
587 /*
588 * Initialize the flow control address, type, and PAUSE timer
589 * registers to their default values. This is done even if flow
590 * control is disabled, because it does not hurt anything to
591 * initialize these registers.
592 */
593 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
594 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
595 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
596 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
597
598 wr32(E1000_FCTTV, hw->fc.pause_time);
599
600 ret_val = igb_set_fc_watermarks(hw);
601
602 out:
603 return ret_val;
604 }
605
606 /**
607 * igb_config_collision_dist - Configure collision distance
608 * @hw: pointer to the HW structure
609 *
610 * Configures the collision distance to the default value and is used
611 * during link setup. Currently no func pointer exists and all
612 * implementations are handled in the generic version of this function.
613 **/
614 void igb_config_collision_dist(struct e1000_hw *hw)
615 {
616 u32 tctl;
617
618 tctl = rd32(E1000_TCTL);
619
620 tctl &= ~E1000_TCTL_COLD;
621 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
622
623 wr32(E1000_TCTL, tctl);
624 wrfl();
625 }
626
627 /**
628 * igb_set_fc_watermarks - Set flow control high/low watermarks
629 * @hw: pointer to the HW structure
630 *
631 * Sets the flow control high/low threshold (watermark) registers. If
632 * flow control XON frame transmission is enabled, then set XON frame
633 * transmission as well.
634 **/
635 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
636 {
637 s32 ret_val = 0;
638 u32 fcrtl = 0, fcrth = 0;
639
640 /*
641 * Set the flow control receive threshold registers. Normally,
642 * these registers will be set to a default threshold that may be
643 * adjusted later by the driver's runtime code. However, if the
644 * ability to transmit pause frames is not enabled, then these
645 * registers will be set to 0.
646 */
647 if (hw->fc.type & e1000_fc_tx_pause) {
648 /*
649 * We need to set up the Receive Threshold high and low water
650 * marks as well as (optionally) enabling the transmission of
651 * XON frames.
652 */
653 fcrtl = hw->fc.low_water;
654 if (hw->fc.send_xon)
655 fcrtl |= E1000_FCRTL_XONE;
656
657 fcrth = hw->fc.high_water;
658 }
659 wr32(E1000_FCRTL, fcrtl);
660 wr32(E1000_FCRTH, fcrth);
661
662 return ret_val;
663 }
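/*
 * Illustrative example only (values are hypothetical): with
 * hw->fc.high_water = 0x2000, hw->fc.low_water = 0x1000 and send_xon
 * enabled, the writes above program FCRTH = 0x2000 and
 * FCRTL = 0x1000 | E1000_FCRTL_XONE, so an XOFF is sent once the
 * receive buffer fills past the high watermark and an XON is sent once
 * it drains below the low watermark.
 */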
664
665 /**
666 * igb_set_default_fc - Set flow control default values
667 * @hw: pointer to the HW structure
668 *
669 * Read the EEPROM for the default values for flow control and store the
670 * values.
671 **/
672 static s32 igb_set_default_fc(struct e1000_hw *hw)
673 {
674 s32 ret_val = 0;
675 u16 nvm_data;
676
677 /*
678 * Read and store word 0x0F of the EEPROM. This word contains bits
679 * that determine the hardware's default PAUSE (flow control) mode,
680 * a bit that determines whether the HW defaults to enabling or
681 * disabling auto-negotiation, and the direction of the
682 * SW defined pins. If there is no SW over-ride of the flow
683 * control setting, then the variable hw->fc will
684 * be initialized based on a value in the EEPROM.
685 */
686 ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1,
687 &nvm_data);
688
689 if (ret_val) {
690 hw_dbg("NVM Read Error\n");
691 goto out;
692 }
693
694 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
695 hw->fc.type = e1000_fc_none;
696 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
697 NVM_WORD0F_ASM_DIR)
698 hw->fc.type = e1000_fc_tx_pause;
699 else
700 hw->fc.type = e1000_fc_full;
701
702 out:
703 return ret_val;
704 }
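/*
 * Illustrative decode of the branches above, assuming the usual igb
 * definitions NVM_WORD0F_PAUSE_MASK = 0x3000, NVM_WORD0F_PAUSE = 0x1000
 * and NVM_WORD0F_ASM_DIR = 0x2000:
 *
 *	PAUSE = 0, ASM_DIR = 0	-> e1000_fc_none
 *	PAUSE = 0, ASM_DIR = 1	-> e1000_fc_tx_pause
 *	any other combination	-> e1000_fc_full
 */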
705
706 /**
707 * igb_force_mac_fc - Force the MAC's flow control settings
708 * @hw: pointer to the HW structure
709 *
710 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
711 * device control register to reflect the adapter settings. TFCE and RFCE
712 * need to be explicitly set by software when a copper PHY is used because
713 * autonegotiation is managed by the PHY rather than the MAC. Software must
714 * also configure these bits when link is forced on a fiber connection.
715 **/
716 s32 igb_force_mac_fc(struct e1000_hw *hw)
717 {
718 u32 ctrl;
719 s32 ret_val = 0;
720
721 ctrl = rd32(E1000_CTRL);
722
723 /*
724 * Because we didn't get link via the internal auto-negotiation
725 * mechanism (we either forced link or we got link via PHY
726 * auto-neg), we have to manually enable/disable transmit and
727 * receive flow control.
728 *
729 * The "Case" statement below enables/disable flow control
730 * according to the "hw->fc.type" parameter.
731 *
732 * The possible values of the "fc" parameter are:
733 * 0: Flow control is completely disabled
734 * 1: Rx flow control is enabled (we can receive pause
735 * frames but not send pause frames).
736 * 2: Tx flow control is enabled (we can send pause frames
737 * but we do not receive pause frames).
738 * 3: Both Rx and Tx flow control (symmetric) are enabled.
739 * other: No other values should be possible at this point.
740 */
741 hw_dbg("hw->fc.type = %u\n", hw->fc.type);
742
743 switch (hw->fc.type) {
744 case e1000_fc_none:
745 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
746 break;
747 case e1000_fc_rx_pause:
748 ctrl &= (~E1000_CTRL_TFCE);
749 ctrl |= E1000_CTRL_RFCE;
750 break;
751 case e1000_fc_tx_pause:
752 ctrl &= (~E1000_CTRL_RFCE);
753 ctrl |= E1000_CTRL_TFCE;
754 break;
755 case e1000_fc_full:
756 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
757 break;
758 default:
759 hw_dbg("Flow control param set incorrectly\n");
760 ret_val = -E1000_ERR_CONFIG;
761 goto out;
762 }
763
764 wr32(E1000_CTRL, ctrl);
765
766 out:
767 return ret_val;
768 }
769
770 /**
771 * igb_config_fc_after_link_up - Configures flow control after link
772 * @hw: pointer to the HW structure
773 *
774 * Checks the status of auto-negotiation after link up to ensure that the
775 * speed and duplex were not forced. If the link needed to be forced, then
776 * flow control needs to be forced also. If auto-negotiation is enabled
777 * and did not fail, then we configure flow control based on our link
778 * partner.
779 **/
780 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
781 {
782 struct e1000_mac_info *mac = &hw->mac;
783 s32 ret_val = 0;
784 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
785 u16 speed, duplex;
786
787 /*
788 * Check for the case where we have fiber media and auto-neg failed
789 * so we had to force link. In this case, we need to force the
790 * configuration of the MAC to match the "fc" parameter.
791 */
792 if (mac->autoneg_failed) {
793 if (hw->phy.media_type == e1000_media_type_fiber ||
794 hw->phy.media_type == e1000_media_type_internal_serdes)
795 ret_val = igb_force_mac_fc(hw);
796 } else {
797 if (hw->phy.media_type == e1000_media_type_copper)
798 ret_val = igb_force_mac_fc(hw);
799 }
800
801 if (ret_val) {
802 hw_dbg("Error forcing flow control settings\n");
803 goto out;
804 }
805
806 /*
807 * Check for the case where we have copper media and auto-neg is
808 * enabled. In this case, we need to check and see if Auto-Neg
809 * has completed, and if so, how the PHY and link partner has
810 * flow control configured.
811 */
812 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
813 /*
814 * Read the MII Status Register and check to see if AutoNeg
815 * has completed. We read this twice because this reg has
816 * some "sticky" (latched) bits.
817 */
818 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
819 &mii_status_reg);
820 if (ret_val)
821 goto out;
822 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
823 &mii_status_reg);
824 if (ret_val)
825 goto out;
826
827 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
828 hw_dbg("Copper PHY and Auto Neg "
829 "has not completed.\n");
830 goto out;
831 }
832
833 /*
834 * The AutoNeg process has completed, so we now need to
835 * read both the Auto Negotiation Advertisement
836 * Register (Address 4) and the Auto_Negotiation Base
837 * Page Ability Register (Address 5) to determine how
838 * flow control was negotiated.
839 */
840 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
841 &mii_nway_adv_reg);
842 if (ret_val)
843 goto out;
844 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY,
845 &mii_nway_lp_ability_reg);
846 if (ret_val)
847 goto out;
848
849 /*
850 * Two bits in the Auto Negotiation Advertisement Register
851 * (Address 4) and two bits in the Auto Negotiation Base
852 * Page Ability Register (Address 5) determine flow control
853 * for both the PHY and the link partner. The following
854 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
855 * 1999, describes these PAUSE resolution bits and how flow
856 * control is determined based upon these settings.
857 * NOTE: DC = Don't Care
858 *
859 * LOCAL DEVICE | LINK PARTNER
860 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
861 *-------|---------|-------|---------|--------------------
862 * 0 | 0 | DC | DC | e1000_fc_none
863 * 0 | 1 | 0 | DC | e1000_fc_none
864 * 0 | 1 | 1 | 0 | e1000_fc_none
865 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
866 * 1 | 0 | 0 | DC | e1000_fc_none
867 * 1 | DC | 1 | DC | e1000_fc_full
868 * 1 | 1 | 0 | 0 | e1000_fc_none
869 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
870 *
871 * Are both PAUSE bits set to 1? If so, this implies
872 * Symmetric Flow Control is enabled at both ends. The
873 * ASM_DIR bits are irrelevant per the spec.
874 *
875 * For Symmetric Flow Control:
876 *
877 * LOCAL DEVICE | LINK PARTNER
878 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
879 *-------|---------|-------|---------|--------------------
880 * 1 | DC | 1 | DC | E1000_fc_full
881 *
882 */
883 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
884 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
885 /*
886 * Now we need to check if the user selected RX ONLY
887 * of pause frames. In this case, we had to advertise
888 * FULL flow control because we could not advertise RX
889 * ONLY. Hence, we must now check to see if we need to
890 * turn OFF the TRANSMISSION of PAUSE frames.
891 */
892 if (hw->fc.original_type == e1000_fc_full) {
893 hw->fc.type = e1000_fc_full;
894 hw_dbg("Flow Control = FULL.\r\n");
895 } else {
896 hw->fc.type = e1000_fc_rx_pause;
897 hw_dbg("Flow Control = "
898 "RX PAUSE frames only.\r\n");
899 }
900 }
901 /*
902 * For receiving PAUSE frames ONLY.
903 *
904 * LOCAL DEVICE | LINK PARTNER
905 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
906 *-------|---------|-------|---------|--------------------
907 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
908 */
909 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
910 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
911 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
912 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
913 hw->fc.type = e1000_fc_tx_pause;
914 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
915 }
916 /*
917 * For transmitting PAUSE frames ONLY.
918 *
919 * LOCAL DEVICE | LINK PARTNER
920 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
921 *-------|---------|-------|---------|--------------------
922 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
923 */
924 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
925 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
926 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
927 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
928 hw->fc.type = e1000_fc_rx_pause;
929 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
930 }
931 /*
932 * Per the IEEE spec, at this point flow control should be
933 * disabled. However, we want to consider that we could
934 * be connected to a legacy switch that doesn't advertise
935 * desired flow control, but can be forced on the link
936 * partner. So if we advertised no flow control, that is
937 * what we will resolve to. If we advertised some kind of
938 * receive capability (Rx Pause Only or Full Flow Control)
939 * and the link partner advertised none, we will configure
940 * ourselves to enable Rx Flow Control only. We can do
941 * this safely for two reasons: If the link partner really
942 * didn't want flow control enabled, and we enable Rx, no
943 * harm done since we won't be receiving any PAUSE frames
944 * anyway. If the intent on the link partner was to have
945 * flow control enabled, then by us enabling RX only, we
946 * can at least receive pause frames and process them.
947 * This is a good idea because in most cases, since we are
948 * predominantly a server NIC, more times than not we will
949 * be asked to delay transmission of packets than asking
950 * our link partner to pause transmission of frames.
951 */
952 else if ((hw->fc.original_type == e1000_fc_none ||
953 hw->fc.original_type == e1000_fc_tx_pause) ||
954 hw->fc.strict_ieee) {
955 hw->fc.type = e1000_fc_none;
956 hw_dbg("Flow Control = NONE.\r\n");
957 } else {
958 hw->fc.type = e1000_fc_rx_pause;
959 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
960 }
961
962 /*
963 * Now we need to do one last check... If we auto-
964 * negotiated to HALF DUPLEX, flow control should not be
965 * enabled per IEEE 802.3 spec.
966 */
967 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
968 if (ret_val) {
969 hw_dbg("Error getting link speed and duplex\n");
970 goto out;
971 }
972
973 if (duplex == HALF_DUPLEX)
974 hw->fc.type = e1000_fc_none;
975
976 /*
977 * Now we call a subroutine to actually force the MAC
978 * controller to use the correct flow control settings.
979 */
980 ret_val = igb_force_mac_fc(hw);
981 if (ret_val) {
982 hw_dbg("Error forcing flow control settings\n");
983 goto out;
984 }
985 }
986
987 out:
988 return ret_val;
989 }
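/*
 * Concrete resolution example (illustrative only): if we advertised
 * PAUSE = 1, ASM_DIR = 1 (we requested e1000_fc_full) and the link
 * partner advertised PAUSE = 0, ASM_DIR = 1, the code above resolves
 * to e1000_fc_rx_pause: we will honour PAUSE frames from the partner
 * but will not transmit any ourselves.
 */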
990
991 /**
992 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
993 * @hw: pointer to the HW structure
994 * @speed: stores the current speed
995 * @duplex: stores the current duplex
996 *
997 * Read the status register for the current speed/duplex and store the current
998 * speed and duplex for copper connections.
999 **/
1000 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1001 u16 *duplex)
1002 {
1003 u32 status;
1004
1005 status = rd32(E1000_STATUS);
1006 if (status & E1000_STATUS_SPEED_1000) {
1007 *speed = SPEED_1000;
1008 hw_dbg("1000 Mbs, ");
1009 } else if (status & E1000_STATUS_SPEED_100) {
1010 *speed = SPEED_100;
1011 hw_dbg("100 Mbs, ");
1012 } else {
1013 *speed = SPEED_10;
1014 hw_dbg("10 Mbs, ");
1015 }
1016
1017 if (status & E1000_STATUS_FD) {
1018 *duplex = FULL_DUPLEX;
1019 hw_dbg("Full Duplex\n");
1020 } else {
1021 *duplex = HALF_DUPLEX;
1022 hw_dbg("Half Duplex\n");
1023 }
1024
1025 return 0;
1026 }
1027
1028 /**
1029 * igb_get_hw_semaphore - Acquire hardware semaphore
1030 * @hw: pointer to the HW structure
1031 *
1032 * Acquire the HW semaphore to access the PHY or NVM
1033 **/
1034 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1035 {
1036 u32 swsm;
1037 s32 ret_val = 0;
1038 s32 timeout = hw->nvm.word_size + 1;
1039 s32 i = 0;
1040
1041 /* Get the SW semaphore */
1042 while (i < timeout) {
1043 swsm = rd32(E1000_SWSM);
1044 if (!(swsm & E1000_SWSM_SMBI))
1045 break;
1046
1047 udelay(50);
1048 i++;
1049 }
1050
1051 if (i == timeout) {
1052 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1053 ret_val = -E1000_ERR_NVM;
1054 goto out;
1055 }
1056
1057 /* Get the FW semaphore. */
1058 for (i = 0; i < timeout; i++) {
1059 swsm = rd32(E1000_SWSM);
1060 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1061
1062 /* Semaphore acquired if bit latched */
1063 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1064 break;
1065
1066 udelay(50);
1067 }
1068
1069 if (i == timeout) {
1070 /* Release semaphores */
1071 igb_put_hw_semaphore(hw);
1072 hw_dbg("Driver can't access the NVM\n");
1073 ret_val = -E1000_ERR_NVM;
1074 goto out;
1075 }
1076
1077 out:
1078 return ret_val;
1079 }
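/*
 * Typical usage, sketched only (not code from this file): callers that
 * touch the NVM or PHY bracket their accesses with this pair, e.g.
 *
 *	ret_val = igb_get_hw_semaphore(hw);
 *	if (ret_val)
 *		return ret_val;
 *	... access the NVM or PHY ...
 *	igb_put_hw_semaphore(hw);
 */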
1080
1081 /**
1082 * igb_put_hw_semaphore - Release hardware semaphore
1083 * @hw: pointer to the HW structure
1084 *
1085 * Release hardware semaphore used to access the PHY or NVM
1086 **/
1087 void igb_put_hw_semaphore(struct e1000_hw *hw)
1088 {
1089 u32 swsm;
1090
1091 swsm = rd32(E1000_SWSM);
1092
1093 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1094
1095 wr32(E1000_SWSM, swsm);
1096 }
1097
1098 /**
1099 * igb_get_auto_rd_done - Check for auto read completion
1100 * @hw: pointer to the HW structure
1101 *
1102 * Check EEPROM for Auto Read done bit.
1103 **/
1104 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1105 {
1106 s32 i = 0;
1107 s32 ret_val = 0;
1108
1109
1110 while (i < AUTO_READ_DONE_TIMEOUT) {
1111 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1112 break;
1113 msleep(1);
1114 i++;
1115 }
1116
1117 if (i == AUTO_READ_DONE_TIMEOUT) {
1118 hw_dbg("Auto read by HW from NVM has not completed.\n");
1119 ret_val = -E1000_ERR_RESET;
1120 goto out;
1121 }
1122
1123 out:
1124 return ret_val;
1125 }
1126
1127 /**
1128 * igb_valid_led_default - Verify a valid default LED config
1129 * @hw: pointer to the HW structure
1130 * @data: pointer to the NVM (EEPROM)
1131 *
1132 * Read the EEPROM for the current default LED configuration. If the
1133 * LED configuration is not valid, set to a valid LED configuration.
1134 **/
1135 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1136 {
1137 s32 ret_val;
1138
1139 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1140 if (ret_val) {
1141 hw_dbg("NVM Read Error\n");
1142 goto out;
1143 }
1144
1145 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1146 *data = ID_LED_DEFAULT;
1147
1148 out:
1149 return ret_val;
1150 }
1151
1152 /**
1153 * igb_id_led_init - Initialize the ID LED config
1154 * @hw: pointer to the HW structure
1155 * Reads the default LED config from the NVM and sets up ledctl_mode1/mode2.
1156 **/
1157 s32 igb_id_led_init(struct e1000_hw *hw)
1158 {
1159 struct e1000_mac_info *mac = &hw->mac;
1160 s32 ret_val;
1161 const u32 ledctl_mask = 0x000000FF;
1162 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1163 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1164 u16 data, i, temp;
1165 const u16 led_mask = 0x0F;
1166
1167 ret_val = igb_valid_led_default(hw, &data);
1168 if (ret_val)
1169 goto out;
1170
1171 mac->ledctl_default = rd32(E1000_LEDCTL);
1172 mac->ledctl_mode1 = mac->ledctl_default;
1173 mac->ledctl_mode2 = mac->ledctl_default;
1174
1175 for (i = 0; i < 4; i++) {
1176 temp = (data >> (i << 2)) & led_mask;
1177 switch (temp) {
1178 case ID_LED_ON1_DEF2:
1179 case ID_LED_ON1_ON2:
1180 case ID_LED_ON1_OFF2:
1181 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1182 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1183 break;
1184 case ID_LED_OFF1_DEF2:
1185 case ID_LED_OFF1_ON2:
1186 case ID_LED_OFF1_OFF2:
1187 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1188 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1189 break;
1190 default:
1191 /* Do nothing */
1192 break;
1193 }
1194 switch (temp) {
1195 case ID_LED_DEF1_ON2:
1196 case ID_LED_ON1_ON2:
1197 case ID_LED_OFF1_ON2:
1198 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1199 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1200 break;
1201 case ID_LED_DEF1_OFF2:
1202 case ID_LED_ON1_OFF2:
1203 case ID_LED_OFF1_OFF2:
1204 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1205 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1206 break;
1207 default:
1208 /* Do nothing */
1209 break;
1210 }
1211 }
1212
1213 out:
1214 return ret_val;
1215 }
1216
1217 /**
1218 * igb_cleanup_led - Set LED config to default operation
1219 * @hw: pointer to the HW structure
1220 *
1221 * Remove the current LED configuration and set the LED configuration
1222 * to the default value, saved from the EEPROM.
1223 **/
1224 s32 igb_cleanup_led(struct e1000_hw *hw)
1225 {
1226 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1227 return 0;
1228 }
1229
1230 /**
1231 * igb_blink_led - Blink LED
1232 * @hw: pointer to the HW structure
1233 *
1234 * Blink the LEDs which are set to be on.
1235 **/
1236 s32 igb_blink_led(struct e1000_hw *hw)
1237 {
1238 u32 ledctl_blink = 0;
1239 u32 i;
1240
1241 if (hw->phy.media_type == e1000_media_type_fiber) {
1242 /* always blink LED0 for PCI-E fiber */
1243 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1244 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1245 } else {
1246 /*
1247 * set the blink bit for each LED that's "on" (0x0E)
1248 * in ledctl_mode2
1249 */
1250 ledctl_blink = hw->mac.ledctl_mode2;
1251 for (i = 0; i < 4; i++)
1252 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1253 E1000_LEDCTL_MODE_LED_ON)
1254 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1255 (i * 8));
1256 }
1257
1258 wr32(E1000_LEDCTL, ledctl_blink);
1259
1260 return 0;
1261 }
1262
1263 /**
1264 * igb_led_off - Turn LED off
1265 * @hw: pointer to the HW structure
1266 *
1267 * Turn LED off.
1268 **/
1269 s32 igb_led_off(struct e1000_hw *hw)
1270 {
1271 u32 ctrl;
1272
1273 switch (hw->phy.media_type) {
1274 case e1000_media_type_fiber:
1275 ctrl = rd32(E1000_CTRL);
1276 ctrl |= E1000_CTRL_SWDPIN0;
1277 ctrl |= E1000_CTRL_SWDPIO0;
1278 wr32(E1000_CTRL, ctrl);
1279 break;
1280 case e1000_media_type_copper:
1281 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1282 break;
1283 default:
1284 break;
1285 }
1286
1287 return 0;
1288 }
1289
1290 /**
1291 * igb_disable_pcie_master - Disables PCI-express master access
1292 * @hw: pointer to the HW structure
1293 *
1294 * Returns 0 if successful, else returns -10
1295 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
1296 * caused the master requests to be disabled.
1297 *
1298 * Disables PCI-Express master access and verifies there are no pending
1299 * requests.
1300 **/
1301 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1302 {
1303 u32 ctrl;
1304 s32 timeout = MASTER_DISABLE_TIMEOUT;
1305 s32 ret_val = 0;
1306
1307 if (hw->bus.type != e1000_bus_type_pci_express)
1308 goto out;
1309
1310 ctrl = rd32(E1000_CTRL);
1311 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1312 wr32(E1000_CTRL, ctrl);
1313
1314 while (timeout) {
1315 if (!(rd32(E1000_STATUS) &
1316 E1000_STATUS_GIO_MASTER_ENABLE))
1317 break;
1318 udelay(100);
1319 timeout--;
1320 }
1321
1322 if (!timeout) {
1323 hw_dbg("Master requests are pending.\n");
1324 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1325 goto out;
1326 }
1327
1328 out:
1329 return ret_val;
1330 }
1331
1332 /**
1333 * igb_reset_adaptive - Reset Adaptive Interframe Spacing
1334 * @hw: pointer to the HW structure
1335 *
1336 * Reset the Adaptive Interframe Spacing throttle to default values.
1337 **/
1338 void igb_reset_adaptive(struct e1000_hw *hw)
1339 {
1340 struct e1000_mac_info *mac = &hw->mac;
1341
1342 if (!mac->adaptive_ifs) {
1343 hw_dbg("Not in Adaptive IFS mode!\n");
1344 goto out;
1345 }
1346
1347 if (!mac->ifs_params_forced) {
1348 mac->current_ifs_val = 0;
1349 mac->ifs_min_val = IFS_MIN;
1350 mac->ifs_max_val = IFS_MAX;
1351 mac->ifs_step_size = IFS_STEP;
1352 mac->ifs_ratio = IFS_RATIO;
1353 }
1354
1355 mac->in_ifs_mode = false;
1356 wr32(E1000_AIT, 0);
1357 out:
1358 return;
1359 }
1360
1361 /**
1362 * igb_update_adaptive - Update Adaptive Interframe Spacing
1363 * @hw: pointer to the HW structure
1364 *
1365 * Update the Adaptive Interframe Spacing Throttle value based on the
1366 * time between transmitted packets and time between collisions.
1367 **/
1368 void igb_update_adaptive(struct e1000_hw *hw)
1369 {
1370 struct e1000_mac_info *mac = &hw->mac;
1371
1372 if (!mac->adaptive_ifs) {
1373 hw_dbg("Not in Adaptive IFS mode!\n");
1374 goto out;
1375 }
1376
1377 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1378 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1379 mac->in_ifs_mode = true;
1380 if (mac->current_ifs_val < mac->ifs_max_val) {
1381 if (!mac->current_ifs_val)
1382 mac->current_ifs_val = mac->ifs_min_val;
1383 else
1384 mac->current_ifs_val +=
1385 mac->ifs_step_size;
1386 wr32(E1000_AIT,
1387 mac->current_ifs_val);
1388 }
1389 }
1390 } else {
1391 if (mac->in_ifs_mode &&
1392 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1393 mac->current_ifs_val = 0;
1394 mac->in_ifs_mode = false;
1395 wr32(E1000_AIT, 0);
1396 }
1397 }
1398 out:
1399 return;
1400 }
1401
1402 /**
1403 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1404 * @hw: pointer to the HW structure
1405 *
1406 * Verifies that when auto-negotiation is not used, MDI/MDIx is correctly
1407 * set; forced MDI mode is the only valid setting.
1408 **/
1409 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1410 {
1411 s32 ret_val = 0;
1412
1413 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1414 hw_dbg("Invalid MDI setting detected\n");
1415 hw->phy.mdix = 1;
1416 ret_val = -E1000_ERR_CONFIG;
1417 goto out;
1418 }
1419
1420 out:
1421 return ret_val;
1422 }
1423
1424 /**
1425 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1426 * @hw: pointer to the HW structure
1427 * @reg: 32bit register offset such as E1000_SCTL
1428 * @offset: register offset to write to
1429 * @data: data to write at register offset
1430 *
1431 * Writes an address/data control type register. There are several of these
1432 * and they all have the format address << 8 | data and bit 31 is polled for
1433 * completion.
1434 **/
1435 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1436 u32 offset, u8 data)
1437 {
1438 u32 i, regvalue = 0;
1439 s32 ret_val = 0;
1440
1441 /* Set up the address and data */
1442 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1443 wr32(reg, regvalue);
1444
1445 /* Poll the ready bit to see if the write completed */
1446 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1447 udelay(5);
1448 regvalue = rd32(reg);
1449 if (regvalue & E1000_GEN_CTL_READY)
1450 break;
1451 }
1452 if (!(regvalue & E1000_GEN_CTL_READY)) {
1453 hw_dbg("Reg %08x did not indicate ready\n", reg);
1454 ret_val = -E1000_ERR_PHY;
1455 goto out;
1456 }
1457
1458 out:
1459 return ret_val;
1460 }
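/*
 * Worked example (illustrative only): with the "address << 8 | data"
 * layout described above (E1000_GEN_CTL_ADDRESS_SHIFT assumed to be 8),
 * writing data 0x20 at offset 0x01 programs
 *
 *	regvalue = 0x20 | (0x01 << 8) = 0x0120
 *
 * and the ready bit (E1000_GEN_CTL_READY) is then polled for completion.
 */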
1461
1462 /**
1463 * igb_enable_mng_pass_thru - Enable processing of ARPs
1464 * @hw: pointer to the HW structure
1465 *
1466 * Verifies whether the hardware needs to allow ARPs to be processed by the host.
1467 **/
1468 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1469 {
1470 u32 manc;
1471 u32 fwsm, factps;
1472 bool ret_val = false;
1473
1474 if (!hw->mac.asf_firmware_present)
1475 goto out;
1476
1477 manc = rd32(E1000_MANC);
1478
1479 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
1480 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
1481 goto out;
1482
1483 if (hw->mac.arc_subsystem_valid) {
1484 fwsm = rd32(E1000_FWSM);
1485 factps = rd32(E1000_FACTPS);
1486
1487 if (!(factps & E1000_FACTPS_MNGCG) &&
1488 ((fwsm & E1000_FWSM_MODE_MASK) ==
1489 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1490 ret_val = true;
1491 goto out;
1492 }
1493 } else {
1494 if ((manc & E1000_MANC_SMBUS_EN) &&
1495 !(manc & E1000_MANC_ASF_EN)) {
1496 ret_val = true;
1497 goto out;
1498 }
1499 }
1500
1501 out:
1502 return ret_val;
1503 }