Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[deliverable/linux.git] / drivers / net / igb / e1000_mac.c
1 /*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 #include <linux/if_ether.h>
29 #include <linux/delay.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32
33 #include "e1000_mac.h"
34
35 #include "igb.h"
36
37 static s32 igb_set_default_fc(struct e1000_hw *hw);
38 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
40
41 /**
42 * igb_remove_device - Free device specific structure
43 * @hw: pointer to the HW structure
44 *
45 * If a device specific structure was allocated, this function will
46 * free it.
47 **/
48 void igb_remove_device(struct e1000_hw *hw)
49 {
50 /* Freeing the dev_spec member of e1000_hw structure */
51 kfree(hw->dev_spec);
52 }
53
54 static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
55 {
56 struct igb_adapter *adapter = hw->back;
57
58 pci_read_config_word(adapter->pdev, reg, value);
59 }
60
61 static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
62 {
63 struct igb_adapter *adapter = hw->back;
64 u16 cap_offset;
65
66 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
67 if (!cap_offset)
68 return -E1000_ERR_CONFIG;
69
70 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
71
72 return 0;
73 }
74
75 /**
76 * igb_get_bus_info_pcie - Get PCIe bus information
77 * @hw: pointer to the HW structure
78 *
79 * Determines and stores the system bus information for a particular
80 * network interface. The following bus information is determined and stored:
81 * bus speed, bus width, type (PCIe), and PCIe function.
82 **/
83 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
84 {
85 struct e1000_bus_info *bus = &hw->bus;
86 s32 ret_val;
87 u32 status;
88 u16 pcie_link_status, pci_header_type;
89
90 bus->type = e1000_bus_type_pci_express;
91 bus->speed = e1000_bus_speed_2500;
92
93 ret_val = igb_read_pcie_cap_reg(hw,
94 PCIE_LINK_STATUS,
95 &pcie_link_status);
96 if (ret_val)
97 bus->width = e1000_bus_width_unknown;
98 else
99 bus->width = (enum e1000_bus_width)((pcie_link_status &
100 PCIE_LINK_WIDTH_MASK) >>
101 PCIE_LINK_WIDTH_SHIFT);
102
103 igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
104 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
105 status = rd32(E1000_STATUS);
106 bus->func = (status & E1000_STATUS_FUNC_MASK)
107 >> E1000_STATUS_FUNC_SHIFT;
108 } else {
109 bus->func = 0;
110 }
111
112 return 0;
113 }
114
115 /**
116 * igb_clear_vfta - Clear VLAN filter table
117 * @hw: pointer to the HW structure
118 *
119 * Clears the register array which contains the VLAN filter table by
120 * setting all the values to 0.
121 **/
122 void igb_clear_vfta(struct e1000_hw *hw)
123 {
124 u32 offset;
125
126 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
127 array_wr32(E1000_VFTA, offset, 0);
128 wrfl();
129 }
130 }
131
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	/* Flush so the write posts to hardware immediately. */
	wrfl();
}
146
147 /**
148 * igb_init_rx_addrs - Initialize receive address's
149 * @hw: pointer to the HW structure
150 * @rar_count: receive address registers
151 *
152 * Setups the receive address registers by setting the base receive address
153 * register to the devices MAC address and clearing all the other receive
154 * address registers to 0.
155 **/
156 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
157 {
158 u32 i;
159
160 /* Setup the receive address */
161 hw_dbg("Programming MAC Address into RAR[0]\n");
162
163 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
164
165 /* Zero out the other (rar_entry_count - 1) receive addresses */
166 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
167 for (i = 1; i < rar_count; i++) {
168 array_wr32(E1000_RA, (i << 1), 0);
169 wrfl();
170 array_wr32(E1000_RA, ((i << 1) + 1), 0);
171 wrfl();
172 }
173 }
174
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be setup by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.  If an
 *  alternate MAC address is found it is saved in the hw struct and
 *  programmed into RAR0 and the function returns success, otherwise the
 *  function returns an error.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* The NVM pointer word tells us where (if anywhere) the alternate
	 * address block lives. */
	ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
	                               &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* 0xFFFF (erased NVM) means no alternate address is provisioned. */
	if (nvm_alt_mac_addr_offset == 0xFFFF) {
		ret_val = -(E1000_NOT_IMPLEMENTED);
		goto out;
	}

	/* Function 1 has its own alternate address stored immediately
	 * after function 0's (addresses are ETH_ALEN/2 NVM words each). */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);

	/* Read the address two bytes (one NVM word) at a time;
	 * NVM words are little endian. */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (alt_mac_addr[0] & 0x01) {
		ret_val = -(E1000_NOT_IMPLEMENTED);
		goto out;
	}

	/* The alternate address overrides both the current and the
	 * permanent MAC address in the hw struct. */
	for (i = 0; i < ETH_ALEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];

	hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);

out:
	return ret_val;
}
234
235 /**
236 * igb_rar_set - Set receive address register
237 * @hw: pointer to the HW structure
238 * @addr: pointer to the receive address
239 * @index: receive address array register
240 *
241 * Sets the receive address array register at index to the address passed
242 * in by addr.
243 **/
244 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
245 {
246 u32 rar_low, rar_high;
247
248 /*
249 * HW expects these in little endian so we reverse the byte order
250 * from network order (big endian) to little endian
251 */
252 rar_low = ((u32) addr[0] |
253 ((u32) addr[1] << 8) |
254 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
255
256 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
257
258 if (!hw->mac.disable_av)
259 rar_high |= E1000_RAH_AV;
260
261 array_wr32(E1000_RA, (index << 1), rar_low);
262 array_wr32(E1000_RA, ((index << 1) + 1), rar_high);
263 }
264
265 /**
266 * igb_mta_set - Set multicast filter table address
267 * @hw: pointer to the HW structure
268 * @hash_value: determines the MTA register and bit to set
269 *
270 * The multicast table address is a register array of 32-bit registers.
271 * The hash_value is used to determine what register the bit is in, the
272 * current value is read, the new bit is OR'd in and the new value is
273 * written back into the register.
274 **/
275 static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
276 {
277 u32 hash_bit, hash_reg, mta;
278
279 /*
280 * The MTA is a register array of 32-bit registers. It is
281 * treated like an array of (32*mta_reg_count) bits. We want to
282 * set bit BitArray[hash_value]. So we figure out what register
283 * the bit is in, read it, OR in the new bit, then write
284 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
285 * mask to bits 31:5 of the hash value which gives us the
286 * register we're modifying. The hash bit within that register
287 * is determined by the lower 5 bits of the hash value.
288 */
289 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
290 hash_bit = hash_value & 0x1F;
291
292 mta = array_rd32(E1000_MTA, hash_reg);
293
294 mta |= (1 << hash_bit);
295
296 array_wr32(E1000_MTA, hash_reg, mta);
297 wrfl();
298 }
299
300 /**
301 * igb_update_mc_addr_list - Update Multicast addresses
302 * @hw: pointer to the HW structure
303 * @mc_addr_list: array of multicast addresses to program
304 * @mc_addr_count: number of multicast addresses to program
305 * @rar_used_count: the first RAR register free to program
306 * @rar_count: total number of supported Receive Address Registers
307 *
308 * Updates the Receive Address Registers and Multicast Table Array.
309 * The caller must have a packed mc_addr_list of multicast addresses.
310 * The parameter rar_count will usually be hw->mac.rar_entry_count
311 * unless there are workarounds that change this.
312 **/
313 void igb_update_mc_addr_list(struct e1000_hw *hw,
314 u8 *mc_addr_list, u32 mc_addr_count,
315 u32 rar_used_count, u32 rar_count)
316 {
317 u32 hash_value;
318 u32 i;
319
320 /*
321 * Load the first set of multicast addresses into the exact
322 * filters (RAR). If there are not enough to fill the RAR
323 * array, clear the filters.
324 */
325 for (i = rar_used_count; i < rar_count; i++) {
326 if (mc_addr_count) {
327 hw->mac.ops.rar_set(hw, mc_addr_list, i);
328 mc_addr_count--;
329 mc_addr_list += ETH_ALEN;
330 } else {
331 array_wr32(E1000_RA, i << 1, 0);
332 wrfl();
333 array_wr32(E1000_RA, (i << 1) + 1, 0);
334 wrfl();
335 }
336 }
337
338 /* Clear the old settings from the MTA */
339 hw_dbg("Clearing MTA\n");
340 for (i = 0; i < hw->mac.mta_reg_count; i++) {
341 array_wr32(E1000_MTA, i, 0);
342 wrfl();
343 }
344
345 /* Load any remaining multicast addresses into the hash table. */
346 for (; mc_addr_count > 0; mc_addr_count--) {
347 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
348 hw_dbg("Hash value = 0x%03X\n", hash_value);
349 igb_mta_set(hw, hash_value);
350 mc_addr_list += ETH_ALEN;
351 }
352 }
353
354 /**
355 * igb_hash_mc_addr - Generate a multicast hash value
356 * @hw: pointer to the HW structure
357 * @mc_addr: pointer to a multicast address
358 *
359 * Generates a multicast address hash value which is used to determine
360 * the multicast filter table array address and new table value. See
361 * igb_mta_set()
362 **/
363 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
364 {
365 u32 hash_value, hash_mask;
366 u8 bit_shift = 0;
367
368 /* Register count multiplied by bits per register */
369 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
370
371 /*
372 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
373 * where 0xFF would still fall within the hash mask.
374 */
375 while (hash_mask >> bit_shift != 0xFF)
376 bit_shift++;
377
378 /*
379 * The portion of the address that is used for the hash table
380 * is determined by the mc_filter_type setting.
381 * The algorithm is such that there is a total of 8 bits of shifting.
382 * The bit_shift for a mc_filter_type of 0 represents the number of
383 * left-shifts where the MSB of mc_addr[5] would still fall within
384 * the hash_mask. Case 0 does this exactly. Since there are a total
385 * of 8 bits of shifting, then mc_addr[4] will shift right the
386 * remaining number of bits. Thus 8 - bit_shift. The rest of the
387 * cases are a variation of this algorithm...essentially raising the
388 * number of bits to shift mc_addr[5] left, while still keeping the
389 * 8-bit shifting total.
390 *
391 * For example, given the following Destination MAC Address and an
392 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
393 * we can see that the bit_shift for case 0 is 4. These are the hash
394 * values resulting from each mc_filter_type...
395 * [0] [1] [2] [3] [4] [5]
396 * 01 AA 00 12 34 56
397 * LSB MSB
398 *
399 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
400 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
401 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
402 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
403 */
404 switch (hw->mac.mc_filter_type) {
405 default:
406 case 0:
407 break;
408 case 1:
409 bit_shift += 1;
410 break;
411 case 2:
412 bit_shift += 2;
413 break;
414 case 3:
415 bit_shift += 4;
416 break;
417 }
418
419 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
420 (((u16) mc_addr[5]) << bit_shift)));
421
422 return hash_value;
423 }
424
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 *  These statistics registers are read-to-clear; the values read here
 *  are intentionally discarded (temp is a write-only sink).
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	u32 temp;

	temp = rd32(E1000_CRCERRS);
	temp = rd32(E1000_SYMERRS);
	temp = rd32(E1000_MPC);
	temp = rd32(E1000_SCC);
	temp = rd32(E1000_ECOL);
	temp = rd32(E1000_MCC);
	temp = rd32(E1000_LATECOL);
	temp = rd32(E1000_COLC);
	temp = rd32(E1000_DC);
	temp = rd32(E1000_SEC);
	temp = rd32(E1000_RLEC);
	temp = rd32(E1000_XONRXC);
	temp = rd32(E1000_XONTXC);
	temp = rd32(E1000_XOFFRXC);
	temp = rd32(E1000_XOFFTXC);
	temp = rd32(E1000_FCRUC);
	temp = rd32(E1000_GPRC);
	temp = rd32(E1000_BPRC);
	temp = rd32(E1000_MPRC);
	temp = rd32(E1000_GPTC);
	/* 64-bit counters: reading the low half latches, so read L then H */
	temp = rd32(E1000_GORCL);
	temp = rd32(E1000_GORCH);
	temp = rd32(E1000_GOTCL);
	temp = rd32(E1000_GOTCH);
	temp = rd32(E1000_RNBC);
	temp = rd32(E1000_RUC);
	temp = rd32(E1000_RFC);
	temp = rd32(E1000_ROC);
	temp = rd32(E1000_RJC);
	temp = rd32(E1000_TORL);
	temp = rd32(E1000_TORH);
	temp = rd32(E1000_TOTL);
	temp = rd32(E1000_TOTH);
	temp = rd32(E1000_TPR);
	temp = rd32(E1000_TPT);
	temp = rd32(E1000_MPTC);
	temp = rd32(E1000_BPTC);
}
473
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 *
 *  Returns 0 on success (including "no link yet"), -E1000_ERR_CONFIG when
 *  speed/duplex are forced, or a PHY-access error code.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/*
	 * We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/*
	 * First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/*
	 * Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/*
	 * If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/*
	 * Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
548
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/*
	 * In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* Load the default flow control mode from the NVM. */
	ret_val = igb_set_default_fc(hw);
	if (ret_val)
		goto out;

	/*
	 * We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.original_type = hw->fc.type;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.type);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/*
	 * Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	/* Finally program the high/low water marks for pause frames. */
	ret_val = igb_set_fc_watermarks(hw);

out:
	return ret_val;
}
606
607 /**
608 * igb_config_collision_dist - Configure collision distance
609 * @hw: pointer to the HW structure
610 *
611 * Configures the collision distance to the default value and is used
612 * during link setup. Currently no func pointer exists and all
613 * implementations are handled in the generic version of this function.
614 **/
615 void igb_config_collision_dist(struct e1000_hw *hw)
616 {
617 u32 tctl;
618
619 tctl = rd32(E1000_TCTL);
620
621 tctl &= ~E1000_TCTL_COLD;
622 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
623
624 wr32(E1000_TCTL, tctl);
625 wrfl();
626 }
627
628 /**
629 * igb_set_fc_watermarks - Set flow control high/low watermarks
630 * @hw: pointer to the HW structure
631 *
632 * Sets the flow control high/low threshold (watermark) registers. If
633 * flow control XON frame transmission is enabled, then set XON frame
634 * tansmission as well.
635 **/
636 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
637 {
638 s32 ret_val = 0;
639 u32 fcrtl = 0, fcrth = 0;
640
641 /*
642 * Set the flow control receive threshold registers. Normally,
643 * these registers will be set to a default threshold that may be
644 * adjusted later by the driver's runtime code. However, if the
645 * ability to transmit pause frames is not enabled, then these
646 * registers will be set to 0.
647 */
648 if (hw->fc.type & e1000_fc_tx_pause) {
649 /*
650 * We need to set up the Receive Threshold high and low water
651 * marks as well as (optionally) enabling the transmission of
652 * XON frames.
653 */
654 fcrtl = hw->fc.low_water;
655 if (hw->fc.send_xon)
656 fcrtl |= E1000_FCRTL_XONE;
657
658 fcrth = hw->fc.high_water;
659 }
660 wr32(E1000_FCRTL, fcrtl);
661 wr32(E1000_FCRTH, fcrth);
662
663 return ret_val;
664 }
665
666 /**
667 * igb_set_default_fc - Set flow control default values
668 * @hw: pointer to the HW structure
669 *
670 * Read the EEPROM for the default values for flow control and store the
671 * values.
672 **/
673 static s32 igb_set_default_fc(struct e1000_hw *hw)
674 {
675 s32 ret_val = 0;
676 u16 nvm_data;
677
678 /*
679 * Read and store word 0x0F of the EEPROM. This word contains bits
680 * that determine the hardware's default PAUSE (flow control) mode,
681 * a bit that determines whether the HW defaults to enabling or
682 * disabling auto-negotiation, and the direction of the
683 * SW defined pins. If there is no SW over-ride of the flow
684 * control setting, then the variable hw->fc will
685 * be initialized based on a value in the EEPROM.
686 */
687 ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1,
688 &nvm_data);
689
690 if (ret_val) {
691 hw_dbg("NVM Read Error\n");
692 goto out;
693 }
694
695 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
696 hw->fc.type = e1000_fc_none;
697 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
698 NVM_WORD0F_ASM_DIR)
699 hw->fc.type = e1000_fc_tx_pause;
700 else
701 hw->fc.type = e1000_fc_full;
702
703 out:
704 return ret_val;
705 }
706
707 /**
708 * igb_force_mac_fc - Force the MAC's flow control settings
709 * @hw: pointer to the HW structure
710 *
711 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
712 * device control register to reflect the adapter settings. TFCE and RFCE
713 * need to be explicitly set by software when a copper PHY is used because
714 * autonegotiation is managed by the PHY rather than the MAC. Software must
715 * also configure these bits when link is forced on a fiber connection.
716 **/
717 s32 igb_force_mac_fc(struct e1000_hw *hw)
718 {
719 u32 ctrl;
720 s32 ret_val = 0;
721
722 ctrl = rd32(E1000_CTRL);
723
724 /*
725 * Because we didn't get link via the internal auto-negotiation
726 * mechanism (we either forced link or we got link via PHY
727 * auto-neg), we have to manually enable/disable transmit an
728 * receive flow control.
729 *
730 * The "Case" statement below enables/disable flow control
731 * according to the "hw->fc.type" parameter.
732 *
733 * The possible values of the "fc" parameter are:
734 * 0: Flow control is completely disabled
735 * 1: Rx flow control is enabled (we can receive pause
736 * frames but not send pause frames).
737 * 2: Tx flow control is enabled (we can send pause frames
738 * frames but we do not receive pause frames).
739 * 3: Both Rx and TX flow control (symmetric) is enabled.
740 * other: No other values should be possible at this point.
741 */
742 hw_dbg("hw->fc.type = %u\n", hw->fc.type);
743
744 switch (hw->fc.type) {
745 case e1000_fc_none:
746 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
747 break;
748 case e1000_fc_rx_pause:
749 ctrl &= (~E1000_CTRL_TFCE);
750 ctrl |= E1000_CTRL_RFCE;
751 break;
752 case e1000_fc_tx_pause:
753 ctrl &= (~E1000_CTRL_RFCE);
754 ctrl |= E1000_CTRL_TFCE;
755 break;
756 case e1000_fc_full:
757 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
758 break;
759 default:
760 hw_dbg("Flow control param set incorrectly\n");
761 ret_val = -E1000_ERR_CONFIG;
762 goto out;
763 }
764
765 wr32(E1000_CTRL, ctrl);
766
767 out:
768 return ret_val;
769 }
770
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/*
	 * Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/*
	 * Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/*
		 * Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
		                                   &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
		                                   &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg "
			         "has not completed.\n");
			goto out;
		}

		/*
		 * The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
		                                   &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY,
		                                   &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/*
		 * Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/*
			 * Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF  the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.original_type == e1000_fc_full) {
				hw->fc.type = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\r\n");
			} else {
				hw->fc.type = e1000_fc_rx_pause;
				hw_dbg("Flow Control = "
				       "RX PAUSE frames only.\r\n");
			}
		}
		/*
		 * For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		          (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
		          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
		          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.type = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
		}
		/*
		 * For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
		         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
		         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.type = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}
		/*
		 * Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.original_type == e1000_fc_none ||
		          hw->fc.original_type == e1000_fc_tx_pause) ||
		         hw->fc.strict_ieee) {
			hw->fc.type = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\r\n");
		} else {
			hw->fc.type = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}

		/*
		 * Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.type = e1000_fc_none;

		/*
		 * Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

out:
	return ret_val;
}
991
992 /**
993 * igb_get_speed_and_duplex_copper - Retreive current speed/duplex
994 * @hw: pointer to the HW structure
995 * @speed: stores the current speed
996 * @duplex: stores the current duplex
997 *
998 * Read the status register for the current speed/duplex and store the current
999 * speed and duplex for copper connections.
1000 **/
1001 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1002 u16 *duplex)
1003 {
1004 u32 status;
1005
1006 status = rd32(E1000_STATUS);
1007 if (status & E1000_STATUS_SPEED_1000) {
1008 *speed = SPEED_1000;
1009 hw_dbg("1000 Mbs, ");
1010 } else if (status & E1000_STATUS_SPEED_100) {
1011 *speed = SPEED_100;
1012 hw_dbg("100 Mbs, ");
1013 } else {
1014 *speed = SPEED_10;
1015 hw_dbg("10 Mbs, ");
1016 }
1017
1018 if (status & E1000_STATUS_FD) {
1019 *duplex = FULL_DUPLEX;
1020 hw_dbg("Full Duplex\n");
1021 } else {
1022 *duplex = HALF_DUPLEX;
1023 hw_dbg("Half Duplex\n");
1024 }
1025
1026 return 0;
1027 }
1028
1029 /**
1030 * igb_get_hw_semaphore - Acquire hardware semaphore
1031 * @hw: pointer to the HW structure
1032 *
1033 * Acquire the HW semaphore to access the PHY or NVM
1034 **/
1035 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1036 {
1037 u32 swsm;
1038 s32 ret_val = 0;
1039 s32 timeout = hw->nvm.word_size + 1;
1040 s32 i = 0;
1041
1042 /* Get the SW semaphore */
1043 while (i < timeout) {
1044 swsm = rd32(E1000_SWSM);
1045 if (!(swsm & E1000_SWSM_SMBI))
1046 break;
1047
1048 udelay(50);
1049 i++;
1050 }
1051
1052 if (i == timeout) {
1053 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1054 ret_val = -E1000_ERR_NVM;
1055 goto out;
1056 }
1057
1058 /* Get the FW semaphore. */
1059 for (i = 0; i < timeout; i++) {
1060 swsm = rd32(E1000_SWSM);
1061 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1062
1063 /* Semaphore acquired if bit latched */
1064 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1065 break;
1066
1067 udelay(50);
1068 }
1069
1070 if (i == timeout) {
1071 /* Release semaphores */
1072 igb_put_hw_semaphore(hw);
1073 hw_dbg("Driver can't access the NVM\n");
1074 ret_val = -E1000_ERR_NVM;
1075 goto out;
1076 }
1077
1078 out:
1079 return ret_val;
1080 }
1081
1082 /**
1083 * igb_put_hw_semaphore - Release hardware semaphore
1084 * @hw: pointer to the HW structure
1085 *
1086 * Release hardware semaphore used to access the PHY or NVM
1087 **/
1088 void igb_put_hw_semaphore(struct e1000_hw *hw)
1089 {
1090 u32 swsm;
1091
1092 swsm = rd32(E1000_SWSM);
1093
1094 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1095
1096 wr32(E1000_SWSM, swsm);
1097 }
1098
1099 /**
1100 * igb_get_auto_rd_done - Check for auto read completion
1101 * @hw: pointer to the HW structure
1102 *
1103 * Check EEPROM for Auto Read done bit.
1104 **/
1105 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1106 {
1107 s32 i = 0;
1108 s32 ret_val = 0;
1109
1110
1111 while (i < AUTO_READ_DONE_TIMEOUT) {
1112 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1113 break;
1114 msleep(1);
1115 i++;
1116 }
1117
1118 if (i == AUTO_READ_DONE_TIMEOUT) {
1119 hw_dbg("Auto read by HW from NVM has not completed.\n");
1120 ret_val = -E1000_ERR_RESET;
1121 goto out;
1122 }
1123
1124 out:
1125 return ret_val;
1126 }
1127
1128 /**
1129 * igb_valid_led_default - Verify a valid default LED config
1130 * @hw: pointer to the HW structure
1131 * @data: pointer to the NVM (EEPROM)
1132 *
1133 * Read the EEPROM for the current default LED configuration. If the
1134 * LED configuration is not valid, set to a valid LED configuration.
1135 **/
1136 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1137 {
1138 s32 ret_val;
1139
1140 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1141 if (ret_val) {
1142 hw_dbg("NVM Read Error\n");
1143 goto out;
1144 }
1145
1146 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1147 *data = ID_LED_DEFAULT;
1148
1149 out:
1150 return ret_val;
1151 }
1152
/**
 *  igb_id_led_init - Initialize LED on/off register shadows
 *  @hw: pointer to the HW structure
 *
 *  Reads the default LED configuration word from the NVM and derives
 *  two LEDCTL shadow values from the current register contents:
 *  ledctl_mode1 (written by igb_led_off) and ledctl_mode2 (used by
 *  igb_blink_led).  Returns 0 on success or the NVM read error code.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;	/* one LED's byte in LEDCTL */
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;		/* one LED's nibble in NVM word */

	/* Fetch (and sanitize) the LED settings word from the NVM */
	ret_val = igb_valid_led_default(hw, &data);
	if (ret_val)
		goto out;

	/* Both modes start from the current hardware LED configuration */
	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each of the 4 LEDs is described by a 4-bit nibble of "data";
	 * each controls an 8-bit field of the LEDCTL register.
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		/* Nibble values selecting this LED's mode1 state */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		/* Same nibble, independently selecting the mode2 state
		 * (note some values appear in both switches).
		 */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
1217
1218 /**
1219 * igb_cleanup_led - Set LED config to default operation
1220 * @hw: pointer to the HW structure
1221 *
1222 * Remove the current LED configuration and set the LED configuration
1223 * to the default value, saved from the EEPROM.
1224 **/
1225 s32 igb_cleanup_led(struct e1000_hw *hw)
1226 {
1227 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1228 return 0;
1229 }
1230
1231 /**
1232 * igb_blink_led - Blink LED
1233 * @hw: pointer to the HW structure
1234 *
1235 * Blink the led's which are set to be on.
1236 **/
1237 s32 igb_blink_led(struct e1000_hw *hw)
1238 {
1239 u32 ledctl_blink = 0;
1240 u32 i;
1241
1242 if (hw->phy.media_type == e1000_media_type_fiber) {
1243 /* always blink LED0 for PCI-E fiber */
1244 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1245 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1246 } else {
1247 /*
1248 * set the blink bit for each LED that's "on" (0x0E)
1249 * in ledctl_mode2
1250 */
1251 ledctl_blink = hw->mac.ledctl_mode2;
1252 for (i = 0; i < 4; i++)
1253 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1254 E1000_LEDCTL_MODE_LED_ON)
1255 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1256 (i * 8));
1257 }
1258
1259 wr32(E1000_LEDCTL, ledctl_blink);
1260
1261 return 0;
1262 }
1263
1264 /**
1265 * igb_led_off - Turn LED off
1266 * @hw: pointer to the HW structure
1267 *
1268 * Turn LED off.
1269 **/
1270 s32 igb_led_off(struct e1000_hw *hw)
1271 {
1272 u32 ctrl;
1273
1274 switch (hw->phy.media_type) {
1275 case e1000_media_type_fiber:
1276 ctrl = rd32(E1000_CTRL);
1277 ctrl |= E1000_CTRL_SWDPIN0;
1278 ctrl |= E1000_CTRL_SWDPIO0;
1279 wr32(E1000_CTRL, ctrl);
1280 break;
1281 case e1000_media_type_copper:
1282 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1283 break;
1284 default:
1285 break;
1286 }
1287
1288 return 0;
1289 }
1290
1291 /**
1292 * igb_disable_pcie_master - Disables PCI-express master access
1293 * @hw: pointer to the HW structure
1294 *
1295 * Returns 0 (0) if successful, else returns -10
1296 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued
1297 * the master requests to be disabled.
1298 *
1299 * Disables PCI-Express master access and verifies there are no pending
1300 * requests.
1301 **/
1302 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1303 {
1304 u32 ctrl;
1305 s32 timeout = MASTER_DISABLE_TIMEOUT;
1306 s32 ret_val = 0;
1307
1308 if (hw->bus.type != e1000_bus_type_pci_express)
1309 goto out;
1310
1311 ctrl = rd32(E1000_CTRL);
1312 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1313 wr32(E1000_CTRL, ctrl);
1314
1315 while (timeout) {
1316 if (!(rd32(E1000_STATUS) &
1317 E1000_STATUS_GIO_MASTER_ENABLE))
1318 break;
1319 udelay(100);
1320 timeout--;
1321 }
1322
1323 if (!timeout) {
1324 hw_dbg("Master requests are pending.\n");
1325 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1326 goto out;
1327 }
1328
1329 out:
1330 return ret_val;
1331 }
1332
1333 /**
1334 * igb_reset_adaptive - Reset Adaptive Interframe Spacing
1335 * @hw: pointer to the HW structure
1336 *
1337 * Reset the Adaptive Interframe Spacing throttle to default values.
1338 **/
1339 void igb_reset_adaptive(struct e1000_hw *hw)
1340 {
1341 struct e1000_mac_info *mac = &hw->mac;
1342
1343 if (!mac->adaptive_ifs) {
1344 hw_dbg("Not in Adaptive IFS mode!\n");
1345 goto out;
1346 }
1347
1348 if (!mac->ifs_params_forced) {
1349 mac->current_ifs_val = 0;
1350 mac->ifs_min_val = IFS_MIN;
1351 mac->ifs_max_val = IFS_MAX;
1352 mac->ifs_step_size = IFS_STEP;
1353 mac->ifs_ratio = IFS_RATIO;
1354 }
1355
1356 mac->in_ifs_mode = false;
1357 wr32(E1000_AIT, 0);
1358 out:
1359 return;
1360 }
1361
1362 /**
1363 * igb_update_adaptive - Update Adaptive Interframe Spacing
1364 * @hw: pointer to the HW structure
1365 *
1366 * Update the Adaptive Interframe Spacing Throttle value based on the
1367 * time between transmitted packets and time between collisions.
1368 **/
1369 void igb_update_adaptive(struct e1000_hw *hw)
1370 {
1371 struct e1000_mac_info *mac = &hw->mac;
1372
1373 if (!mac->adaptive_ifs) {
1374 hw_dbg("Not in Adaptive IFS mode!\n");
1375 goto out;
1376 }
1377
1378 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1379 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1380 mac->in_ifs_mode = true;
1381 if (mac->current_ifs_val < mac->ifs_max_val) {
1382 if (!mac->current_ifs_val)
1383 mac->current_ifs_val = mac->ifs_min_val;
1384 else
1385 mac->current_ifs_val +=
1386 mac->ifs_step_size;
1387 wr32(E1000_AIT,
1388 mac->current_ifs_val);
1389 }
1390 }
1391 } else {
1392 if (mac->in_ifs_mode &&
1393 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1394 mac->current_ifs_val = 0;
1395 mac->in_ifs_mode = false;
1396 wr32(E1000_AIT, 0);
1397 }
1398 }
1399 out:
1400 return;
1401 }
1402
1403 /**
1404 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1405 * @hw: pointer to the HW structure
1406 *
1407 * Verify that when not using auto-negotitation that MDI/MDIx is correctly
1408 * set, which is forced to MDI mode only.
1409 **/
1410 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1411 {
1412 s32 ret_val = 0;
1413
1414 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1415 hw_dbg("Invalid MDI setting detected\n");
1416 hw->phy.mdix = 1;
1417 ret_val = -E1000_ERR_CONFIG;
1418 goto out;
1419 }
1420
1421 out:
1422 return ret_val;
1423 }
1424
1425 /**
1426 * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
1427 * @hw: pointer to the HW structure
1428 * @reg: 32bit register offset such as E1000_SCTL
1429 * @offset: register offset to write to
1430 * @data: data to write at register offset
1431 *
1432 * Writes an address/data control type register. There are several of these
1433 * and they all have the format address << 8 | data and bit 31 is polled for
1434 * completion.
1435 **/
1436 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1437 u32 offset, u8 data)
1438 {
1439 u32 i, regvalue = 0;
1440 s32 ret_val = 0;
1441
1442 /* Set up the address and data */
1443 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1444 wr32(reg, regvalue);
1445
1446 /* Poll the ready bit to see if the MDI read completed */
1447 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1448 udelay(5);
1449 regvalue = rd32(reg);
1450 if (regvalue & E1000_GEN_CTL_READY)
1451 break;
1452 }
1453 if (!(regvalue & E1000_GEN_CTL_READY)) {
1454 hw_dbg("Reg %08x did not indicate ready\n", reg);
1455 ret_val = -E1000_ERR_PHY;
1456 goto out;
1457 }
1458
1459 out:
1460 return ret_val;
1461 }
1462
1463 /**
1464 * igb_enable_mng_pass_thru - Enable processing of ARP's
1465 * @hw: pointer to the HW structure
1466 *
1467 * Verifies the hardware needs to allow ARPs to be processed by the host.
1468 **/
1469 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1470 {
1471 u32 manc;
1472 u32 fwsm, factps;
1473 bool ret_val = false;
1474
1475 if (!hw->mac.asf_firmware_present)
1476 goto out;
1477
1478 manc = rd32(E1000_MANC);
1479
1480 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
1481 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
1482 goto out;
1483
1484 if (hw->mac.arc_subsystem_valid) {
1485 fwsm = rd32(E1000_FWSM);
1486 factps = rd32(E1000_FACTPS);
1487
1488 if (!(factps & E1000_FACTPS_MNGCG) &&
1489 ((fwsm & E1000_FWSM_MODE_MASK) ==
1490 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1491 ret_val = true;
1492 goto out;
1493 }
1494 } else {
1495 if ((manc & E1000_MANC_SMBUS_EN) &&
1496 !(manc & E1000_MANC_ASF_EN)) {
1497 ret_val = true;
1498 goto out;
1499 }
1500 }
1501
1502 out:
1503 return ret_val;
1504 }