igb: Add i2c interface to igb.
[deliverable/linux.git] / drivers / net / ethernet / intel / igb / e1000_82575.c
CommitLineData
9d5c8243
AK
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
6e861326 4 Copyright(c) 2007-2012 Intel Corporation.
9d5c8243
AK
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* e1000_82575
29 * e1000_82576
30 */
31
82bbcdeb
JP
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
9d5c8243 34#include <linux/types.h>
2d064c06 35#include <linux/if_ether.h>
441fc6fd 36#include <linux/i2c.h>
9d5c8243
AK
37
38#include "e1000_mac.h"
39#include "e1000_82575.h"
f96a8a0b 40#include "e1000_i210.h"
9d5c8243
AK
41
42static s32 igb_get_invariants_82575(struct e1000_hw *);
43static s32 igb_acquire_phy_82575(struct e1000_hw *);
44static void igb_release_phy_82575(struct e1000_hw *);
45static s32 igb_acquire_nvm_82575(struct e1000_hw *);
46static void igb_release_nvm_82575(struct e1000_hw *);
47static s32 igb_check_for_link_82575(struct e1000_hw *);
48static s32 igb_get_cfg_done_82575(struct e1000_hw *);
49static s32 igb_init_hw_82575(struct e1000_hw *);
50static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
51static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
bb2ac47b
AD
52static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
53static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
9d5c8243 54static s32 igb_reset_hw_82575(struct e1000_hw *);
bb2ac47b 55static s32 igb_reset_hw_82580(struct e1000_hw *);
9d5c8243 56static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
da02cde1
CW
57static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
58static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
9d5c8243 59static s32 igb_setup_copper_link_82575(struct e1000_hw *);
2fb02a26 60static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
9d5c8243
AK
61static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
62static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
63static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
9d5c8243
AK
64static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
65 u16 *);
66static s32 igb_get_phy_id_82575(struct e1000_hw *);
67static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
68static bool igb_sgmii_active_82575(struct e1000_hw *);
69static s32 igb_reset_init_script_82575(struct e1000_hw *);
70static s32 igb_read_mac_addr_82575(struct e1000_hw *);
009bc06e 71static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
99870a73 72static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
4322e561
CW
73static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
74static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
4322e561
CW
75static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
76static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
bb2ac47b
AD
77static const u16 e1000_82580_rxpbs_table[] =
78 { 36, 72, 144, 1, 2, 4, 8, 16,
79 35, 70, 140 };
80#define E1000_82580_RXPBS_TABLE_SIZE \
81 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
82
4085f746
NN
83/**
84 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
85 * @hw: pointer to the HW structure
86 *
87 * Called to determine if the I2C pins are being used for I2C or as an
88 * external MDIO interface since the two options are mutually exclusive.
89 **/
90static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
91{
92 u32 reg = 0;
93 bool ext_mdio = false;
94
95 switch (hw->mac.type) {
96 case e1000_82575:
97 case e1000_82576:
98 reg = rd32(E1000_MDIC);
99 ext_mdio = !!(reg & E1000_MDIC_DEST);
100 break;
101 case e1000_82580:
102 case e1000_i350:
f96a8a0b
CW
103 case e1000_i210:
104 case e1000_i211:
4085f746
NN
105 reg = rd32(E1000_MDICNFG);
106 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
107 break;
108 default:
109 break;
110 }
111 return ext_mdio;
112}
113
9d5c8243
AK
114static s32 igb_get_invariants_82575(struct e1000_hw *hw)
115{
116 struct e1000_phy_info *phy = &hw->phy;
117 struct e1000_nvm_info *nvm = &hw->nvm;
118 struct e1000_mac_info *mac = &hw->mac;
c1889bfe 119 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
9d5c8243
AK
120 u32 eecd;
121 s32 ret_val;
122 u16 size;
123 u32 ctrl_ext = 0;
124
125 switch (hw->device_id) {
126 case E1000_DEV_ID_82575EB_COPPER:
127 case E1000_DEV_ID_82575EB_FIBER_SERDES:
128 case E1000_DEV_ID_82575GB_QUAD_COPPER:
129 mac->type = e1000_82575;
130 break;
2d064c06 131 case E1000_DEV_ID_82576:
9eb2341d 132 case E1000_DEV_ID_82576_NS:
747d49ba 133 case E1000_DEV_ID_82576_NS_SERDES:
2d064c06
AD
134 case E1000_DEV_ID_82576_FIBER:
135 case E1000_DEV_ID_82576_SERDES:
c8ea5ea9 136 case E1000_DEV_ID_82576_QUAD_COPPER:
b894fa26 137 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
4703bf73 138 case E1000_DEV_ID_82576_SERDES_QUAD:
2d064c06
AD
139 mac->type = e1000_82576;
140 break;
bb2ac47b
AD
141 case E1000_DEV_ID_82580_COPPER:
142 case E1000_DEV_ID_82580_FIBER:
6493d24f 143 case E1000_DEV_ID_82580_QUAD_FIBER:
bb2ac47b
AD
144 case E1000_DEV_ID_82580_SERDES:
145 case E1000_DEV_ID_82580_SGMII:
146 case E1000_DEV_ID_82580_COPPER_DUAL:
308fb39a
JG
147 case E1000_DEV_ID_DH89XXCC_SGMII:
148 case E1000_DEV_ID_DH89XXCC_SERDES:
1b5dda33
GJ
149 case E1000_DEV_ID_DH89XXCC_BACKPLANE:
150 case E1000_DEV_ID_DH89XXCC_SFP:
bb2ac47b
AD
151 mac->type = e1000_82580;
152 break;
d2ba2ed8
AD
153 case E1000_DEV_ID_I350_COPPER:
154 case E1000_DEV_ID_I350_FIBER:
155 case E1000_DEV_ID_I350_SERDES:
156 case E1000_DEV_ID_I350_SGMII:
157 mac->type = e1000_i350;
158 break;
f96a8a0b
CW
159 case E1000_DEV_ID_I210_COPPER:
160 case E1000_DEV_ID_I210_COPPER_OEM1:
161 case E1000_DEV_ID_I210_COPPER_IT:
162 case E1000_DEV_ID_I210_FIBER:
163 case E1000_DEV_ID_I210_SERDES:
164 case E1000_DEV_ID_I210_SGMII:
165 mac->type = e1000_i210;
166 break;
167 case E1000_DEV_ID_I211_COPPER:
168 mac->type = e1000_i211;
169 break;
9d5c8243
AK
170 default:
171 return -E1000_ERR_MAC_INIT;
172 break;
173 }
174
9d5c8243
AK
175 /* Set media type */
176 /*
177 * The 82575 uses bits 22:23 for link mode. The mode can be changed
178 * based on the EEPROM. We cannot rely upon device ID. There
179 * is no distinguishable difference between fiber and internal
180 * SerDes mode on the 82575. There can be an external PHY attached
181 * on the SGMII interface. For this, we'll set sgmii_active to true.
182 */
183 phy->media_type = e1000_media_type_copper;
184 dev_spec->sgmii_active = false;
185
186 ctrl_ext = rd32(E1000_CTRL_EXT);
2fb02a26
AD
187 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
188 case E1000_CTRL_EXT_LINK_MODE_SGMII:
9d5c8243 189 dev_spec->sgmii_active = true;
2fb02a26 190 break;
bb2ac47b 191 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
2fb02a26
AD
192 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
193 hw->phy.media_type = e1000_media_type_internal_serdes;
2fb02a26
AD
194 break;
195 default:
2fb02a26 196 break;
9d5c8243 197 }
2fb02a26 198
9d5c8243
AK
199 /* Set mta register count */
200 mac->mta_reg_count = 128;
201 /* Set rar entry count */
f96a8a0b
CW
202 switch (mac->type) {
203 case e1000_82576:
2d064c06 204 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
f96a8a0b
CW
205 break;
206 case e1000_82580:
bb2ac47b 207 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
f96a8a0b
CW
208 break;
209 case e1000_i350:
d2ba2ed8 210 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
f96a8a0b
CW
211 break;
212 default:
213 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
214 break;
215 }
bb2ac47b 216 /* reset */
d2ba2ed8 217 if (mac->type >= e1000_82580)
bb2ac47b
AD
218 mac->ops.reset_hw = igb_reset_hw_82580;
219 else
220 mac->ops.reset_hw = igb_reset_hw_82575;
f96a8a0b
CW
221
222 if (mac->type >= e1000_i210) {
223 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
224 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
225 } else {
226 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
227 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
228 }
229
9d5c8243
AK
230 /* Set if part includes ASF firmware */
231 mac->asf_firmware_present = true;
232 /* Set if manageability features are enabled. */
233 mac->arc_subsystem_valid =
234 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
235 ? true : false;
f96a8a0b
CW
236 /* enable EEE on i350 parts and later parts */
237 if (mac->type >= e1000_i350)
09b068d4
CW
238 dev_spec->eee_disable = false;
239 else
240 dev_spec->eee_disable = true;
9d5c8243
AK
241 /* physical interface link setup */
242 mac->ops.setup_physical_interface =
243 (hw->phy.media_type == e1000_media_type_copper)
244 ? igb_setup_copper_link_82575
2fb02a26 245 : igb_setup_serdes_link_82575;
9d5c8243
AK
246
247 /* NVM initialization */
248 eecd = rd32(E1000_EECD);
9d5c8243
AK
249 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
250 E1000_EECD_SIZE_EX_SHIFT);
251
252 /*
253 * Added to a constant, "size" becomes the left-shift value
254 * for setting word_size.
255 */
256 size += NVM_WORD_SIZE_BASE_SHIFT;
5c3cad75 257
925b11f7
CW
258 /*
259 * Check for invalid size
260 */
261 if ((hw->mac.type == e1000_82576) && (size > 15)) {
262 pr_notice("The NVM size is not valid, defaulting to 32K\n");
263 size = 15;
264 }
265
f96a8a0b
CW
266 nvm->word_size = 1 << size;
267 if (hw->mac.type < e1000_i210) {
268 nvm->opcode_bits = 8;
269 nvm->delay_usec = 1;
270 switch (nvm->override) {
271 case e1000_nvm_override_spi_large:
272 nvm->page_size = 32;
273 nvm->address_bits = 16;
274 break;
275 case e1000_nvm_override_spi_small:
276 nvm->page_size = 8;
277 nvm->address_bits = 8;
278 break;
279 default:
280 nvm->page_size = eecd
281 & E1000_EECD_ADDR_BITS ? 32 : 8;
282 nvm->address_bits = eecd
283 & E1000_EECD_ADDR_BITS ? 16 : 8;
284 break;
285 }
286 if (nvm->word_size == (1 << 15))
287 nvm->page_size = 128;
288
289 nvm->type = e1000_nvm_eeprom_spi;
290 } else
291 nvm->type = e1000_nvm_flash_hw;
292
4322e561 293 /* NVM Function Pointers */
4322e561
CW
294 switch (hw->mac.type) {
295 case e1000_82580:
296 nvm->ops.validate = igb_validate_nvm_checksum_82580;
297 nvm->ops.update = igb_update_nvm_checksum_82580;
f96a8a0b
CW
298 nvm->ops.acquire = igb_acquire_nvm_82575;
299 nvm->ops.release = igb_release_nvm_82575;
300 if (nvm->word_size < (1 << 15))
301 nvm->ops.read = igb_read_nvm_eerd;
302 else
303 nvm->ops.read = igb_read_nvm_spi;
304 nvm->ops.write = igb_write_nvm_spi;
4322e561
CW
305 break;
306 case e1000_i350:
307 nvm->ops.validate = igb_validate_nvm_checksum_i350;
308 nvm->ops.update = igb_update_nvm_checksum_i350;
f96a8a0b
CW
309 nvm->ops.acquire = igb_acquire_nvm_82575;
310 nvm->ops.release = igb_release_nvm_82575;
311 if (nvm->word_size < (1 << 15))
312 nvm->ops.read = igb_read_nvm_eerd;
313 else
314 nvm->ops.read = igb_read_nvm_spi;
315 nvm->ops.write = igb_write_nvm_spi;
316 break;
317 case e1000_i210:
318 nvm->ops.validate = igb_validate_nvm_checksum_i210;
319 nvm->ops.update = igb_update_nvm_checksum_i210;
320 nvm->ops.acquire = igb_acquire_nvm_i210;
321 nvm->ops.release = igb_release_nvm_i210;
322 nvm->ops.read = igb_read_nvm_srrd_i210;
ede4126e 323 nvm->ops.write = igb_write_nvm_srwr_i210;
f96a8a0b
CW
324 nvm->ops.valid_led_default = igb_valid_led_default_i210;
325 break;
326 case e1000_i211:
327 nvm->ops.acquire = igb_acquire_nvm_i210;
328 nvm->ops.release = igb_release_nvm_i210;
329 nvm->ops.read = igb_read_nvm_i211;
330 nvm->ops.valid_led_default = igb_valid_led_default_i210;
331 nvm->ops.validate = NULL;
332 nvm->ops.update = NULL;
333 nvm->ops.write = NULL;
4322e561
CW
334 break;
335 default:
336 nvm->ops.validate = igb_validate_nvm_checksum;
337 nvm->ops.update = igb_update_nvm_checksum;
f96a8a0b
CW
338 nvm->ops.acquire = igb_acquire_nvm_82575;
339 nvm->ops.release = igb_release_nvm_82575;
340 if (nvm->word_size < (1 << 15))
341 nvm->ops.read = igb_read_nvm_eerd;
342 else
343 nvm->ops.read = igb_read_nvm_spi;
344 nvm->ops.write = igb_write_nvm_spi;
345 break;
4322e561 346 }
9d5c8243 347
6b78bb1d
CW
348 /* if part supports SR-IOV then initialize mailbox parameters */
349 switch (mac->type) {
350 case e1000_82576:
351 case e1000_i350:
a0c98605 352 igb_init_mbx_params_pf(hw);
6b78bb1d
CW
353 break;
354 default:
355 break;
356 }
a0c98605 357
9d5c8243
AK
358 /* setup PHY parameters */
359 if (phy->media_type != e1000_media_type_copper) {
360 phy->type = e1000_phy_none;
361 return 0;
362 }
363
364 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
365 phy->reset_delay_us = 100;
366
99870a73
AD
367 ctrl_ext = rd32(E1000_CTRL_EXT);
368
9d5c8243 369 /* PHY function pointers */
99870a73 370 if (igb_sgmii_active_82575(hw)) {
4085f746 371 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
99870a73
AD
372 ctrl_ext |= E1000_CTRL_I2C_ENA;
373 } else {
4085f746 374 phy->ops.reset = igb_phy_hw_reset;
99870a73
AD
375 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
376 }
377
378 wr32(E1000_CTRL_EXT, ctrl_ext);
379 igb_reset_mdicnfg_82580(hw);
4085f746
NN
380
381 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
382 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
383 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
f96a8a0b
CW
384 } else if ((hw->mac.type == e1000_82580)
385 || (hw->mac.type == e1000_i350)) {
4085f746
NN
386 phy->ops.read_reg = igb_read_phy_reg_82580;
387 phy->ops.write_reg = igb_write_phy_reg_82580;
f96a8a0b
CW
388 } else if (hw->phy.type >= e1000_phy_i210) {
389 phy->ops.read_reg = igb_read_phy_reg_gs40g;
390 phy->ops.write_reg = igb_write_phy_reg_gs40g;
9d5c8243 391 } else {
4085f746
NN
392 phy->ops.read_reg = igb_read_phy_reg_igp;
393 phy->ops.write_reg = igb_write_phy_reg_igp;
9d5c8243
AK
394 }
395
19e588e7
AD
396 /* set lan id */
397 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
398 E1000_STATUS_FUNC_SHIFT;
399
9d5c8243
AK
400 /* Set phy->phy_addr and phy->id. */
401 ret_val = igb_get_phy_id_82575(hw);
402 if (ret_val)
403 return ret_val;
404
405 /* Verify phy id and set remaining function pointers */
406 switch (phy->id) {
308fb39a
JG
407 case I347AT4_E_PHY_ID:
408 case M88E1112_E_PHY_ID:
9d5c8243
AK
409 case M88E1111_I_PHY_ID:
410 phy->type = e1000_phy_m88;
411 phy->ops.get_phy_info = igb_get_phy_info_m88;
308fb39a
JG
412
413 if (phy->id == I347AT4_E_PHY_ID ||
414 phy->id == M88E1112_E_PHY_ID)
415 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
416 else
417 phy->ops.get_cable_length = igb_get_cable_length_m88;
418
f96a8a0b
CW
419 if (phy->id == I210_I_PHY_ID) {
420 phy->ops.get_cable_length =
421 igb_get_cable_length_m88_gen2;
422 phy->ops.set_d0_lplu_state =
423 igb_set_d0_lplu_state_82580;
424 phy->ops.set_d3_lplu_state =
425 igb_set_d3_lplu_state_82580;
426 }
9d5c8243
AK
427 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
428 break;
429 case IGP03E1000_E_PHY_ID:
430 phy->type = e1000_phy_igp_3;
431 phy->ops.get_phy_info = igb_get_phy_info_igp;
432 phy->ops.get_cable_length = igb_get_cable_length_igp_2;
433 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
434 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
435 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
436 break;
bb2ac47b 437 case I82580_I_PHY_ID:
d2ba2ed8 438 case I350_I_PHY_ID:
bb2ac47b
AD
439 phy->type = e1000_phy_82580;
440 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
441 phy->ops.get_cable_length = igb_get_cable_length_82580;
442 phy->ops.get_phy_info = igb_get_phy_info_82580;
da02cde1
CW
443 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
444 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
bb2ac47b 445 break;
f96a8a0b
CW
446 case I210_I_PHY_ID:
447 phy->type = e1000_phy_i210;
448 phy->ops.get_phy_info = igb_get_phy_info_m88;
449 phy->ops.check_polarity = igb_check_polarity_m88;
450 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
451 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
452 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
453 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
454 break;
9d5c8243
AK
455 default:
456 return -E1000_ERR_PHY;
457 }
458
459 return 0;
460}
461
462/**
733596be 463 * igb_acquire_phy_82575 - Acquire rights to access PHY
9d5c8243
AK
464 * @hw: pointer to the HW structure
465 *
466 * Acquire access rights to the correct PHY. This is a
467 * function pointer entry point called by the api module.
468 **/
469static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
470{
008c3422 471 u16 mask = E1000_SWFW_PHY0_SM;
9d5c8243 472
008c3422
AD
473 if (hw->bus.func == E1000_FUNC_1)
474 mask = E1000_SWFW_PHY1_SM;
ede3ef0d
NN
475 else if (hw->bus.func == E1000_FUNC_2)
476 mask = E1000_SWFW_PHY2_SM;
477 else if (hw->bus.func == E1000_FUNC_3)
478 mask = E1000_SWFW_PHY3_SM;
9d5c8243 479
f96a8a0b 480 return hw->mac.ops.acquire_swfw_sync(hw, mask);
9d5c8243
AK
481}
482
483/**
733596be 484 * igb_release_phy_82575 - Release rights to access PHY
9d5c8243
AK
485 * @hw: pointer to the HW structure
486 *
487 * A wrapper to release access rights to the correct PHY. This is a
488 * function pointer entry point called by the api module.
489 **/
490static void igb_release_phy_82575(struct e1000_hw *hw)
491{
008c3422
AD
492 u16 mask = E1000_SWFW_PHY0_SM;
493
494 if (hw->bus.func == E1000_FUNC_1)
495 mask = E1000_SWFW_PHY1_SM;
ede3ef0d
NN
496 else if (hw->bus.func == E1000_FUNC_2)
497 mask = E1000_SWFW_PHY2_SM;
498 else if (hw->bus.func == E1000_FUNC_3)
499 mask = E1000_SWFW_PHY3_SM;
9d5c8243 500
f96a8a0b 501 hw->mac.ops.release_swfw_sync(hw, mask);
9d5c8243
AK
502}
503
504/**
733596be 505 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
9d5c8243
AK
506 * @hw: pointer to the HW structure
507 * @offset: register offset to be read
508 * @data: pointer to the read data
509 *
510 * Reads the PHY register at offset using the serial gigabit media independent
511 * interface and stores the retrieved information in data.
512 **/
513static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
514 u16 *data)
515{
bf6f7a92 516 s32 ret_val = -E1000_ERR_PARAM;
9d5c8243
AK
517
518 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
652fff32 519 hw_dbg("PHY Address %u is out of range\n", offset);
bf6f7a92 520 goto out;
9d5c8243
AK
521 }
522
bf6f7a92
AD
523 ret_val = hw->phy.ops.acquire(hw);
524 if (ret_val)
525 goto out;
9d5c8243 526
bf6f7a92 527 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
9d5c8243 528
bf6f7a92
AD
529 hw->phy.ops.release(hw);
530
531out:
532 return ret_val;
9d5c8243
AK
533}
534
535/**
733596be 536 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
9d5c8243
AK
537 * @hw: pointer to the HW structure
538 * @offset: register offset to write to
539 * @data: data to write at register offset
540 *
541 * Writes the data to PHY register at the offset using the serial gigabit
542 * media independent interface.
543 **/
544static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
545 u16 data)
546{
bf6f7a92
AD
547 s32 ret_val = -E1000_ERR_PARAM;
548
9d5c8243
AK
549
550 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
652fff32 551 hw_dbg("PHY Address %d is out of range\n", offset);
bf6f7a92 552 goto out;
9d5c8243
AK
553 }
554
bf6f7a92
AD
555 ret_val = hw->phy.ops.acquire(hw);
556 if (ret_val)
557 goto out;
9d5c8243 558
bf6f7a92 559 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
9d5c8243 560
bf6f7a92
AD
561 hw->phy.ops.release(hw);
562
563out:
564 return ret_val;
9d5c8243
AK
565}
566
567/**
733596be 568 * igb_get_phy_id_82575 - Retrieve PHY addr and id
9d5c8243
AK
569 * @hw: pointer to the HW structure
570 *
652fff32 571 * Retrieves the PHY address and ID for both PHY's which do and do not use
9d5c8243
AK
572 * sgmi interface.
573 **/
574static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
575{
576 struct e1000_phy_info *phy = &hw->phy;
577 s32 ret_val = 0;
578 u16 phy_id;
2fb02a26 579 u32 ctrl_ext;
4085f746 580 u32 mdic;
9d5c8243
AK
581
582 /*
583 * For SGMII PHYs, we try the list of possible addresses until
584 * we find one that works. For non-SGMII PHYs
585 * (e.g. integrated copper PHYs), an address of 1 should
586 * work. The result of this function should mean phy->phy_addr
587 * and phy->id are set correctly.
588 */
589 if (!(igb_sgmii_active_82575(hw))) {
590 phy->addr = 1;
591 ret_val = igb_get_phy_id(hw);
592 goto out;
593 }
594
4085f746
NN
595 if (igb_sgmii_uses_mdio_82575(hw)) {
596 switch (hw->mac.type) {
597 case e1000_82575:
598 case e1000_82576:
599 mdic = rd32(E1000_MDIC);
600 mdic &= E1000_MDIC_PHY_MASK;
601 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
602 break;
603 case e1000_82580:
604 case e1000_i350:
f96a8a0b
CW
605 case e1000_i210:
606 case e1000_i211:
4085f746
NN
607 mdic = rd32(E1000_MDICNFG);
608 mdic &= E1000_MDICNFG_PHY_MASK;
609 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
610 break;
611 default:
612 ret_val = -E1000_ERR_PHY;
613 goto out;
614 break;
615 }
616 ret_val = igb_get_phy_id(hw);
617 goto out;
618 }
619
2fb02a26
AD
620 /* Power on sgmii phy if it is disabled */
621 ctrl_ext = rd32(E1000_CTRL_EXT);
622 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
623 wrfl();
624 msleep(300);
625
9d5c8243
AK
626 /*
627 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
628 * Therefore, we need to test 1-7
629 */
630 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
631 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
632 if (ret_val == 0) {
652fff32
AK
633 hw_dbg("Vendor ID 0x%08X read at address %u\n",
634 phy_id, phy->addr);
9d5c8243
AK
635 /*
636 * At the time of this writing, The M88 part is
637 * the only supported SGMII PHY product.
638 */
639 if (phy_id == M88_VENDOR)
640 break;
641 } else {
652fff32 642 hw_dbg("PHY address %u was unreadable\n", phy->addr);
9d5c8243
AK
643 }
644 }
645
646 /* A valid PHY type couldn't be found. */
647 if (phy->addr == 8) {
648 phy->addr = 0;
649 ret_val = -E1000_ERR_PHY;
650 goto out;
2fb02a26
AD
651 } else {
652 ret_val = igb_get_phy_id(hw);
9d5c8243
AK
653 }
654
2fb02a26
AD
655 /* restore previous sfp cage power state */
656 wr32(E1000_CTRL_EXT, ctrl_ext);
9d5c8243
AK
657
658out:
659 return ret_val;
660}
661
662/**
733596be 663 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
9d5c8243
AK
664 * @hw: pointer to the HW structure
665 *
666 * Resets the PHY using the serial gigabit media independent interface.
667 **/
668static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
669{
670 s32 ret_val;
671
672 /*
673 * This isn't a true "hard" reset, but is the only reset
674 * available to us at this time.
675 */
676
652fff32 677 hw_dbg("Soft resetting SGMII attached PHY...\n");
9d5c8243
AK
678
679 /*
680 * SFP documentation requires the following to configure the SPF module
681 * to work on SGMII. No further documentation is given.
682 */
a8d2a0c2 683 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
9d5c8243
AK
684 if (ret_val)
685 goto out;
686
687 ret_val = igb_phy_sw_reset(hw);
688
689out:
690 return ret_val;
691}
692
693/**
733596be 694 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
9d5c8243
AK
695 * @hw: pointer to the HW structure
696 * @active: true to enable LPLU, false to disable
697 *
698 * Sets the LPLU D0 state according to the active flag. When
699 * activating LPLU this function also disables smart speed
700 * and vice versa. LPLU will not be activated unless the
701 * device autonegotiation advertisement meets standards of
702 * either 10 or 10/100 or 10/100/1000 at all duplexes.
703 * This is a function pointer entry point only called by
704 * PHY setup routines.
705 **/
706static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
707{
708 struct e1000_phy_info *phy = &hw->phy;
709 s32 ret_val;
710 u16 data;
711
a8d2a0c2 712 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
9d5c8243
AK
713 if (ret_val)
714 goto out;
715
716 if (active) {
717 data |= IGP02E1000_PM_D0_LPLU;
a8d2a0c2 718 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
652fff32 719 data);
9d5c8243
AK
720 if (ret_val)
721 goto out;
722
723 /* When LPLU is enabled, we should disable SmartSpeed */
a8d2a0c2 724 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
652fff32 725 &data);
9d5c8243 726 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
a8d2a0c2 727 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
652fff32 728 data);
9d5c8243
AK
729 if (ret_val)
730 goto out;
731 } else {
732 data &= ~IGP02E1000_PM_D0_LPLU;
a8d2a0c2 733 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
652fff32 734 data);
9d5c8243
AK
735 /*
736 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
737 * during Dx states where the power conservation is most
738 * important. During driver activity we should enable
739 * SmartSpeed, so performance is maintained.
740 */
741 if (phy->smart_speed == e1000_smart_speed_on) {
a8d2a0c2 742 ret_val = phy->ops.read_reg(hw,
652fff32 743 IGP01E1000_PHY_PORT_CONFIG, &data);
9d5c8243
AK
744 if (ret_val)
745 goto out;
746
747 data |= IGP01E1000_PSCFR_SMART_SPEED;
a8d2a0c2 748 ret_val = phy->ops.write_reg(hw,
652fff32 749 IGP01E1000_PHY_PORT_CONFIG, data);
9d5c8243
AK
750 if (ret_val)
751 goto out;
752 } else if (phy->smart_speed == e1000_smart_speed_off) {
a8d2a0c2 753 ret_val = phy->ops.read_reg(hw,
652fff32 754 IGP01E1000_PHY_PORT_CONFIG, &data);
9d5c8243
AK
755 if (ret_val)
756 goto out;
757
758 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
a8d2a0c2 759 ret_val = phy->ops.write_reg(hw,
652fff32 760 IGP01E1000_PHY_PORT_CONFIG, data);
9d5c8243
AK
761 if (ret_val)
762 goto out;
763 }
764 }
765
766out:
767 return ret_val;
768}
769
da02cde1
CW
770/**
771 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
772 * @hw: pointer to the HW structure
773 * @active: true to enable LPLU, false to disable
774 *
775 * Sets the LPLU D0 state according to the active flag. When
776 * activating LPLU this function also disables smart speed
777 * and vice versa. LPLU will not be activated unless the
778 * device autonegotiation advertisement meets standards of
779 * either 10 or 10/100 or 10/100/1000 at all duplexes.
780 * This is a function pointer entry point only called by
781 * PHY setup routines.
782 **/
783static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
784{
785 struct e1000_phy_info *phy = &hw->phy;
786 s32 ret_val = 0;
787 u16 data;
788
789 data = rd32(E1000_82580_PHY_POWER_MGMT);
790
791 if (active) {
792 data |= E1000_82580_PM_D0_LPLU;
793
794 /* When LPLU is enabled, we should disable SmartSpeed */
795 data &= ~E1000_82580_PM_SPD;
796 } else {
797 data &= ~E1000_82580_PM_D0_LPLU;
798
799 /*
800 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
801 * during Dx states where the power conservation is most
802 * important. During driver activity we should enable
803 * SmartSpeed, so performance is maintained.
804 */
805 if (phy->smart_speed == e1000_smart_speed_on)
806 data |= E1000_82580_PM_SPD;
807 else if (phy->smart_speed == e1000_smart_speed_off)
808 data &= ~E1000_82580_PM_SPD; }
809
810 wr32(E1000_82580_PHY_POWER_MGMT, data);
811 return ret_val;
812}
813
814/**
815 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
816 * @hw: pointer to the HW structure
817 * @active: boolean used to enable/disable lplu
818 *
819 * Success returns 0, Failure returns 1
820 *
821 * The low power link up (lplu) state is set to the power management level D3
822 * and SmartSpeed is disabled when active is true, else clear lplu for D3
823 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
824 * is used during Dx states where the power conservation is most important.
825 * During driver activity, SmartSpeed should be enabled so performance is
826 * maintained.
827 **/
828s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
829{
830 struct e1000_phy_info *phy = &hw->phy;
831 s32 ret_val = 0;
832 u16 data;
833
834 data = rd32(E1000_82580_PHY_POWER_MGMT);
835
836 if (!active) {
837 data &= ~E1000_82580_PM_D3_LPLU;
838 /*
839 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
840 * during Dx states where the power conservation is most
841 * important. During driver activity we should enable
842 * SmartSpeed, so performance is maintained.
843 */
844 if (phy->smart_speed == e1000_smart_speed_on)
845 data |= E1000_82580_PM_SPD;
846 else if (phy->smart_speed == e1000_smart_speed_off)
847 data &= ~E1000_82580_PM_SPD;
848 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
849 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
850 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
851 data |= E1000_82580_PM_D3_LPLU;
852 /* When LPLU is enabled, we should disable SmartSpeed */
853 data &= ~E1000_82580_PM_SPD;
854 }
855
856 wr32(E1000_82580_PHY_POWER_MGMT, data);
857 return ret_val;
858}
859
9d5c8243 860/**
733596be 861 * igb_acquire_nvm_82575 - Request for access to EEPROM
9d5c8243
AK
862 * @hw: pointer to the HW structure
863 *
652fff32 864 * Acquire the necessary semaphores for exclusive access to the EEPROM.
9d5c8243
AK
865 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
866 * Return successful if access grant bit set, else clear the request for
867 * EEPROM access and return -E1000_ERR_NVM (-1).
868 **/
869static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
870{
871 s32 ret_val;
872
f96a8a0b 873 ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
9d5c8243
AK
874 if (ret_val)
875 goto out;
876
877 ret_val = igb_acquire_nvm(hw);
878
879 if (ret_val)
f96a8a0b 880 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
9d5c8243
AK
881
882out:
883 return ret_val;
884}
885
886/**
733596be 887 * igb_release_nvm_82575 - Release exclusive access to EEPROM
9d5c8243
AK
888 * @hw: pointer to the HW structure
889 *
890 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
891 * then release the semaphores acquired.
892 **/
893static void igb_release_nvm_82575(struct e1000_hw *hw)
894{
895 igb_release_nvm(hw);
f96a8a0b 896 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
9d5c8243
AK
897}
898
899/**
733596be 900 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
9d5c8243
AK
901 * @hw: pointer to the HW structure
902 * @mask: specifies which semaphore to acquire
903 *
904 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
905 * will also specify which port we're acquiring the lock for.
906 **/
907static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
908{
909 u32 swfw_sync;
910 u32 swmask = mask;
911 u32 fwmask = mask << 16;
912 s32 ret_val = 0;
913 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
914
915 while (i < timeout) {
916 if (igb_get_hw_semaphore(hw)) {
917 ret_val = -E1000_ERR_SWFW_SYNC;
918 goto out;
919 }
920
921 swfw_sync = rd32(E1000_SW_FW_SYNC);
922 if (!(swfw_sync & (fwmask | swmask)))
923 break;
924
925 /*
926 * Firmware currently using resource (fwmask)
927 * or other software thread using resource (swmask)
928 */
929 igb_put_hw_semaphore(hw);
930 mdelay(5);
931 i++;
932 }
933
934 if (i == timeout) {
652fff32 935 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
9d5c8243
AK
936 ret_val = -E1000_ERR_SWFW_SYNC;
937 goto out;
938 }
939
940 swfw_sync |= swmask;
941 wr32(E1000_SW_FW_SYNC, swfw_sync);
942
943 igb_put_hw_semaphore(hw);
944
945out:
946 return ret_val;
947}
948
949/**
733596be 950 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
9d5c8243
AK
951 * @hw: pointer to the HW structure
952 * @mask: specifies which semaphore to acquire
953 *
954 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
955 * will also specify which port we're releasing the lock for.
956 **/
957static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
958{
959 u32 swfw_sync;
960
961 while (igb_get_hw_semaphore(hw) != 0);
962 /* Empty */
963
964 swfw_sync = rd32(E1000_SW_FW_SYNC);
965 swfw_sync &= ~mask;
966 wr32(E1000_SW_FW_SYNC, swfw_sync);
967
968 igb_put_hw_semaphore(hw);
969}
970
971/**
733596be 972 * igb_get_cfg_done_82575 - Read config done bit
9d5c8243
AK
973 * @hw: pointer to the HW structure
974 *
975 * Read the management control register for the config done bit for
976 * completion status. NOTE: silicon which is EEPROM-less will fail trying
977 * to read the config done bit, so an error is *ONLY* logged and returns
978 * 0. If we were to return with error, EEPROM-less silicon
979 * would not be able to be reset or change link.
980 **/
981static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
982{
983 s32 timeout = PHY_CFG_TIMEOUT;
984 s32 ret_val = 0;
985 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
986
987 if (hw->bus.func == 1)
988 mask = E1000_NVM_CFG_DONE_PORT_1;
bb2ac47b
AD
989 else if (hw->bus.func == E1000_FUNC_2)
990 mask = E1000_NVM_CFG_DONE_PORT_2;
991 else if (hw->bus.func == E1000_FUNC_3)
992 mask = E1000_NVM_CFG_DONE_PORT_3;
9d5c8243
AK
993
994 while (timeout) {
995 if (rd32(E1000_EEMNGCTL) & mask)
996 break;
997 msleep(1);
998 timeout--;
999 }
1000 if (!timeout)
652fff32 1001 hw_dbg("MNG configuration cycle has not completed.\n");
9d5c8243
AK
1002
1003 /* If EEPROM is not marked present, init the PHY manually */
1004 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1005 (hw->phy.type == e1000_phy_igp_3))
1006 igb_phy_init_script_igp3(hw);
1007
1008 return ret_val;
1009}
1010
1011/**
733596be 1012 * igb_check_for_link_82575 - Check for link
9d5c8243
AK
1013 * @hw: pointer to the HW structure
1014 *
1015 * If sgmii is enabled, then use the pcs register to determine link, otherwise
1016 * use the generic interface for determining link.
1017 **/
1018static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1019{
1020 s32 ret_val;
1021 u16 speed, duplex;
1022
70d92f86 1023 if (hw->phy.media_type != e1000_media_type_copper) {
9d5c8243 1024 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
2d064c06 1025 &duplex);
5d0932a5
AD
1026 /*
1027 * Use this flag to determine if link needs to be checked or
1028 * not. If we have link clear the flag so that we do not
1029 * continue to check for link.
1030 */
1031 hw->mac.get_link_status = !hw->mac.serdes_has_link;
daf56e40
CW
1032
1033 /* Configure Flow Control now that Auto-Neg has completed.
1034 * First, we need to restore the desired flow control
1035 * settings because we may have had to re-autoneg with a
1036 * different link partner.
1037 */
1038 ret_val = igb_config_fc_after_link_up(hw);
1039 if (ret_val)
1040 hw_dbg("Error configuring flow control\n");
5d0932a5 1041 } else {
9d5c8243 1042 ret_val = igb_check_for_copper_link(hw);
5d0932a5 1043 }
9d5c8243
AK
1044
1045 return ret_val;
1046}
70d92f86 1047
88a268c1
NN
1048/**
1049 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1050 * @hw: pointer to the HW structure
1051 **/
1052void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1053{
1054 u32 reg;
1055
1056
1057 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1058 !igb_sgmii_active_82575(hw))
1059 return;
1060
1061 /* Enable PCS to turn on link */
1062 reg = rd32(E1000_PCS_CFG0);
1063 reg |= E1000_PCS_CFG_PCS_EN;
1064 wr32(E1000_PCS_CFG0, reg);
1065
1066 /* Power up the laser */
1067 reg = rd32(E1000_CTRL_EXT);
1068 reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1069 wr32(E1000_CTRL_EXT, reg);
1070
1071 /* flush the write to verify completion */
1072 wrfl();
1073 msleep(1);
1074}
1075
9d5c8243 1076/**
733596be 1077 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
9d5c8243
AK
1078 * @hw: pointer to the HW structure
1079 * @speed: stores the current speed
1080 * @duplex: stores the current duplex
1081 *
652fff32 1082 * Using the physical coding sub-layer (PCS), retrieve the current speed and
9d5c8243
AK
1083 * duplex, then store the values in the pointers provided.
1084 **/
1085static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1086 u16 *duplex)
1087{
1088 struct e1000_mac_info *mac = &hw->mac;
1089 u32 pcs;
1090
1091 /* Set up defaults for the return values of this function */
1092 mac->serdes_has_link = false;
1093 *speed = 0;
1094 *duplex = 0;
1095
1096 /*
1097 * Read the PCS Status register for link state. For non-copper mode,
1098 * the status register is not accurate. The PCS status register is
1099 * used instead.
1100 */
1101 pcs = rd32(E1000_PCS_LSTAT);
1102
1103 /*
1104 * The link up bit determines when link is up on autoneg. The sync ok
1105 * gets set once both sides sync up and agree upon link. Stable link
1106 * can be determined by checking for both link up and link sync ok
1107 */
1108 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1109 mac->serdes_has_link = true;
1110
1111 /* Detect and store PCS speed */
1112 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
1113 *speed = SPEED_1000;
1114 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
1115 *speed = SPEED_100;
1116 } else {
1117 *speed = SPEED_10;
1118 }
1119
1120 /* Detect and store PCS duplex */
1121 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1122 *duplex = FULL_DUPLEX;
1123 } else {
1124 *duplex = HALF_DUPLEX;
1125 }
1126 }
1127
1128 return 0;
1129}
1130
2d064c06 1131/**
2fb02a26 1132 * igb_shutdown_serdes_link_82575 - Remove link during power down
9d5c8243 1133 * @hw: pointer to the HW structure
9d5c8243 1134 *
2d064c06
AD
1135 * In the case of fiber serdes, shut down optics and PCS on driver unload
1136 * when management pass thru is not enabled.
9d5c8243 1137 **/
2fb02a26 1138void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
9d5c8243 1139{
2d064c06
AD
1140 u32 reg;
1141
53c992fa 1142 if (hw->phy.media_type != e1000_media_type_internal_serdes &&
2fb02a26 1143 igb_sgmii_active_82575(hw))
2d064c06
AD
1144 return;
1145
53c992fa 1146 if (!igb_enable_mng_pass_thru(hw)) {
2d064c06
AD
1147 /* Disable PCS to turn off link */
1148 reg = rd32(E1000_PCS_CFG0);
1149 reg &= ~E1000_PCS_CFG_PCS_EN;
1150 wr32(E1000_PCS_CFG0, reg);
1151
1152 /* shutdown the laser */
1153 reg = rd32(E1000_CTRL_EXT);
2fb02a26 1154 reg |= E1000_CTRL_EXT_SDP3_DATA;
2d064c06
AD
1155 wr32(E1000_CTRL_EXT, reg);
1156
1157 /* flush the write to verify completion */
1158 wrfl();
1159 msleep(1);
1160 }
9d5c8243
AK
1161}
1162
1163/**
733596be 1164 * igb_reset_hw_82575 - Reset hardware
9d5c8243
AK
1165 * @hw: pointer to the HW structure
1166 *
1167 * This resets the hardware into a known state. This is a
1168 * function pointer entry point called by the api module.
1169 **/
1170static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1171{
1172 u32 ctrl, icr;
1173 s32 ret_val;
1174
1175 /*
1176 * Prevent the PCI-E bus from sticking if there is no TLP connection
1177 * on the last TLP read/write transaction when MAC is reset.
1178 */
1179 ret_val = igb_disable_pcie_master(hw);
1180 if (ret_val)
652fff32 1181 hw_dbg("PCI-E Master disable polling has failed.\n");
9d5c8243 1182
009bc06e
AD
1183 /* set the completion timeout for interface */
1184 ret_val = igb_set_pcie_completion_timeout(hw);
1185 if (ret_val) {
1186 hw_dbg("PCI-E Set completion timeout has failed.\n");
1187 }
1188
652fff32 1189 hw_dbg("Masking off all interrupts\n");
9d5c8243
AK
1190 wr32(E1000_IMC, 0xffffffff);
1191
1192 wr32(E1000_RCTL, 0);
1193 wr32(E1000_TCTL, E1000_TCTL_PSP);
1194 wrfl();
1195
1196 msleep(10);
1197
1198 ctrl = rd32(E1000_CTRL);
1199
652fff32 1200 hw_dbg("Issuing a global reset to MAC\n");
9d5c8243
AK
1201 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1202
1203 ret_val = igb_get_auto_rd_done(hw);
1204 if (ret_val) {
1205 /*
1206 * When auto config read does not complete, do not
1207 * return with an error. This can happen in situations
1208 * where there is no eeprom and prevents getting link.
1209 */
652fff32 1210 hw_dbg("Auto Read Done did not complete\n");
9d5c8243
AK
1211 }
1212
1213 /* If EEPROM is not present, run manual init scripts */
1214 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1215 igb_reset_init_script_82575(hw);
1216
1217 /* Clear any pending interrupt events. */
1218 wr32(E1000_IMC, 0xffffffff);
1219 icr = rd32(E1000_ICR);
1220
5ac16659
AD
1221 /* Install any alternate MAC address into RAR0 */
1222 ret_val = igb_check_alt_mac_addr(hw);
9d5c8243
AK
1223
1224 return ret_val;
1225}
1226
1227/**
733596be 1228 * igb_init_hw_82575 - Initialize hardware
9d5c8243
AK
1229 * @hw: pointer to the HW structure
1230 *
1231 * This inits the hardware readying it for operation.
1232 **/
1233static s32 igb_init_hw_82575(struct e1000_hw *hw)
1234{
1235 struct e1000_mac_info *mac = &hw->mac;
1236 s32 ret_val;
1237 u16 i, rar_count = mac->rar_entry_count;
1238
1239 /* Initialize identification LED */
1240 ret_val = igb_id_led_init(hw);
1241 if (ret_val) {
652fff32 1242 hw_dbg("Error initializing identification LED\n");
9d5c8243
AK
1243 /* This is not fatal and we should not stop init due to this */
1244 }
1245
1246 /* Disabling VLAN filtering */
652fff32 1247 hw_dbg("Initializing the IEEE VLAN\n");
1128c756
CW
1248 if (hw->mac.type == e1000_i350)
1249 igb_clear_vfta_i350(hw);
1250 else
1251 igb_clear_vfta(hw);
9d5c8243
AK
1252
1253 /* Setup the receive address */
5ac16659
AD
1254 igb_init_rx_addrs(hw, rar_count);
1255
9d5c8243 1256 /* Zero out the Multicast HASH table */
652fff32 1257 hw_dbg("Zeroing the MTA\n");
9d5c8243
AK
1258 for (i = 0; i < mac->mta_reg_count; i++)
1259 array_wr32(E1000_MTA, i, 0);
1260
68d480c4
AD
1261 /* Zero out the Unicast HASH table */
1262 hw_dbg("Zeroing the UTA\n");
1263 for (i = 0; i < mac->uta_reg_count; i++)
1264 array_wr32(E1000_UTA, i, 0);
1265
9d5c8243
AK
1266 /* Setup link and flow control */
1267 ret_val = igb_setup_link(hw);
1268
1269 /*
1270 * Clear all of the statistics registers (clear on read). It is
1271 * important that we do this after we have tried to establish link
1272 * because the symbol error count will increment wildly if there
1273 * is no link.
1274 */
1275 igb_clear_hw_cntrs_82575(hw);
9d5c8243
AK
1276 return ret_val;
1277}
1278
1279/**
733596be 1280 * igb_setup_copper_link_82575 - Configure copper link settings
9d5c8243
AK
1281 * @hw: pointer to the HW structure
1282 *
1283 * Configures the link for auto-neg or forced speed and duplex. Then we check
1284 * for link, once link is established calls to configure collision distance
1285 * and flow control are called.
1286 **/
1287static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1288{
12645a19 1289 u32 ctrl;
9d5c8243 1290 s32 ret_val;
867eb39e 1291 u32 phpm_reg;
9d5c8243
AK
1292
1293 ctrl = rd32(E1000_CTRL);
1294 ctrl |= E1000_CTRL_SLU;
1295 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1296 wr32(E1000_CTRL, ctrl);
1297
867eb39e
CW
1298 /* Clear Go Link Disconnect bit */
1299 if (hw->mac.type >= e1000_82580) {
1300 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1301 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1302 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1303 }
1304
2fb02a26
AD
1305 ret_val = igb_setup_serdes_link_82575(hw);
1306 if (ret_val)
1307 goto out;
1308
1309 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
bb2ac47b
AD
1310 /* allow time for SFP cage time to power up phy */
1311 msleep(300);
1312
2fb02a26
AD
1313 ret_val = hw->phy.ops.reset(hw);
1314 if (ret_val) {
1315 hw_dbg("Error resetting the PHY.\n");
1316 goto out;
1317 }
1318 }
9d5c8243 1319 switch (hw->phy.type) {
f96a8a0b 1320 case e1000_phy_i210:
9d5c8243 1321 case e1000_phy_m88:
308fb39a
JG
1322 if (hw->phy.id == I347AT4_E_PHY_ID ||
1323 hw->phy.id == M88E1112_E_PHY_ID)
1324 ret_val = igb_copper_link_setup_m88_gen2(hw);
1325 else
1326 ret_val = igb_copper_link_setup_m88(hw);
9d5c8243
AK
1327 break;
1328 case e1000_phy_igp_3:
1329 ret_val = igb_copper_link_setup_igp(hw);
9d5c8243 1330 break;
bb2ac47b
AD
1331 case e1000_phy_82580:
1332 ret_val = igb_copper_link_setup_82580(hw);
1333 break;
9d5c8243
AK
1334 default:
1335 ret_val = -E1000_ERR_PHY;
1336 break;
1337 }
1338
1339 if (ret_val)
1340 goto out;
1341
81fadd81 1342 ret_val = igb_setup_copper_link(hw);
9d5c8243
AK
1343out:
1344 return ret_val;
1345}
1346
1347/**
70d92f86 1348 * igb_setup_serdes_link_82575 - Setup link for serdes
9d5c8243
AK
1349 * @hw: pointer to the HW structure
1350 *
70d92f86
AD
1351 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1352 * used on copper connections where the serialized gigabit media independent
1353 * interface (sgmii), or serdes fiber is being used. Configures the link
1354 * for auto-negotiation or forces speed/duplex.
9d5c8243 1355 **/
2fb02a26 1356static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
9d5c8243 1357{
daf56e40 1358 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bb2ac47b 1359 bool pcs_autoneg;
2c670b5b
CW
1360 s32 ret_val = E1000_SUCCESS;
1361 u16 data;
2fb02a26
AD
1362
1363 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1364 !igb_sgmii_active_82575(hw))
2c670b5b
CW
1365 return ret_val;
1366
9d5c8243
AK
1367
1368 /*
1369 * On the 82575, SerDes loopback mode persists until it is
1370 * explicitly turned off or a power cycle is performed. A read to
1371 * the register does not indicate its status. Therefore, we ensure
1372 * loopback mode is disabled during initialization.
1373 */
1374 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1375
2fb02a26 1376 /* power on the sfp cage if present */
bb2ac47b
AD
1377 ctrl_ext = rd32(E1000_CTRL_EXT);
1378 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1379 wr32(E1000_CTRL_EXT, ctrl_ext);
2fb02a26
AD
1380
1381 ctrl_reg = rd32(E1000_CTRL);
1382 ctrl_reg |= E1000_CTRL_SLU;
1383
1384 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1385 /* set both sw defined pins */
1386 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1387
1388 /* Set switch control to serdes energy detect */
1389 reg = rd32(E1000_CONNSW);
1390 reg |= E1000_CONNSW_ENRGSRC;
1391 wr32(E1000_CONNSW, reg);
1392 }
1393
1394 reg = rd32(E1000_PCS_LCTL);
1395
bb2ac47b
AD
1396 /* default pcs_autoneg to the same setting as mac autoneg */
1397 pcs_autoneg = hw->mac.autoneg;
2fb02a26 1398
bb2ac47b
AD
1399 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1400 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1401 /* sgmii mode lets the phy handle forcing speed/duplex */
1402 pcs_autoneg = true;
1403 /* autoneg time out should be disabled for SGMII mode */
2fb02a26 1404 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
bb2ac47b
AD
1405 break;
1406 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1407 /* disable PCS autoneg and support parallel detect only */
1408 pcs_autoneg = false;
1409 default:
2c670b5b
CW
1410 if (hw->mac.type == e1000_82575 ||
1411 hw->mac.type == e1000_82576) {
1412 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1413 if (ret_val) {
1414 printk(KERN_DEBUG "NVM Read Error\n\n");
1415 return ret_val;
1416 }
1417
1418 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1419 pcs_autoneg = false;
1420 }
1421
bb2ac47b
AD
1422 /*
1423 * non-SGMII modes only supports a speed of 1000/Full for the
1424 * link so it is best to just force the MAC and let the pcs
1425 * link either autoneg or be forced to 1000/Full
1426 */
2fb02a26
AD
1427 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1428 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
bb2ac47b
AD
1429
1430 /* set speed of 1000/Full if speed/duplex is forced */
1431 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1432 break;
921aa749
AD
1433 }
1434
2fb02a26 1435 wr32(E1000_CTRL, ctrl_reg);
9d5c8243
AK
1436
1437 /*
1438 * New SerDes mode allows for forcing speed or autonegotiating speed
1439 * at 1gb. Autoneg should be default set by most drivers. This is the
1440 * mode that will be compatible with older link partners and switches.
1441 * However, both are supported by the hardware and some drivers/tools.
1442 */
9d5c8243
AK
1443 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1444 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1445
bb2ac47b 1446 if (pcs_autoneg) {
9d5c8243 1447 /* Set PCS register for autoneg */
bb2ac47b 1448 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
70d92f86 1449 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
daf56e40
CW
1450
1451 /* Disable force flow control for autoneg */
1452 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1453
1454 /* Configure flow control advertisement for autoneg */
1455 anadv_reg = rd32(E1000_PCS_ANADV);
1456 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1457 switch (hw->fc.requested_mode) {
1458 case e1000_fc_full:
1459 case e1000_fc_rx_pause:
1460 anadv_reg |= E1000_TXCW_ASM_DIR;
1461 anadv_reg |= E1000_TXCW_PAUSE;
1462 break;
1463 case e1000_fc_tx_pause:
1464 anadv_reg |= E1000_TXCW_ASM_DIR;
1465 break;
1466 default:
1467 break;
1468 }
1469 wr32(E1000_PCS_ANADV, anadv_reg);
1470
bb2ac47b 1471 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
9d5c8243 1472 } else {
bb2ac47b 1473 /* Set PCS register for forced link */
d68caec6 1474 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
bb2ac47b 1475
daf56e40
CW
1476 /* Force flow control for forced link */
1477 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1478
bb2ac47b 1479 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
9d5c8243 1480 }
726c09e7 1481
9d5c8243
AK
1482 wr32(E1000_PCS_LCTL, reg);
1483
daf56e40 1484 if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
2fb02a26 1485 igb_force_mac_fc(hw);
9d5c8243 1486
2c670b5b 1487 return ret_val;
9d5c8243
AK
1488}
1489
1490/**
733596be 1491 * igb_sgmii_active_82575 - Return sgmii state
9d5c8243
AK
1492 * @hw: pointer to the HW structure
1493 *
1494 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1495 * which can be enabled for use in the embedded applications. Simply
1496 * return the current state of the sgmii interface.
1497 **/
1498static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1499{
c1889bfe 1500 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
c1889bfe 1501 return dev_spec->sgmii_active;
9d5c8243
AK
1502}
1503
1504/**
733596be 1505 * igb_reset_init_script_82575 - Inits HW defaults after reset
9d5c8243
AK
1506 * @hw: pointer to the HW structure
1507 *
1508 * Inits recommended HW defaults after a reset when there is no EEPROM
1509 * detected. This is only for the 82575.
1510 **/
1511static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1512{
1513 if (hw->mac.type == e1000_82575) {
652fff32 1514 hw_dbg("Running reset init script for 82575\n");
9d5c8243
AK
1515 /* SerDes configuration via SERDESCTRL */
1516 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1517 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1518 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1519 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1520
1521 /* CCM configuration via CCMCTL register */
1522 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1523 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1524
1525 /* PCIe lanes configuration */
1526 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1527 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1528 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1529 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1530
1531 /* PCIe PLL Configuration */
1532 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1533 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1534 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1535 }
1536
1537 return 0;
1538}
1539
1540/**
733596be 1541 * igb_read_mac_addr_82575 - Read device MAC address
9d5c8243
AK
1542 * @hw: pointer to the HW structure
1543 **/
1544static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1545{
1546 s32 ret_val = 0;
1547
22896639
AD
1548 /*
1549 * If there's an alternate MAC address place it in RAR0
1550 * so that it will override the Si installed default perm
1551 * address.
1552 */
1553 ret_val = igb_check_alt_mac_addr(hw);
1554 if (ret_val)
1555 goto out;
1556
1557 ret_val = igb_read_mac_addr(hw);
9d5c8243 1558
22896639 1559out:
9d5c8243
AK
1560 return ret_val;
1561}
1562
88a268c1
NN
1563/**
1564 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1565 * @hw: pointer to the HW structure
1566 *
1567 * In the case of a PHY power down to save power, or to turn off link during a
1568 * driver unload, or wake on lan is not enabled, remove the link.
1569 **/
1570void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1571{
1572 /* If the management interface is not enabled, then power down */
1573 if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1574 igb_power_down_phy_copper(hw);
88a268c1
NN
1575}
1576
9d5c8243 1577/**
733596be 1578 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
9d5c8243
AK
1579 * @hw: pointer to the HW structure
1580 *
1581 * Clears the hardware counters by reading the counter registers.
1582 **/
1583static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1584{
9d5c8243
AK
1585 igb_clear_hw_cntrs_base(hw);
1586
cc9073bb
AD
1587 rd32(E1000_PRC64);
1588 rd32(E1000_PRC127);
1589 rd32(E1000_PRC255);
1590 rd32(E1000_PRC511);
1591 rd32(E1000_PRC1023);
1592 rd32(E1000_PRC1522);
1593 rd32(E1000_PTC64);
1594 rd32(E1000_PTC127);
1595 rd32(E1000_PTC255);
1596 rd32(E1000_PTC511);
1597 rd32(E1000_PTC1023);
1598 rd32(E1000_PTC1522);
1599
1600 rd32(E1000_ALGNERRC);
1601 rd32(E1000_RXERRC);
1602 rd32(E1000_TNCRS);
1603 rd32(E1000_CEXTERR);
1604 rd32(E1000_TSCTC);
1605 rd32(E1000_TSCTFC);
1606
1607 rd32(E1000_MGTPRC);
1608 rd32(E1000_MGTPDC);
1609 rd32(E1000_MGTPTC);
1610
1611 rd32(E1000_IAC);
1612 rd32(E1000_ICRXOC);
1613
1614 rd32(E1000_ICRXPTC);
1615 rd32(E1000_ICRXATC);
1616 rd32(E1000_ICTXPTC);
1617 rd32(E1000_ICTXATC);
1618 rd32(E1000_ICTXQEC);
1619 rd32(E1000_ICTXQMTC);
1620 rd32(E1000_ICRXDMTC);
1621
1622 rd32(E1000_CBTMPC);
1623 rd32(E1000_HTDPMC);
1624 rd32(E1000_CBRMPC);
1625 rd32(E1000_RPTHC);
1626 rd32(E1000_HGPTC);
1627 rd32(E1000_HTCBDPC);
1628 rd32(E1000_HGORCL);
1629 rd32(E1000_HGORCH);
1630 rd32(E1000_HGOTCL);
1631 rd32(E1000_HGOTCH);
1632 rd32(E1000_LENERRS);
9d5c8243
AK
1633
1634 /* This register should not be read in copper configurations */
2fb02a26
AD
1635 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1636 igb_sgmii_active_82575(hw))
cc9073bb 1637 rd32(E1000_SCVPC);
9d5c8243
AK
1638}
1639
662d7205
AD
1640/**
1641 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1642 * @hw: pointer to the HW structure
1643 *
1644 * After rx enable if managability is enabled then there is likely some
1645 * bad data at the start of the fifo and possibly in the DMA fifo. This
1646 * function clears the fifos and flushes any packets that came in as rx was
1647 * being enabled.
1648 **/
1649void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1650{
1651 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1652 int i, ms_wait;
1653
1654 if (hw->mac.type != e1000_82575 ||
1655 !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1656 return;
1657
1658 /* Disable all RX queues */
1659 for (i = 0; i < 4; i++) {
1660 rxdctl[i] = rd32(E1000_RXDCTL(i));
1661 wr32(E1000_RXDCTL(i),
1662 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1663 }
1664 /* Poll all queues to verify they have shut down */
1665 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1666 msleep(1);
1667 rx_enabled = 0;
1668 for (i = 0; i < 4; i++)
1669 rx_enabled |= rd32(E1000_RXDCTL(i));
1670 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1671 break;
1672 }
1673
1674 if (ms_wait == 10)
1675 hw_dbg("Queue disable timed out after 10ms\n");
1676
1677 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1678 * incoming packets are rejected. Set enable and wait 2ms so that
1679 * any packet that was coming in as RCTL.EN was set is flushed
1680 */
1681 rfctl = rd32(E1000_RFCTL);
1682 wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1683
1684 rlpml = rd32(E1000_RLPML);
1685 wr32(E1000_RLPML, 0);
1686
1687 rctl = rd32(E1000_RCTL);
1688 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1689 temp_rctl |= E1000_RCTL_LPE;
1690
1691 wr32(E1000_RCTL, temp_rctl);
1692 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1693 wrfl();
1694 msleep(2);
1695
1696 /* Enable RX queues that were previously enabled and restore our
1697 * previous state
1698 */
1699 for (i = 0; i < 4; i++)
1700 wr32(E1000_RXDCTL(i), rxdctl[i]);
1701 wr32(E1000_RCTL, rctl);
1702 wrfl();
1703
1704 wr32(E1000_RLPML, rlpml);
1705 wr32(E1000_RFCTL, rfctl);
1706
1707 /* Flush receive errors generated by workaround */
1708 rd32(E1000_ROC);
1709 rd32(E1000_RNBC);
1710 rd32(E1000_MPC);
1711}
1712
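/* Illustrative sketch, not driver code: the queue-disable loop above follows
 * a bounded-poll idiom (poll the enable bits with a fixed retry budget rather
 * than waiting forever). Everything below is invented for illustration;
 * read_rxdctl() and sleep_ms() stand in for rd32() and msleep().
 */
#include <stdbool.h>
#include <stdint.h>

#define MOCK_QUEUES     4
#define MOCK_ENABLE_BIT (1u << 25)	/* placeholder queue-enable bit */

static bool wait_queues_disabled(uint32_t (*read_rxdctl)(int queue),
				 void (*sleep_ms)(unsigned int ms))
{
	int tries, i;

	for (tries = 0; tries < 10; tries++) {
		uint32_t enabled = 0;

		for (i = 0; i < MOCK_QUEUES; i++)
			enabled |= read_rxdctl(i);
		if (!(enabled & MOCK_ENABLE_BIT))
			return true;	/* all queues report disabled */
		sleep_ms(1);		/* give the hardware time to drain */
	}
	return false;			/* caller logs a timeout, as above */
}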
1713/**
1714 * igb_set_pcie_completion_timeout - set pci-e completion timeout
1715 * @hw: pointer to the HW structure
1716 *
1717 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 1718 * but the hardware default for these parts is 500us to 1ms, which is less
 1719 * than the 10ms recommended by the PCIe spec. To address this we need to
1720 * increase the value to either 10ms to 200ms for capability version 1 config,
1721 * or 16ms to 55ms for version 2.
1722 **/
1723static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1724{
1725 u32 gcr = rd32(E1000_GCR);
1726 s32 ret_val = 0;
1727 u16 pcie_devctl2;
1728
1729 /* only take action if timeout value is defaulted to 0 */
1730 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1731 goto out;
1732
1733 /*
 1734	 * if the capabilities version is 1 we can write the
1735 * timeout of 10ms to 200ms through the GCR register
1736 */
1737 if (!(gcr & E1000_GCR_CAP_VER2)) {
1738 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1739 goto out;
1740 }
1741
1742 /*
1743 * for version 2 capabilities we need to write the config space
 1744	 * directly in order to set the completion timeout to a value
 1745	 * in the 16ms to 55ms range
1746 */
1747 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1748 &pcie_devctl2);
1749 if (ret_val)
1750 goto out;
1751
1752 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1753
1754 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1755 &pcie_devctl2);
1756out:
1757 /* disable completion timeout resend */
1758 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1759
1760 wr32(E1000_GCR, gcr);
1761 return ret_val;
1762}
1763
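/* Illustrative sketch, not driver code: it isolates the decision made above.
 * If GCR already carries a non-zero completion timeout nothing is changed;
 * otherwise capability version 1 parts are programmed through GCR and
 * version 2 parts through the Device Control 2 config-space register. The
 * bit values are placeholders, not the real register layout.
 */
#include <stdint.h>

#define MOCK_GCR_CMPL_TMOUT_MASK 0x0000F000u	/* placeholder */
#define MOCK_GCR_CAP_VER2        0x00040000u	/* placeholder */

enum cmpl_tmout_action {
	TMOUT_KEEP_CURRENT,	/* timeout already configured */
	TMOUT_SET_VIA_GCR,	/* version 1: 10ms-200ms via GCR */
	TMOUT_SET_VIA_CFGSPACE,	/* version 2: 16ms-55ms via DEVCTL2 */
};

static enum cmpl_tmout_action pick_cmpl_tmout_action(uint32_t gcr)
{
	if (gcr & MOCK_GCR_CMPL_TMOUT_MASK)
		return TMOUT_KEEP_CURRENT;
	if (!(gcr & MOCK_GCR_CAP_VER2))
		return TMOUT_SET_VIA_GCR;
	return TMOUT_SET_VIA_CFGSPACE;
}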
1764/**
1765 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1766 * @hw: pointer to the hardware struct
1767 * @enable: state to enter, either enabled or disabled
1768 * @pf: Physical Function pool - do not set anti-spoofing for the PF
1769 *
1770 * enables/disables L2 switch anti-spoofing functionality.
1771 **/
1772void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1773{
1774 u32 dtxswc;
1775
1776 switch (hw->mac.type) {
1777 case e1000_82576:
1778 case e1000_i350:
1779 dtxswc = rd32(E1000_DTXSWC);
1780 if (enable) {
1781 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1782 E1000_DTXSWC_VLAN_SPOOF_MASK);
1783 /* The PF can spoof - it has to in order to
1784 * support emulation mode NICs */
1785 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1786 } else {
1787 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1788 E1000_DTXSWC_VLAN_SPOOF_MASK);
1789 }
1790 wr32(E1000_DTXSWC, dtxswc);
1791 break;
1792 default:
1793 break;
1794 }
1795}
1796
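/* Illustrative sketch, not driver code: it reproduces the bit arithmetic
 * above in isolation. Every pool's MAC and VLAN spoof-check bit is set, then
 * the XOR flips the PF's two bits back to zero so the PF itself is never
 * spoof-checked. The masks are placeholders chosen only to make the example
 * self-contained (8 pools per check type).
 */
#include <stdint.h>

#define MOCK_NUM_POOLS       8
#define MOCK_MAC_SPOOF_MASK  0x000000FFu	/* bits 0-7: MAC spoof check */
#define MOCK_VLAN_SPOOF_MASK 0x0000FF00u	/* bits 8-15: VLAN spoof check */

static uint32_t spoof_check_all_but_pf(uint32_t dtxswc, int pf)
{
	dtxswc |= MOCK_MAC_SPOOF_MASK | MOCK_VLAN_SPOOF_MASK;
	/* XOR clears exactly the PF's MAC and VLAN check bits */
	dtxswc ^= (1u << pf) | (1u << (pf + MOCK_NUM_POOLS));
	return dtxswc;
}
/* e.g. pf = 0 yields 0x0000FEFE: every pool checked except pool 0 */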
1797/**
1798 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1799 * @hw: pointer to the hardware struct
1800 * @enable: state to enter, either enabled or disabled
1801 *
1802 * enables/disables L2 switch loopback functionality.
1803 **/
1804void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1805{
1806 u32 dtxswc;
1807
1808 switch (hw->mac.type) {
1809 case e1000_82576:
1810 dtxswc = rd32(E1000_DTXSWC);
1811 if (enable)
1812 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1813 else
1814 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1815 wr32(E1000_DTXSWC, dtxswc);
1816 break;
1817 case e1000_i350:
1818 dtxswc = rd32(E1000_TXSWC);
1819 if (enable)
1820 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1821 else
1822 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1823 wr32(E1000_TXSWC, dtxswc);
1824 break;
1825 default:
1826 /* Currently no other hardware supports loopback */
1827 break;
1828 }
 1829
 1830
1831}
1832
1833/**
1834 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
1835 * @hw: pointer to the hardware struct
1836 * @enable: state to enter, either enabled or disabled
1837 *
1838 * enables/disables replication of packets across multiple pools.
1839 **/
1840void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1841{
1842 u32 vt_ctl = rd32(E1000_VT_CTL);
1843
1844 if (enable)
1845 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1846 else
1847 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1848
1849 wr32(E1000_VT_CTL, vt_ctl);
1850}
1851
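/* Illustrative sketch, not driver code: the loopback and replication helpers
 * above share the same read-modify-write shape. This helper captures it with
 * the register access abstracted behind invented callbacks.
 */
#include <stdbool.h>
#include <stdint.h>

static void rmw_flag(uint32_t (*read_reg)(void),
		     void (*write_reg)(uint32_t val),
		     uint32_t flag, bool enable)
{
	uint32_t val = read_reg();

	if (enable)
		val |= flag;	/* e.g. loopback or replication enable bit */
	else
		val &= ~flag;
	write_reg(val);
}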
1852/**
1853 * igb_read_phy_reg_82580 - Read 82580 MDI control register
1854 * @hw: pointer to the HW structure
1855 * @offset: register offset to be read
1856 * @data: pointer to the read data
1857 *
1858 * Reads the MDI control register in the PHY at offset and stores the
1859 * information read to data.
1860 **/
1861static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1862{
1863 s32 ret_val;
1864
1865
1866 ret_val = hw->phy.ops.acquire(hw);
1867 if (ret_val)
1868 goto out;
1869
1870 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1871
1872 hw->phy.ops.release(hw);
1873
1874out:
1875 return ret_val;
1876}
1877
1878/**
1879 * igb_write_phy_reg_82580 - Write 82580 MDI control register
1880 * @hw: pointer to the HW structure
1881 * @offset: register offset to write to
1882 * @data: data to write to register at offset
1883 *
1884 * Writes data to MDI control register in the PHY at offset.
1885 **/
1886static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1887{
1888 s32 ret_val;
1889
1890
1891 ret_val = hw->phy.ops.acquire(hw);
1892 if (ret_val)
1893 goto out;
1894
1895 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1896
1897 hw->phy.ops.release(hw);
1898
1899out:
1900 return ret_val;
1901}
1902
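/* Illustrative sketch, not driver code: both MDI accessors above follow the
 * same acquire/operate/release shape. The point of the pattern is that the
 * semaphore is released only when it was actually taken, and the operation's
 * status is what the caller sees. All types and callbacks are invented.
 */
#include <stdint.h>

typedef int32_t status_t;	/* 0 on success, negative on error */

static status_t guarded_phy_op(status_t (*acquire)(void),
			       void (*release)(void),
			       status_t (*op)(void))
{
	status_t ret = acquire();

	if (ret)
		return ret;	/* never release a lock we failed to take */

	ret = op();		/* e.g. an MDIC read or write */
	release();
	return ret;
}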
1903/**
1904 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
1905 * @hw: pointer to the HW structure
1906 *
 1907 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
1908 * the values found in the EEPROM. This addresses an issue in which these
1909 * bits are not restored from EEPROM after reset.
1910 **/
1911static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
1912{
1913 s32 ret_val = 0;
1914 u32 mdicnfg;
 1915	u16 nvm_data = 0;
1916
1917 if (hw->mac.type != e1000_82580)
1918 goto out;
1919 if (!igb_sgmii_active_82575(hw))
1920 goto out;
1921
1922 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1923 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1924 &nvm_data);
1925 if (ret_val) {
1926 hw_dbg("NVM Read Error\n");
1927 goto out;
1928 }
1929
1930 mdicnfg = rd32(E1000_MDICNFG);
1931 if (nvm_data & NVM_WORD24_EXT_MDIO)
1932 mdicnfg |= E1000_MDICNFG_EXT_MDIO;
1933 if (nvm_data & NVM_WORD24_COM_MDIO)
1934 mdicnfg |= E1000_MDICNFG_COM_MDIO;
1935 wr32(E1000_MDICNFG, mdicnfg);
1936out:
1937 return ret_val;
1938}
1939
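/* Illustrative sketch, not driver code: it isolates the bit mapping above.
 * Two flags read from the NVM init-control word are mirrored into MDICNFG
 * after a reset fails to restore them. Bit positions are placeholders, not
 * the real word layout.
 */
#include <stdint.h>

#define MOCK_NVM_EXT_MDIO (1u << 2)	/* placeholder NVM flag */
#define MOCK_NVM_COM_MDIO (1u << 3)	/* placeholder NVM flag */
#define MOCK_CFG_EXT_MDIO (1u << 30)	/* placeholder MDICNFG bit */
#define MOCK_CFG_COM_MDIO (1u << 31)	/* placeholder MDICNFG bit */

static uint32_t apply_nvm_mdio_bits(uint32_t mdicnfg, uint16_t nvm_word)
{
	if (nvm_word & MOCK_NVM_EXT_MDIO)
		mdicnfg |= MOCK_CFG_EXT_MDIO;
	if (nvm_word & MOCK_NVM_COM_MDIO)
		mdicnfg |= MOCK_CFG_COM_MDIO;
	return mdicnfg;
}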
1940/**
1941 * igb_reset_hw_82580 - Reset hardware
1942 * @hw: pointer to the HW structure
1943 *
 1944 * This resets the function or the entire device (all ports, etc.)
1945 * to a known state.
1946 **/
1947static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1948{
1949 s32 ret_val = 0;
1950 /* BH SW mailbox bit in SW_FW_SYNC */
1951 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1952 u32 ctrl, icr;
1953 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1954
1955
1956 hw->dev_spec._82575.global_device_reset = false;
1957
1958 /* due to hw errata, global device reset doesn't always
1959 * work on 82580
1960 */
1961 if (hw->mac.type == e1000_82580)
1962 global_device_reset = false;
1963
1964 /* Get current control state. */
1965 ctrl = rd32(E1000_CTRL);
1966
1967 /*
1968 * Prevent the PCI-E bus from sticking if there is no TLP connection
1969 * on the last TLP read/write transaction when MAC is reset.
1970 */
1971 ret_val = igb_disable_pcie_master(hw);
1972 if (ret_val)
1973 hw_dbg("PCI-E Master disable polling has failed.\n");
1974
1975 hw_dbg("Masking off all interrupts\n");
1976 wr32(E1000_IMC, 0xffffffff);
1977 wr32(E1000_RCTL, 0);
1978 wr32(E1000_TCTL, E1000_TCTL_PSP);
1979 wrfl();
1980
1981 msleep(10);
1982
1983 /* Determine whether or not a global dev reset is requested */
1984 if (global_device_reset &&
 1985	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
1986 global_device_reset = false;
1987
1988 if (global_device_reset &&
1989 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1990 ctrl |= E1000_CTRL_DEV_RST;
1991 else
1992 ctrl |= E1000_CTRL_RST;
1993
1994 wr32(E1000_CTRL, ctrl);
 1995	wrfl();
1996
 1997	/* Add delay to ensure DEV_RST has time to complete */
1998 if (global_device_reset)
1999 msleep(5);
2000
2001 ret_val = igb_get_auto_rd_done(hw);
2002 if (ret_val) {
2003 /*
2004 * When auto config read does not complete, do not
 2005		 * return with an error. This can happen when there is no
 2006		 * EEPROM, and returning an error would prevent link from coming up.
2007 */
2008 hw_dbg("Auto Read Done did not complete\n");
2009 }
2010
2011 /* If EEPROM is not present, run manual init scripts */
2012 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
2013 igb_reset_init_script_82575(hw);
2014
2015 /* clear global device reset status bit */
2016 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2017
2018 /* Clear any pending interrupt events. */
2019 wr32(E1000_IMC, 0xffffffff);
2020 icr = rd32(E1000_ICR);
2021
2022 ret_val = igb_reset_mdicnfg_82580(hw);
2023 if (ret_val)
2024 hw_dbg("Could not reset MDICNFG based on EEPROM\n");
2025
2026 /* Install any alternate MAC address into RAR0 */
2027 ret_val = igb_check_alt_mac_addr(hw);
2028
2029 /* Release semaphore */
2030 if (global_device_reset)
 2031		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2032
2033 return ret_val;
2034}
2035
2036/**
2037 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2038 * @data: data received by reading RXPBS register
2039 *
2040 * The 82580 uses a table based approach for packet buffer allocation sizes.
2041 * This function converts the retrieved value into the correct table value
2042 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2043 * 0x0 36 72 144 1 2 4 8 16
2044 * 0x8 35 70 140 rsv rsv rsv rsv rsv
2045 */
2046u16 igb_rxpbs_adjust_82580(u32 data)
2047{
2048 u16 ret_val = 0;
2049
2050 if (data < E1000_82580_RXPBS_TABLE_SIZE)
2051 ret_val = e1000_82580_rxpbs_table[data];
2052
2053 return ret_val;
2054}
2055
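/* Illustrative sketch, not driver code: it restates the lookup above with
 * the table from the comment written out, so the encoding is easy to check
 * by hand. Out-of-range (reserved) encodings fall back to 0, matching the
 * function's behaviour.
 */
#include <stdint.h>

static const uint16_t mock_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16,	/* encodings 0x0 - 0x7 */
	35, 70, 140,			/* encodings 0x8 - 0xA */
};

static uint16_t mock_rxpbs_adjust(uint32_t data)
{
	if (data < sizeof(mock_rxpbs_table) / sizeof(mock_rxpbs_table[0]))
		return mock_rxpbs_table[data];
	return 0;	/* reserved or invalid encoding */
}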
2056/**
2057 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
2058 * checksum
2059 * @hw: pointer to the HW structure
2060 * @offset: offset in words of the checksum protected region
2061 *
2062 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2063 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2064 **/
2065static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2066 u16 offset)
2067{
2068 s32 ret_val = 0;
2069 u16 checksum = 0;
2070 u16 i, nvm_data;
2071
2072 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2073 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2074 if (ret_val) {
2075 hw_dbg("NVM Read Error\n");
2076 goto out;
2077 }
2078 checksum += nvm_data;
2079 }
2080
2081 if (checksum != (u16) NVM_SUM) {
2082 hw_dbg("NVM Checksum Invalid\n");
2083 ret_val = -E1000_ERR_NVM;
2084 goto out;
2085 }
2086
2087out:
2088 return ret_val;
2089}
2090
2091/**
2092 * igb_update_nvm_checksum_with_offset - Update EEPROM
2093 * checksum
2094 * @hw: pointer to the HW structure
2095 * @offset: offset in words of the checksum protected region
2096 *
2097 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2098 * up to the checksum. Then calculates the EEPROM checksum and writes the
2099 * value to the EEPROM.
2100 **/
 2101static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2102{
2103 s32 ret_val;
2104 u16 checksum = 0;
2105 u16 i, nvm_data;
2106
2107 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2108 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2109 if (ret_val) {
2110 hw_dbg("NVM Read Error while updating checksum.\n");
2111 goto out;
2112 }
2113 checksum += nvm_data;
2114 }
2115 checksum = (u16) NVM_SUM - checksum;
2116 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2117 &checksum);
2118 if (ret_val)
2119 hw_dbg("NVM Write Error while updating checksum.\n");
2120
2121out:
2122 return ret_val;
2123}
2124
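/* Illustrative sketch, not driver code: it demonstrates the checksum rule
 * the validate/update helpers above rely on. The stored checksum word is
 * chosen so that the 16-bit sum of every word in the protected region,
 * checksum included, equals 0xBABA. Here the data words and the stored
 * checksum are passed separately for clarity.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MOCK_NVM_SUM 0xBABAu

static uint16_t mock_nvm_checksum(const uint16_t *words, size_t nwords)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += words[i];	/* 16-bit wrap-around, as in the driver */
	return (uint16_t)(MOCK_NVM_SUM - sum);
}

static bool mock_nvm_checksum_valid(const uint16_t *words, size_t nwords,
				    uint16_t stored_checksum)
{
	/* sum(words) + checksum == 0xBABA  <=>  checksum matches */
	return mock_nvm_checksum(words, nwords) == stored_checksum;
}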
2125/**
2126 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2127 * @hw: pointer to the HW structure
2128 *
2129 * Calculates the EEPROM section checksum by reading/adding each word of
2130 * the EEPROM and then verifies that the sum of the EEPROM is
2131 * equal to 0xBABA.
2132 **/
2133static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2134{
2135 s32 ret_val = 0;
2136 u16 eeprom_regions_count = 1;
2137 u16 j, nvm_data;
2138 u16 nvm_offset;
2139
2140 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2141 if (ret_val) {
2142 hw_dbg("NVM Read Error\n");
2143 goto out;
2144 }
2145
2146 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
 2147		/* if checksums compatibility bit is set validate checksums
2148 * for all 4 ports. */
2149 eeprom_regions_count = 4;
2150 }
2151
2152 for (j = 0; j < eeprom_regions_count; j++) {
2153 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2154 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2155 nvm_offset);
2156 if (ret_val != 0)
2157 goto out;
2158 }
2159
2160out:
2161 return ret_val;
2162}
2163
2164/**
2165 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
2166 * @hw: pointer to the HW structure
2167 *
2168 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2169 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2170 * checksum and writes the value to the EEPROM.
2171 **/
2172static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2173{
2174 s32 ret_val;
2175 u16 j, nvm_data;
2176 u16 nvm_offset;
2177
2178 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2179 if (ret_val) {
2180 hw_dbg("NVM Read Error while updating checksum"
2181 " compatibility bit.\n");
2182 goto out;
2183 }
2184
2185 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2186 /* set compatibility bit to validate checksums appropriately */
2187 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2188 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2189 &nvm_data);
2190 if (ret_val) {
2191 hw_dbg("NVM Write Error while updating checksum"
2192 " compatibility bit.\n");
2193 goto out;
2194 }
2195 }
2196
2197 for (j = 0; j < 4; j++) {
2198 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2199 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2200 if (ret_val)
2201 goto out;
2202 }
2203
2204out:
2205 return ret_val;
2206}
2207
2208/**
2209 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2210 * @hw: pointer to the HW structure
2211 *
2212 * Calculates the EEPROM section checksum by reading/adding each word of
2213 * the EEPROM and then verifies that the sum of the EEPROM is
2214 * equal to 0xBABA.
2215 **/
2216static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2217{
2218 s32 ret_val = 0;
2219 u16 j;
2220 u16 nvm_offset;
2221
2222 for (j = 0; j < 4; j++) {
2223 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2224 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2225 nvm_offset);
2226 if (ret_val != 0)
2227 goto out;
2228 }
2229
2230out:
2231 return ret_val;
2232}
2233
2234/**
2235 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
2236 * @hw: pointer to the HW structure
2237 *
2238 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2239 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2240 * checksum and writes the value to the EEPROM.
2241 **/
2242static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2243{
2244 s32 ret_val = 0;
2245 u16 j;
2246 u16 nvm_offset;
2247
2248 for (j = 0; j < 4; j++) {
2249 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2250 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2251 if (ret_val != 0)
2252 goto out;
2253 }
2254
2255out:
2256 return ret_val;
2257}
 2258
2259/**
2260 * igb_set_eee_i350 - Enable/disable EEE support
2261 * @hw: pointer to the HW structure
2262 *
2263 * Enable/disable EEE based on setting in dev_spec structure.
2264 *
2265 **/
2266s32 igb_set_eee_i350(struct e1000_hw *hw)
2267{
2268 s32 ret_val = 0;
 2269	u32 ipcnfg, eeer;
 2270
2271 if ((hw->mac.type < e1000_i350) ||
2272 (hw->phy.media_type != e1000_media_type_copper))
2273 goto out;
2274 ipcnfg = rd32(E1000_IPCNFG);
2275 eeer = rd32(E1000_EEER);
2276
2277 /* enable or disable per user setting */
2278 if (!(hw->dev_spec._82575.eee_disable)) {
2279 u32 eee_su = rd32(E1000_EEE_SU);
2280
2281 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2282 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2283 E1000_EEER_LPI_FC);
2284
2285 /* This bit should not be set in normal operation. */
2286 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2287 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2288
 2289
2290 } else {
2291 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2292 E1000_IPCNFG_EEE_100M_AN);
2293 eeer &= ~(E1000_EEER_TX_LPI_EN |
2294 E1000_EEER_RX_LPI_EN |
2295 E1000_EEER_LPI_FC);
2296 }
2297 wr32(E1000_IPCNFG, ipcnfg);
2298 wr32(E1000_EEER, eeer);
2299 rd32(E1000_IPCNFG);
2300 rd32(E1000_EEER);
2301out:
2302
2303 return ret_val;
2304}
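/* Illustrative sketch, not driver code: it factors the EEE decision above
 * into a pure function over the two register images so the enable and
 * disable paths can be compared side by side. Bit values are placeholders,
 * not the real IPCNFG/EEER layout.
 */
#include <stdbool.h>
#include <stdint.h>

#define MOCK_EEE_AN_BITS  0x00000003u	/* placeholder: 1G + 100M EEE advert */
#define MOCK_EEE_LPI_BITS 0x00070000u	/* placeholder: TX/RX LPI + LPI FC */

struct mock_eee_regs {
	uint32_t ipcnfg;
	uint32_t eeer;
};

static struct mock_eee_regs apply_eee(struct mock_eee_regs regs,
				      bool eee_disable)
{
	if (!eee_disable) {
		regs.ipcnfg |= MOCK_EEE_AN_BITS;
		regs.eeer |= MOCK_EEE_LPI_BITS;
	} else {
		regs.ipcnfg &= ~MOCK_EEE_AN_BITS;
		regs.eeer &= ~MOCK_EEE_LPI_BITS;
	}
	return regs;
}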
 2305
 2306static struct e1000_mac_operations e1000_mac_ops_82575 = {
2307 .init_hw = igb_init_hw_82575,
2308 .check_for_link = igb_check_for_link_82575,
 2309	.rar_set = igb_rar_set,
2310 .read_mac_addr = igb_read_mac_addr_82575,
2311 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
2312};
2313
2314static struct e1000_phy_operations e1000_phy_ops_82575 = {
 2315	.acquire = igb_acquire_phy_82575,
 2316	.get_cfg_done = igb_get_cfg_done_82575,
 2317	.release = igb_release_phy_82575,
2318 .write_i2c_byte = igb_write_i2c_byte,
2319 .read_i2c_byte = igb_read_i2c_byte,
2320};
2321
2322static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2323 .acquire = igb_acquire_nvm_82575,
2324 .read = igb_read_nvm_eerd,
2325 .release = igb_release_nvm_82575,
2326 .write = igb_write_nvm_spi,
2327};
2328
2329const struct e1000_info e1000_82575_info = {
2330 .get_invariants = igb_get_invariants_82575,
2331 .mac_ops = &e1000_mac_ops_82575,
2332 .phy_ops = &e1000_phy_ops_82575,
2333 .nvm_ops = &e1000_nvm_ops_82575,
2334};
2335