e1000e: Driver workaround for IPv6 Header Extension Erratum.
drivers/net/ethernet/intel/e1000e/ich8lan.c
1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 /*
30 * 82562G 10/100 Network Connection
31 * 82562G-2 10/100 Network Connection
32 * 82562GT 10/100 Network Connection
33 * 82562GT-2 10/100 Network Connection
34 * 82562V 10/100 Network Connection
35 * 82562V-2 10/100 Network Connection
36 * 82566DC-2 Gigabit Network Connection
37 * 82566DC Gigabit Network Connection
38 * 82566DM-2 Gigabit Network Connection
39 * 82566DM Gigabit Network Connection
40 * 82566MC Gigabit Network Connection
41 * 82566MM Gigabit Network Connection
42 * 82567LM Gigabit Network Connection
43 * 82567LF Gigabit Network Connection
44 * 82567V Gigabit Network Connection
45 * 82567LM-2 Gigabit Network Connection
46 * 82567LF-2 Gigabit Network Connection
47 * 82567V-2 Gigabit Network Connection
48 * 82567LF-3 Gigabit Network Connection
49 * 82567LM-3 Gigabit Network Connection
50 * 82567LM-4 Gigabit Network Connection
51 * 82577LM Gigabit Network Connection
52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection
55 * 82579LM Gigabit Network Connection
56 * 82579V Gigabit Network Connection
57 */
58
59 #include "e1000.h"
60
61 #define ICH_FLASH_GFPREG 0x0000
62 #define ICH_FLASH_HSFSTS 0x0004
63 #define ICH_FLASH_HSFCTL 0x0006
64 #define ICH_FLASH_FADDR 0x0008
65 #define ICH_FLASH_FDATA0 0x0010
66 #define ICH_FLASH_PR0 0x0074
67
68 #define ICH_FLASH_READ_COMMAND_TIMEOUT 500
69 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
70 #define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
71 #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
72 #define ICH_FLASH_CYCLE_REPEAT_COUNT 10
73
74 #define ICH_CYCLE_READ 0
75 #define ICH_CYCLE_WRITE 2
76 #define ICH_CYCLE_ERASE 3
77
78 #define FLASH_GFPREG_BASE_MASK 0x1FFF
79 #define FLASH_SECTOR_ADDR_SHIFT 12
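/*
 * Note (derived from e1000_init_nvm_params_ich8lan() below): GFPREG holds
 * the GbE flash region boundaries in 4-KB sector units -- bits 12:0 are the
 * first sector and bits 28:16 the last sector of the region.  Byte addresses
 * are obtained by shifting left by FLASH_SECTOR_ADDR_SHIFT.
 */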
80
81 #define ICH_FLASH_SEG_SIZE_256 256
82 #define ICH_FLASH_SEG_SIZE_4K 4096
83 #define ICH_FLASH_SEG_SIZE_8K 8192
84 #define ICH_FLASH_SEG_SIZE_64K 65536
85
86
87 #define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
88 /* FW established a valid mode */
89 #define E1000_ICH_FWSM_FW_VALID 0x00008000
90
91 #define E1000_ICH_MNG_IAMT_MODE 0x2
92
93 #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
94 (ID_LED_DEF1_OFF2 << 8) | \
95 (ID_LED_DEF1_ON2 << 4) | \
96 (ID_LED_DEF1_DEF2))
97
98 #define E1000_ICH_NVM_SIG_WORD 0x13
99 #define E1000_ICH_NVM_SIG_MASK 0xC000
100 #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
101 #define E1000_ICH_NVM_SIG_VALUE 0x80
102
103 #define E1000_ICH8_LAN_INIT_TIMEOUT 1500
104
105 #define E1000_FEXTNVM_SW_CONFIG 1
106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107
108 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
109 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
110
111 #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
112 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
113 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
114
115 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
116
117 #define E1000_ICH_RAR_ENTRIES 7
118 #define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
119
120 #define PHY_PAGE_SHIFT 5
121 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
122 ((reg) & MAX_PHY_REG_ADDRESS))
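/*
 * Illustrative expansion of PHY_REG(): the page number occupies the upper
 * bits and the 5-bit register offset the lower bits, e.g.
 *   HV_PM_CTRL = PHY_REG(770, 17) = (770 << 5) | 17 = 24657 (0x6051)
 */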
123 #define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
124 #define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
125
126 #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
127 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
128 #define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
129
130 #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
131
132 #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
133
134 /* SMBus Address Phy Register */
135 #define HV_SMB_ADDR PHY_REG(768, 26)
136 #define HV_SMB_ADDR_MASK 0x007F
137 #define HV_SMB_ADDR_PEC_EN 0x0200
138 #define HV_SMB_ADDR_VALID 0x0080
139
140 /* PHY Power Management Control */
141 #define HV_PM_CTRL PHY_REG(770, 17)
142 #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
143
144 /* PHY Low Power Idle Control */
145 #define I82579_LPI_CTRL PHY_REG(772, 20)
146 #define I82579_LPI_CTRL_ENABLE_MASK 0x6000
147 #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
148
149 /* EMI Registers */
150 #define I82579_EMI_ADDR 0x10
151 #define I82579_EMI_DATA 0x11
152 #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
153 #define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
154 #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
155
156 /* Strapping Option Register - RO */
157 #define E1000_STRAP 0x0000C
158 #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
159 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
160
161 /* OEM Bits Phy Register */
162 #define HV_OEM_BITS PHY_REG(768, 25)
163 #define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
164 #define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
165 #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
166
167 #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
168 #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
169
170 /* KMRN Mode Control */
171 #define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
172 #define HV_KMRN_MDIO_SLOW 0x0400
173
174 /* KMRN FIFO Control and Status */
175 #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
176 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
177 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
178
179 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
180 /* Offset 04h HSFSTS */
181 union ich8_hws_flash_status {
182 struct ich8_hsfsts {
183 u16 flcdone :1; /* bit 0 Flash Cycle Done */
184 u16 flcerr :1; /* bit 1 Flash Cycle Error */
185 u16 dael :1; /* bit 2 Direct Access error Log */
186 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
187 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
188 u16 reserved1 :2; /* bit 7:6 Reserved */
189 u16 reserved2 :6; /* bit 13:8 Reserved */
190 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
191 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
192 } hsf_status;
193 u16 regval;
194 };
195
196 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
197 /* Offset 06h FLCTL */
198 union ich8_hws_flash_ctrl {
199 struct ich8_hsflctl {
200 u16 flcgo :1; /* 0 Flash Cycle Go */
201 u16 flcycle :2; /* 2:1 Flash Cycle */
202 u16 reserved :5; /* 7:3 Reserved */
203 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
204 u16 flockdn :6; /* 15:10 Reserved */
205 } hsf_ctrl;
206 u16 regval;
207 };
208
209 /* ICH Flash Region Access Permissions */
210 union ich8_hws_flash_regacc {
211 struct ich8_flracc {
212 u32 grra :8; /* 0:7 GbE region Read Access */
213 u32 grwa :8; /* 8:15 GbE region Write Access */
214 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
215 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
216 } hsf_flregacc;
217 u32 regval;
218 };
219
220 /* ICH Flash Protected Region */
221 union ich8_flash_protected_range {
222 struct ich8_pr {
223 u32 base:13; /* 0:12 Protected Range Base */
224 u32 reserved1:2; /* 13:14 Reserved */
225 u32 rpe:1; /* 15 Read Protection Enable */
226 u32 limit:13; /* 16:28 Protected Range Limit */
227 u32 reserved2:2; /* 29:30 Reserved */
228 u32 wpe:1; /* 31 Write Protection Enable */
229 } range;
230 u32 regval;
231 };
232
233 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
234 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
235 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
236 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
237 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
238 u32 offset, u8 byte);
239 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
240 u8 *data);
241 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
242 u16 *data);
243 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
244 u8 size, u16 *data);
245 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
246 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
247 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
248 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
249 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
250 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
251 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
252 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
253 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
254 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
255 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
256 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
257 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
258 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
259 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
260 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
261 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
262 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
263 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
264 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
265 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
266
267 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
268 {
269 return readw(hw->flash_address + reg);
270 }
271
272 static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
273 {
274 return readl(hw->flash_address + reg);
275 }
276
277 static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
278 {
279 writew(val, hw->flash_address + reg);
280 }
281
282 static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
283 {
284 writel(val, hw->flash_address + reg);
285 }
286
287 #define er16flash(reg) __er16flash(hw, (reg))
288 #define er32flash(reg) __er32flash(hw, (reg))
289 #define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
290 #define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
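/*
 * Usage sketch (illustrative only): flash registers are typically read and
 * written through these wrappers and decoded via the unions defined above,
 * for example:
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog)
 *		...	wait for the current flash cycle to finish
 */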
291
292 /**
293 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
294 * @hw: pointer to the HW structure
295 *
296 * Test access to the PHY registers by reading the PHY ID registers. If
297 * the PHY ID is already known (e.g. resume path), compare it with the known ID;
298 * otherwise assume the read PHY ID is correct if it is valid.
299 *
300 * Assumes the sw/fw/hw semaphore is already acquired.
301 **/
302 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
303 {
304 u16 phy_reg;
305 u32 phy_id;
306
307 e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
308 phy_id = (u32)(phy_reg << 16);
309 e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
310 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
311
312 if (hw->phy.id) {
313 if (hw->phy.id == phy_id)
314 return true;
315 } else {
316 if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
317 hw->phy.id = phy_id;
318 return true;
319 }
320
321 return false;
322 }
323
324 /**
325 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
326 * @hw: pointer to the HW structure
327 *
328 * Workarounds/flow necessary for PHY initialization during driver load
329 * and resume paths.
330 **/
331 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
332 {
333 u32 mac_reg, fwsm = er32(FWSM);
334 s32 ret_val;
335
336 ret_val = hw->phy.ops.acquire(hw);
337 if (ret_val) {
338 e_dbg("Failed to initialize PHY flow\n");
339 return ret_val;
340 }
341
342 /*
343 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
344 * inaccessible and resetting the PHY is not blocked, toggle the
345 * LANPHYPC Value bit to force the interconnect to PCIe mode.
346 */
347 switch (hw->mac.type) {
348 case e1000_pch2lan:
349 /*
350 * Gate automatic PHY configuration by hardware on
351 * non-managed 82579
352 */
353 if (!(fwsm & E1000_ICH_FWSM_FW_VALID))
354 e1000_gate_hw_phy_config_ich8lan(hw, true);
355
356 if (e1000_phy_is_accessible_pchlan(hw))
357 break;
358
359 /* fall-through */
360 case e1000_pchlan:
361 if ((hw->mac.type == e1000_pchlan) &&
362 (fwsm & E1000_ICH_FWSM_FW_VALID))
363 break;
364
365 if (hw->phy.ops.check_reset_block(hw)) {
366 e_dbg("Required LANPHYPC toggle blocked by ME\n");
367 break;
368 }
369
370 e_dbg("Toggling LANPHYPC\n");
371
372 /* Set Phy Config Counter to 50msec */
373 mac_reg = er32(FEXTNVM3);
374 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
375 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
376 ew32(FEXTNVM3, mac_reg);
377
378 /* Toggle LANPHYPC Value bit */
379 mac_reg = er32(CTRL);
380 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
381 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
382 ew32(CTRL, mac_reg);
383 e1e_flush();
384 udelay(10);
385 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
386 ew32(CTRL, mac_reg);
387 e1e_flush();
388 msleep(50);
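/*
 * The sequence above asserts the LANPHYPC override with the value bit
 * cleared for ~10us, then releases the override and allows ~50ms for the
 * interconnect to switch to PCIe mode and the PHY to become accessible
 * again.
 */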
389 break;
390 default:
391 break;
392 }
393
394 hw->phy.ops.release(hw);
395
396 /*
397 * Reset the PHY before any access to it. Doing so ensures
398 * that the PHY is in a known good state before we read/write
399 * PHY registers. The generic reset is sufficient here,
400 * because we haven't determined the PHY type yet.
401 */
402 ret_val = e1000e_phy_hw_reset_generic(hw);
403
404 /* Ungate automatic PHY configuration on non-managed 82579 */
405 if ((hw->mac.type == e1000_pch2lan) &&
406 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
407 usleep_range(10000, 20000);
408 e1000_gate_hw_phy_config_ich8lan(hw, false);
409 }
410
411 return ret_val;
412 }
413
414 /**
415 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
416 * @hw: pointer to the HW structure
417 *
418 * Initialize family-specific PHY parameters and function pointers.
419 **/
420 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
421 {
422 struct e1000_phy_info *phy = &hw->phy;
423 s32 ret_val = 0;
424
425 phy->addr = 1;
426 phy->reset_delay_us = 100;
427
428 phy->ops.set_page = e1000_set_page_igp;
429 phy->ops.read_reg = e1000_read_phy_reg_hv;
430 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
431 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
432 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
433 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
434 phy->ops.write_reg = e1000_write_phy_reg_hv;
435 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
436 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
437 phy->ops.power_up = e1000_power_up_phy_copper;
438 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
439 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
440
441 phy->id = e1000_phy_unknown;
442
443 ret_val = e1000_init_phy_workarounds_pchlan(hw);
444 if (ret_val)
445 return ret_val;
446
447 if (phy->id == e1000_phy_unknown)
448 switch (hw->mac.type) {
449 default:
450 ret_val = e1000e_get_phy_id(hw);
451 if (ret_val)
452 return ret_val;
453 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
454 break;
455 /* fall-through */
456 case e1000_pch2lan:
457 /*
458 * In case the PHY needs to be in mdio slow mode,
459 * set slow mode and try to get the PHY id again.
460 */
461 ret_val = e1000_set_mdio_slow_mode_hv(hw);
462 if (ret_val)
463 return ret_val;
464 ret_val = e1000e_get_phy_id(hw);
465 if (ret_val)
466 return ret_val;
467 break;
468 }
469 phy->type = e1000e_get_phy_type_from_id(phy->id);
470
471 switch (phy->type) {
472 case e1000_phy_82577:
473 case e1000_phy_82579:
474 phy->ops.check_polarity = e1000_check_polarity_82577;
475 phy->ops.force_speed_duplex =
476 e1000_phy_force_speed_duplex_82577;
477 phy->ops.get_cable_length = e1000_get_cable_length_82577;
478 phy->ops.get_info = e1000_get_phy_info_82577;
479 phy->ops.commit = e1000e_phy_sw_reset;
480 break;
481 case e1000_phy_82578:
482 phy->ops.check_polarity = e1000_check_polarity_m88;
483 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
484 phy->ops.get_cable_length = e1000e_get_cable_length_m88;
485 phy->ops.get_info = e1000e_get_phy_info_m88;
486 break;
487 default:
488 ret_val = -E1000_ERR_PHY;
489 break;
490 }
491
492 return ret_val;
493 }
494
495 /**
496 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
497 * @hw: pointer to the HW structure
498 *
499 * Initialize family-specific PHY parameters and function pointers.
500 **/
501 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
502 {
503 struct e1000_phy_info *phy = &hw->phy;
504 s32 ret_val;
505 u16 i = 0;
506
507 phy->addr = 1;
508 phy->reset_delay_us = 100;
509
510 phy->ops.power_up = e1000_power_up_phy_copper;
511 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
512
513 /*
514 * We may need to do this twice - once for IGP and if that fails,
515 * we'll set BM func pointers and try again
516 */
517 ret_val = e1000e_determine_phy_address(hw);
518 if (ret_val) {
519 phy->ops.write_reg = e1000e_write_phy_reg_bm;
520 phy->ops.read_reg = e1000e_read_phy_reg_bm;
521 ret_val = e1000e_determine_phy_address(hw);
522 if (ret_val) {
523 e_dbg("Cannot determine PHY addr. Erroring out\n");
524 return ret_val;
525 }
526 }
527
528 phy->id = 0;
529 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
530 (i++ < 100)) {
531 usleep_range(1000, 2000);
532 ret_val = e1000e_get_phy_id(hw);
533 if (ret_val)
534 return ret_val;
535 }
536
537 /* Verify phy id */
538 switch (phy->id) {
539 case IGP03E1000_E_PHY_ID:
540 phy->type = e1000_phy_igp_3;
541 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
542 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
543 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
544 phy->ops.get_info = e1000e_get_phy_info_igp;
545 phy->ops.check_polarity = e1000_check_polarity_igp;
546 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
547 break;
548 case IFE_E_PHY_ID:
549 case IFE_PLUS_E_PHY_ID:
550 case IFE_C_E_PHY_ID:
551 phy->type = e1000_phy_ife;
552 phy->autoneg_mask = E1000_ALL_NOT_GIG;
553 phy->ops.get_info = e1000_get_phy_info_ife;
554 phy->ops.check_polarity = e1000_check_polarity_ife;
555 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
556 break;
557 case BME1000_E_PHY_ID:
558 phy->type = e1000_phy_bm;
559 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
560 phy->ops.read_reg = e1000e_read_phy_reg_bm;
561 phy->ops.write_reg = e1000e_write_phy_reg_bm;
562 phy->ops.commit = e1000e_phy_sw_reset;
563 phy->ops.get_info = e1000e_get_phy_info_m88;
564 phy->ops.check_polarity = e1000_check_polarity_m88;
565 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
566 break;
567 default:
568 return -E1000_ERR_PHY;
569 break;
570 }
571
572 return 0;
573 }
574
575 /**
576 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
577 * @hw: pointer to the HW structure
578 *
579 * Initialize family-specific NVM parameters and function
580 * pointers.
581 **/
582 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
583 {
584 struct e1000_nvm_info *nvm = &hw->nvm;
585 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
586 u32 gfpreg, sector_base_addr, sector_end_addr;
587 u16 i;
588
589 /* Can't read flash registers if the register set isn't mapped. */
590 if (!hw->flash_address) {
591 e_dbg("ERROR: Flash registers not mapped\n");
592 return -E1000_ERR_CONFIG;
593 }
594
595 nvm->type = e1000_nvm_flash_sw;
596
597 gfpreg = er32flash(ICH_FLASH_GFPREG);
598
599 /*
600 * sector_X_addr is a "sector"-aligned address (4096 bytes)
601 * Add 1 to sector_end_addr since this sector is included in
602 * the overall size.
603 */
604 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
605 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
606
607 /* flash_base_addr is byte-aligned */
608 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
609
610 /*
611 * find total size of the NVM, then cut in half since the total
612 * size represents two separate NVM banks.
613 */
614 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
615 << FLASH_SECTOR_ADDR_SHIFT;
616 nvm->flash_bank_size /= 2;
617 /* Adjust to word count */
618 nvm->flash_bank_size /= sizeof(u16);
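/*
 * The computation above with hypothetical register contents: if GFPREG reads
 * 0x000A0001, then sector_base_addr = 1 and sector_end_addr = 0xA + 1 = 0xB,
 * so flash_base_addr = 0x1000 and the two banks together span
 * (0xB - 1) << 12 = 0xA000 bytes, i.e. 0x5000 bytes = 0x2800 (10240) words
 * per bank.
 */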
619
620 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
621
622 /* Clear shadow ram */
623 for (i = 0; i < nvm->word_size; i++) {
624 dev_spec->shadow_ram[i].modified = false;
625 dev_spec->shadow_ram[i].value = 0xFFFF;
626 }
627
628 return 0;
629 }
630
631 /**
632 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
633 * @hw: pointer to the HW structure
634 *
635 * Initialize family-specific MAC parameters and function
636 * pointers.
637 **/
638 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
639 {
640 struct e1000_mac_info *mac = &hw->mac;
641
642 /* Set media type function pointer */
643 hw->phy.media_type = e1000_media_type_copper;
644
645 /* Set mta register count */
646 mac->mta_reg_count = 32;
647 /* Set rar entry count */
648 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
649 if (mac->type == e1000_ich8lan)
650 mac->rar_entry_count--;
651 /* FWSM register */
652 mac->has_fwsm = true;
653 /* ARC subsystem not supported */
654 mac->arc_subsystem_valid = false;
655 /* Adaptive IFS supported */
656 mac->adaptive_ifs = true;
657
658 /* LED operations */
659 switch (mac->type) {
660 case e1000_ich8lan:
661 case e1000_ich9lan:
662 case e1000_ich10lan:
663 /* check management mode */
664 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
665 /* ID LED init */
666 mac->ops.id_led_init = e1000e_id_led_init_generic;
667 /* blink LED */
668 mac->ops.blink_led = e1000e_blink_led_generic;
669 /* setup LED */
670 mac->ops.setup_led = e1000e_setup_led_generic;
671 /* cleanup LED */
672 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
673 /* turn on/off LED */
674 mac->ops.led_on = e1000_led_on_ich8lan;
675 mac->ops.led_off = e1000_led_off_ich8lan;
676 break;
677 case e1000_pch2lan:
678 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
679 mac->ops.rar_set = e1000_rar_set_pch2lan;
680 /* fall-through */
681 case e1000_pchlan:
682 /* check management mode */
683 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
684 /* ID LED init */
685 mac->ops.id_led_init = e1000_id_led_init_pchlan;
686 /* setup LED */
687 mac->ops.setup_led = e1000_setup_led_pchlan;
688 /* cleanup LED */
689 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
690 /* turn on/off LED */
691 mac->ops.led_on = e1000_led_on_pchlan;
692 mac->ops.led_off = e1000_led_off_pchlan;
693 break;
694 default:
695 break;
696 }
697
698 /* Enable PCS Lock-loss workaround for ICH8 */
699 if (mac->type == e1000_ich8lan)
700 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
701
702 /* Gate automatic PHY configuration by hardware on managed 82579 */
703 if ((mac->type == e1000_pch2lan) &&
704 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
705 e1000_gate_hw_phy_config_ich8lan(hw, true);
706
707 return 0;
708 }
709
710 /**
711 * e1000_set_eee_pchlan - Enable/disable EEE support
712 * @hw: pointer to the HW structure
713 *
714 * Enable/disable EEE based on setting in dev_spec structure. The bits in
715 * the LPI Control register will remain set only if/when link is up.
716 **/
717 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
718 {
719 s32 ret_val = 0;
720 u16 phy_reg;
721
722 if (hw->phy.type != e1000_phy_82579)
723 return 0;
724
725 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
726 if (ret_val)
727 return ret_val;
728
729 if (hw->dev_spec.ich8lan.eee_disable)
730 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
731 else
732 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
733
734 return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
735 }
736
737 /**
738 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
739 * @hw: pointer to the HW structure
740 *
741 * Checks to see if the link status of the hardware has changed. If a
742 * change in link status has been detected, then we read the PHY registers
743 * to get the current speed/duplex if link exists.
744 **/
745 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
746 {
747 struct e1000_mac_info *mac = &hw->mac;
748 s32 ret_val;
749 bool link;
750 u16 phy_reg;
751
752 /*
753 * We only want to go out to the PHY registers to see if Auto-Neg
754 * has completed and/or if our link status has changed. The
755 * get_link_status flag is set upon receiving a Link Status
756 * Change or Rx Sequence Error interrupt.
757 */
758 if (!mac->get_link_status)
759 return 0;
760
761 /*
762 * First we want to see if the MII Status Register reports
763 * link. If so, then we want to get the current speed/duplex
764 * of the PHY.
765 */
766 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
767 if (ret_val)
768 return ret_val;
769
770 if (hw->mac.type == e1000_pchlan) {
771 ret_val = e1000_k1_gig_workaround_hv(hw, link);
772 if (ret_val)
773 return ret_val;
774 }
775
776 if (!link)
777 return 0; /* No link detected */
778
779 mac->get_link_status = false;
780
781 switch (hw->mac.type) {
782 case e1000_pch2lan:
783 ret_val = e1000_k1_workaround_lv(hw);
784 if (ret_val)
785 return ret_val;
786 /* fall-through */
787 case e1000_pchlan:
788 if (hw->phy.type == e1000_phy_82578) {
789 ret_val = e1000_link_stall_workaround_hv(hw);
790 if (ret_val)
791 return ret_val;
792 }
793
794 /*
795 * Workaround for PCHx parts in half-duplex:
796 * Set the number of preambles removed from the packet
797 * when it is passed from the PHY to the MAC to prevent
798 * the MAC from misinterpreting the packet type.
799 */
800 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
801 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
802
803 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
804 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
805
806 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
807 break;
808 default:
809 break;
810 }
811
812 /*
813 * Check if there was DownShift; this must be checked
814 * immediately after link-up
815 */
816 e1000e_check_downshift(hw);
817
818 /* Enable/Disable EEE after link up */
819 ret_val = e1000_set_eee_pchlan(hw);
820 if (ret_val)
821 return ret_val;
822
823 /*
824 * If we are forcing speed/duplex, then we simply return since
825 * we have already determined whether we have link or not.
826 */
827 if (!mac->autoneg)
828 return -E1000_ERR_CONFIG;
829
830 /*
831 * Auto-Neg is enabled. Auto Speed Detection takes care
832 * of MAC speed/duplex configuration. So we only need to
833 * configure Collision Distance in the MAC.
834 */
835 mac->ops.config_collision_dist(hw);
836
837 /*
838 * Configure Flow Control now that Auto-Neg has completed.
839 * First, we need to restore the desired flow control
840 * settings because we may have had to re-autoneg with a
841 * different link partner.
842 */
843 ret_val = e1000e_config_fc_after_link_up(hw);
844 if (ret_val)
845 e_dbg("Error configuring flow control\n");
846
847 return ret_val;
848 }
849
850 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
851 {
852 struct e1000_hw *hw = &adapter->hw;
853 s32 rc;
854
855 rc = e1000_init_mac_params_ich8lan(hw);
856 if (rc)
857 return rc;
858
859 rc = e1000_init_nvm_params_ich8lan(hw);
860 if (rc)
861 return rc;
862
863 switch (hw->mac.type) {
864 case e1000_ich8lan:
865 case e1000_ich9lan:
866 case e1000_ich10lan:
867 rc = e1000_init_phy_params_ich8lan(hw);
868 break;
869 case e1000_pchlan:
870 case e1000_pch2lan:
871 rc = e1000_init_phy_params_pchlan(hw);
872 break;
873 default:
874 break;
875 }
876 if (rc)
877 return rc;
878
879 /*
880 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
881 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
882 */
883 if ((adapter->hw.phy.type == e1000_phy_ife) ||
884 ((adapter->hw.mac.type >= e1000_pch2lan) &&
885 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
886 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
887 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
888
889 hw->mac.ops.blink_led = NULL;
890 }
891
892 if ((adapter->hw.mac.type == e1000_ich8lan) &&
893 (adapter->hw.phy.type != e1000_phy_ife))
894 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
895
896 /* Enable workaround for 82579 w/ ME enabled */
897 if ((adapter->hw.mac.type == e1000_pch2lan) &&
898 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
899 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
900
901 /* Disable EEE by default until IEEE802.3az spec is finalized */
902 if (adapter->flags2 & FLAG2_HAS_EEE)
903 adapter->hw.dev_spec.ich8lan.eee_disable = true;
904
905 return 0;
906 }
907
908 static DEFINE_MUTEX(nvm_mutex);
909
910 /**
911 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
912 * @hw: pointer to the HW structure
913 *
914 * Acquires the mutex for performing NVM operations.
915 **/
916 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
917 {
918 mutex_lock(&nvm_mutex);
919
920 return 0;
921 }
922
923 /**
924 * e1000_release_nvm_ich8lan - Release NVM mutex
925 * @hw: pointer to the HW structure
926 *
927 * Releases the mutex used while performing NVM operations.
928 **/
929 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
930 {
931 mutex_unlock(&nvm_mutex);
932 }
933
934 /**
935 * e1000_acquire_swflag_ich8lan - Acquire software control flag
936 * @hw: pointer to the HW structure
937 *
938 * Acquires the software control flag for performing PHY and select
939 * MAC CSR accesses.
940 **/
941 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
942 {
943 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
944 s32 ret_val = 0;
945
946 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
947 &hw->adapter->state)) {
948 e_dbg("contention for Phy access\n");
949 return -E1000_ERR_PHY;
950 }
951
952 while (timeout) {
953 extcnf_ctrl = er32(EXTCNF_CTRL);
954 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
955 break;
956
957 mdelay(1);
958 timeout--;
959 }
960
961 if (!timeout) {
962 e_dbg("SW has already locked the resource.\n");
963 ret_val = -E1000_ERR_CONFIG;
964 goto out;
965 }
966
967 timeout = SW_FLAG_TIMEOUT;
968
969 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
970 ew32(EXTCNF_CTRL, extcnf_ctrl);
971
972 while (timeout) {
973 extcnf_ctrl = er32(EXTCNF_CTRL);
974 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
975 break;
976
977 mdelay(1);
978 timeout--;
979 }
980
981 if (!timeout) {
982 e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
983 er32(FWSM), extcnf_ctrl);
984 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
985 ew32(EXTCNF_CTRL, extcnf_ctrl);
986 ret_val = -E1000_ERR_CONFIG;
987 goto out;
988 }
989
990 out:
991 if (ret_val)
992 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
993
994 return ret_val;
995 }
996
997 /**
998 * e1000_release_swflag_ich8lan - Release software control flag
999 * @hw: pointer to the HW structure
1000 *
1001 * Releases the software control flag for performing PHY and select
1002 * MAC CSR accesses.
1003 **/
1004 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1005 {
1006 u32 extcnf_ctrl;
1007
1008 extcnf_ctrl = er32(EXTCNF_CTRL);
1009
1010 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1011 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1012 ew32(EXTCNF_CTRL, extcnf_ctrl);
1013 } else {
1014 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1015 }
1016
1017 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1018 }
1019
1020 /**
1021 * e1000_check_mng_mode_ich8lan - Checks management mode
1022 * @hw: pointer to the HW structure
1023 *
1024 * This checks if the adapter has any manageability enabled.
1025 * This is a function pointer entry point only called by read/write
1026 * routines for the PHY and NVM parts.
1027 **/
1028 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1029 {
1030 u32 fwsm;
1031
1032 fwsm = er32(FWSM);
1033 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1034 ((fwsm & E1000_FWSM_MODE_MASK) ==
1035 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1036 }
1037
1038 /**
1039 * e1000_check_mng_mode_pchlan - Checks management mode
1040 * @hw: pointer to the HW structure
1041 *
1042 * This checks if the adapter has iAMT enabled.
1043 * This is a function pointer entry point only called by read/write
1044 * routines for the PHY and NVM parts.
1045 **/
1046 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1047 {
1048 u32 fwsm;
1049
1050 fwsm = er32(FWSM);
1051 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1052 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1053 }
1054
1055 /**
1056 * e1000_rar_set_pch2lan - Set receive address register
1057 * @hw: pointer to the HW structure
1058 * @addr: pointer to the receive address
1059 * @index: receive address array register
1060 *
1061 * Sets the receive address array register at index to the address passed
1062 * in by addr. For 82579, RAR[0] is the base address register that is to
1063 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1064 * Use SHRA[0-3] in place of those reserved for ME.
1065 **/
1066 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1067 {
1068 u32 rar_low, rar_high;
1069
1070 /*
1071 * HW expects these in little endian so we reverse the byte order
1072 * from network order (big endian) to little endian
1073 */
1074 rar_low = ((u32)addr[0] |
1075 ((u32)addr[1] << 8) |
1076 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1077
1078 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
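/*
 * Illustrative example (hypothetical address): for 00:1b:21:aa:bb:cc the
 * assembly above yields rar_low = 0xaa211b00 and rar_high = 0x0000ccbb,
 * i.e. the first four octets land in RAL and the last two in RAH.
 */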
1079
1080 /* If MAC address zero, no need to set the AV bit */
1081 if (rar_low || rar_high)
1082 rar_high |= E1000_RAH_AV;
1083
1084 if (index == 0) {
1085 ew32(RAL(index), rar_low);
1086 e1e_flush();
1087 ew32(RAH(index), rar_high);
1088 e1e_flush();
1089 return;
1090 }
1091
1092 if (index < hw->mac.rar_entry_count) {
1093 s32 ret_val;
1094
1095 ret_val = e1000_acquire_swflag_ich8lan(hw);
1096 if (ret_val)
1097 goto out;
1098
1099 ew32(SHRAL(index - 1), rar_low);
1100 e1e_flush();
1101 ew32(SHRAH(index - 1), rar_high);
1102 e1e_flush();
1103
1104 e1000_release_swflag_ich8lan(hw);
1105
1106 /* verify the register updates */
1107 if ((er32(SHRAL(index - 1)) == rar_low) &&
1108 (er32(SHRAH(index - 1)) == rar_high))
1109 return;
1110
1111 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1112 (index - 1), er32(FWSM));
1113 }
1114
1115 out:
1116 e_dbg("Failed to write receive address at index %d\n", index);
1117 }
1118
1119 /**
1120 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1121 * @hw: pointer to the HW structure
1122 *
1123 * Checks if firmware is blocking the reset of the PHY.
1124 * This is a function pointer entry point only called by
1125 * reset routines.
1126 **/
1127 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1128 {
1129 u32 fwsm;
1130
1131 fwsm = er32(FWSM);
1132
1133 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
1134 }
1135
1136 /**
1137 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1138 * @hw: pointer to the HW structure
1139 *
1140 * Assumes semaphore already acquired.
1141 *
1142 **/
1143 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1144 {
1145 u16 phy_data;
1146 u32 strap = er32(STRAP);
1147 s32 ret_val = 0;
1148
1149 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1150
1151 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1152 if (ret_val)
1153 return ret_val;
1154
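/*
 * The 7-bit SMBus address is carried in STRAP bits 23:17 (see the
 * E1000_STRAP_SMBUS_ADDRESS_* definitions above).  Illustrative example with
 * a hypothetical strap value: 0x00C80000 masked and shifted right by 17
 * gives 0x64, which is placed in HV_SMB_ADDR bits 6:0 below.
 */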
1155 phy_data &= ~HV_SMB_ADDR_MASK;
1156 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1157 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1158
1159 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1160 }
1161
1162 /**
1163 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1164 * @hw: pointer to the HW structure
1165 *
1166 * SW should configure the LCD from the NVM extended configuration region
1167 * as a workaround for certain parts.
1168 **/
1169 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1170 {
1171 struct e1000_phy_info *phy = &hw->phy;
1172 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1173 s32 ret_val = 0;
1174 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1175
1176 /*
1177 * Initialize the PHY from the NVM on ICH platforms. This
1178 * is needed due to an issue where the NVM configuration is
1179 * not properly autoloaded after power transitions.
1180 * Therefore, after each PHY reset, we will load the
1181 * configuration data out of the NVM manually.
1182 */
1183 switch (hw->mac.type) {
1184 case e1000_ich8lan:
1185 if (phy->type != e1000_phy_igp_3)
1186 return ret_val;
1187
1188 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
1189 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
1190 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1191 break;
1192 }
1193 /* Fall-through */
1194 case e1000_pchlan:
1195 case e1000_pch2lan:
1196 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1197 break;
1198 default:
1199 return ret_val;
1200 }
1201
1202 ret_val = hw->phy.ops.acquire(hw);
1203 if (ret_val)
1204 return ret_val;
1205
1206 data = er32(FEXTNVM);
1207 if (!(data & sw_cfg_mask))
1208 goto release;
1209
1210 /*
1211 * Make sure HW does not configure LCD from PHY
1212 * extended configuration before SW configuration
1213 */
1214 data = er32(EXTCNF_CTRL);
1215 if (!(hw->mac.type == e1000_pch2lan)) {
1216 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1217 goto release;
1218 }
1219
1220 cnf_size = er32(EXTCNF_SIZE);
1221 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1222 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1223 if (!cnf_size)
1224 goto release;
1225
1226 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1227 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1228
1229 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1230 (hw->mac.type == e1000_pchlan)) ||
1231 (hw->mac.type == e1000_pch2lan)) {
1232 /*
1233 * HW configures the SMBus address and LEDs when the
1234 * OEM and LCD Write Enable bits are set in the NVM.
1235 * When both NVM bits are cleared, SW will configure
1236 * them instead.
1237 */
1238 ret_val = e1000_write_smbus_addr(hw);
1239 if (ret_val)
1240 goto release;
1241
1242 data = er32(LEDCTL);
1243 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1244 (u16)data);
1245 if (ret_val)
1246 goto release;
1247 }
1248
1249 /* Configure LCD from extended configuration region. */
1250
1251 /* cnf_base_addr is in DWORD */
1252 word_addr = (u16)(cnf_base_addr << 1);
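/*
 * Layout assumed from the loop below: each extended configuration entry in
 * the NVM is a word pair -- the data word at (word_addr + i * 2) followed by
 * the PHY register address word at (word_addr + i * 2 + 1).  Page-select
 * writes are cached in phy_page and OR'd into later register addresses.
 */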
1253
1254 for (i = 0; i < cnf_size; i++) {
1255 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
1256 &reg_data);
1257 if (ret_val)
1258 goto release;
1259
1260 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
1261 1, &reg_addr);
1262 if (ret_val)
1263 goto release;
1264
1265 /* Save off the PHY page for future writes. */
1266 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1267 phy_page = reg_data;
1268 continue;
1269 }
1270
1271 reg_addr &= PHY_REG_MASK;
1272 reg_addr |= phy_page;
1273
1274 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
1275 if (ret_val)
1276 goto release;
1277 }
1278
1279 release:
1280 hw->phy.ops.release(hw);
1281 return ret_val;
1282 }
1283
1284 /**
1285 * e1000_k1_gig_workaround_hv - K1 Si workaround
1286 * @hw: pointer to the HW structure
1287 * @link: link up bool flag
1288 *
1289 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1290 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
1291 * If link is down, the function will restore the default K1 setting located
1292 * in the NVM.
1293 **/
1294 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1295 {
1296 s32 ret_val = 0;
1297 u16 status_reg = 0;
1298 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1299
1300 if (hw->mac.type != e1000_pchlan)
1301 return 0;
1302
1303 /* Wrap the whole flow with the sw flag */
1304 ret_val = hw->phy.ops.acquire(hw);
1305 if (ret_val)
1306 return ret_val;
1307
1308 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1309 if (link) {
1310 if (hw->phy.type == e1000_phy_82578) {
1311 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
1312 &status_reg);
1313 if (ret_val)
1314 goto release;
1315
1316 status_reg &= BM_CS_STATUS_LINK_UP |
1317 BM_CS_STATUS_RESOLVED |
1318 BM_CS_STATUS_SPEED_MASK;
1319
1320 if (status_reg == (BM_CS_STATUS_LINK_UP |
1321 BM_CS_STATUS_RESOLVED |
1322 BM_CS_STATUS_SPEED_1000))
1323 k1_enable = false;
1324 }
1325
1326 if (hw->phy.type == e1000_phy_82577) {
1327 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
1328 if (ret_val)
1329 goto release;
1330
1331 status_reg &= HV_M_STATUS_LINK_UP |
1332 HV_M_STATUS_AUTONEG_COMPLETE |
1333 HV_M_STATUS_SPEED_MASK;
1334
1335 if (status_reg == (HV_M_STATUS_LINK_UP |
1336 HV_M_STATUS_AUTONEG_COMPLETE |
1337 HV_M_STATUS_SPEED_1000))
1338 k1_enable = false;
1339 }
1340
1341 /* Link stall fix for link up */
1342 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
1343 if (ret_val)
1344 goto release;
1345
1346 } else {
1347 /* Link stall fix for link down */
1348 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
1349 if (ret_val)
1350 goto release;
1351 }
1352
1353 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1354
1355 release:
1356 hw->phy.ops.release(hw);
1357
1358 return ret_val;
1359 }
1360
1361 /**
1362 * e1000_configure_k1_ich8lan - Configure K1 power state
1363 * @hw: pointer to the HW structure
1364 * @k1_enable: K1 state to configure
1365 *
1366 * Configure the K1 power state based on the provided parameter.
1367 * Assumes semaphore already acquired.
1368 *
1369 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1370 **/
1371 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1372 {
1373 s32 ret_val = 0;
1374 u32 ctrl_reg = 0;
1375 u32 ctrl_ext = 0;
1376 u32 reg = 0;
1377 u16 kmrn_reg = 0;
1378
1379 ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1380 &kmrn_reg);
1381 if (ret_val)
1382 return ret_val;
1383
1384 if (k1_enable)
1385 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1386 else
1387 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1388
1389 ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1390 kmrn_reg);
1391 if (ret_val)
1392 return ret_val;
1393
1394 udelay(20);
1395 ctrl_ext = er32(CTRL_EXT);
1396 ctrl_reg = er32(CTRL);
1397
1398 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1399 reg |= E1000_CTRL_FRCSPD;
1400 ew32(CTRL, reg);
1401
1402 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1403 e1e_flush();
1404 udelay(20);
1405 ew32(CTRL, ctrl_reg);
1406 ew32(CTRL_EXT, ctrl_ext);
1407 e1e_flush();
1408 udelay(20);
1409
1410 return 0;
1411 }
1412
1413 /**
1414 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1415 * @hw: pointer to the HW structure
1416 * @d0_state: boolean if entering d0 or d3 device state
1417 *
1418 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1419 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1420 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1421 **/
1422 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1423 {
1424 s32 ret_val = 0;
1425 u32 mac_reg;
1426 u16 oem_reg;
1427
1428 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1429 return ret_val;
1430
1431 ret_val = hw->phy.ops.acquire(hw);
1432 if (ret_val)
1433 return ret_val;
1434
1435 if (!(hw->mac.type == e1000_pch2lan)) {
1436 mac_reg = er32(EXTCNF_CTRL);
1437 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1438 goto release;
1439 }
1440
1441 mac_reg = er32(FEXTNVM);
1442 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1443 goto release;
1444
1445 mac_reg = er32(PHY_CTRL);
1446
1447 ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
1448 if (ret_val)
1449 goto release;
1450
1451 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1452
1453 if (d0_state) {
1454 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1455 oem_reg |= HV_OEM_BITS_GBE_DIS;
1456
1457 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1458 oem_reg |= HV_OEM_BITS_LPLU;
1459 } else {
1460 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1461 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1462 oem_reg |= HV_OEM_BITS_GBE_DIS;
1463
1464 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1465 E1000_PHY_CTRL_NOND0A_LPLU))
1466 oem_reg |= HV_OEM_BITS_LPLU;
1467 }
1468
1469 /* Set Restart auto-neg to activate the bits */
1470 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1471 !hw->phy.ops.check_reset_block(hw))
1472 oem_reg |= HV_OEM_BITS_RESTART_AN;
1473
1474 ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
1475
1476 release:
1477 hw->phy.ops.release(hw);
1478
1479 return ret_val;
1480 }
1481
1482
1483 /**
1484 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1485 * @hw: pointer to the HW structure
1486 **/
1487 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1488 {
1489 s32 ret_val;
1490 u16 data;
1491
1492 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1493 if (ret_val)
1494 return ret_val;
1495
1496 data |= HV_KMRN_MDIO_SLOW;
1497
1498 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1499
1500 return ret_val;
1501 }
1502
1503 /**
1504 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1505 * done after every PHY reset.
1506 **/
1507 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1508 {
1509 s32 ret_val = 0;
1510 u16 phy_data;
1511
1512 if (hw->mac.type != e1000_pchlan)
1513 return 0;
1514
1515 /* Set MDIO slow mode before any other MDIO access */
1516 if (hw->phy.type == e1000_phy_82577) {
1517 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1518 if (ret_val)
1519 return ret_val;
1520 }
1521
1522 if (((hw->phy.type == e1000_phy_82577) &&
1523 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1524 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1525 /* Disable generation of early preamble */
1526 ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
1527 if (ret_val)
1528 return ret_val;
1529
1530 /* Preamble tuning for SSC */
1531 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1532 if (ret_val)
1533 return ret_val;
1534 }
1535
1536 if (hw->phy.type == e1000_phy_82578) {
1537 /*
1538 * Return registers to default by doing a soft reset then
1539 * writing 0x3140 to the control register.
1540 */
1541 if (hw->phy.revision < 2) {
1542 e1000e_phy_sw_reset(hw);
1543 ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
1544 }
1545 }
1546
1547 /* Select page 0 */
1548 ret_val = hw->phy.ops.acquire(hw);
1549 if (ret_val)
1550 return ret_val;
1551
1552 hw->phy.addr = 1;
1553 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1554 hw->phy.ops.release(hw);
1555 if (ret_val)
1556 return ret_val;
1557
1558 /*
1559 * Configure the K1 Si workaround during phy reset assuming there is
1560 * link so that it disables K1 if link is in 1Gbps.
1561 */
1562 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1563 if (ret_val)
1564 return ret_val;
1565
1566 /* Workaround for link disconnects on a busy hub in half duplex */
1567 ret_val = hw->phy.ops.acquire(hw);
1568 if (ret_val)
1569 return ret_val;
1570 ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1571 if (ret_val)
1572 goto release;
1573 ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
1574 release:
1575 hw->phy.ops.release(hw);
1576
1577 return ret_val;
1578 }
1579
1580 /**
1581 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1582 * @hw: pointer to the HW structure
1583 **/
1584 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1585 {
1586 u32 mac_reg;
1587 u16 i, phy_reg = 0;
1588 s32 ret_val;
1589
1590 ret_val = hw->phy.ops.acquire(hw);
1591 if (ret_val)
1592 return;
1593 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1594 if (ret_val)
1595 goto release;
1596
1597 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1598 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1599 mac_reg = er32(RAL(i));
1600 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1601 (u16)(mac_reg & 0xFFFF));
1602 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1603 (u16)((mac_reg >> 16) & 0xFFFF));
1604
1605 mac_reg = er32(RAH(i));
1606 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1607 (u16)(mac_reg & 0xFFFF));
1608 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1609 (u16)((mac_reg & E1000_RAH_AV)
1610 >> 16));
1611 }
1612
1613 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1614
1615 release:
1616 hw->phy.ops.release(hw);
1617 }
1618
1619 /**
1620 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1621 * with 82579 PHY
1622 * @hw: pointer to the HW structure
1623 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1624 **/
1625 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1626 {
1627 s32 ret_val = 0;
1628 u16 phy_reg, data;
1629 u32 mac_reg;
1630 u16 i;
1631
1632 if (hw->mac.type != e1000_pch2lan)
1633 return 0;
1634
1635 /* disable Rx path while enabling/disabling workaround */
1636 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1637 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1638 if (ret_val)
1639 return ret_val;
1640
1641 if (enable) {
1642 /*
1643 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1644 * SHRAL/H) and initial CRC values to the MAC
1645 */
1646 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1647 u8 mac_addr[ETH_ALEN] = {0};
1648 u32 addr_high, addr_low;
1649
1650 addr_high = er32(RAH(i));
1651 if (!(addr_high & E1000_RAH_AV))
1652 continue;
1653 addr_low = er32(RAL(i));
1654 mac_addr[0] = (addr_low & 0xFF);
1655 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1656 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1657 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1658 mac_addr[4] = (addr_high & 0xFF);
1659 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1660
1661 ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
1662 }
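/*
 * The loop above seeds PCH_RAICC[i] with the bitwise complement of the
 * little-endian CRC-32 of each enabled receive address -- the "initial CRC
 * values" referred to in the comment preceding the loop.
 */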
1663
1664 /* Write Rx addresses to the PHY */
1665 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1666
1667 /* Enable jumbo frame workaround in the MAC */
1668 mac_reg = er32(FFLT_DBG);
1669 mac_reg &= ~(1 << 14);
1670 mac_reg |= (7 << 15);
1671 ew32(FFLT_DBG, mac_reg);
1672
1673 mac_reg = er32(RCTL);
1674 mac_reg |= E1000_RCTL_SECRC;
1675 ew32(RCTL, mac_reg);
1676
1677 ret_val = e1000e_read_kmrn_reg(hw,
1678 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1679 &data);
1680 if (ret_val)
1681 return ret_val;
1682 ret_val = e1000e_write_kmrn_reg(hw,
1683 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1684 data | (1 << 0));
1685 if (ret_val)
1686 return ret_val;
1687 ret_val = e1000e_read_kmrn_reg(hw,
1688 E1000_KMRNCTRLSTA_HD_CTRL,
1689 &data);
1690 if (ret_val)
1691 return ret_val;
1692 data &= ~(0xF << 8);
1693 data |= (0xB << 8);
1694 ret_val = e1000e_write_kmrn_reg(hw,
1695 E1000_KMRNCTRLSTA_HD_CTRL,
1696 data);
1697 if (ret_val)
1698 return ret_val;
1699
1700 /* Enable jumbo frame workaround in the PHY */
1701 e1e_rphy(hw, PHY_REG(769, 23), &data);
1702 data &= ~(0x7F << 5);
1703 data |= (0x37 << 5);
1704 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1705 if (ret_val)
1706 return ret_val;
1707 e1e_rphy(hw, PHY_REG(769, 16), &data);
1708 data &= ~(1 << 13);
1709 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1710 if (ret_val)
1711 return ret_val;
1712 e1e_rphy(hw, PHY_REG(776, 20), &data);
1713 data &= ~(0x3FF << 2);
1714 data |= (0x1A << 2);
1715 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1716 if (ret_val)
1717 return ret_val;
1718 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
1719 if (ret_val)
1720 return ret_val;
1721 e1e_rphy(hw, HV_PM_CTRL, &data);
1722 ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
1723 if (ret_val)
1724 return ret_val;
1725 } else {
1726 /* Write MAC register values back to h/w defaults */
1727 mac_reg = er32(FFLT_DBG);
1728 mac_reg &= ~(0xF << 14);
1729 ew32(FFLT_DBG, mac_reg);
1730
1731 mac_reg = er32(RCTL);
1732 mac_reg &= ~E1000_RCTL_SECRC;
1733 ew32(RCTL, mac_reg);
1734
1735 ret_val = e1000e_read_kmrn_reg(hw,
1736 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1737 &data);
1738 if (ret_val)
1739 return ret_val;
1740 ret_val = e1000e_write_kmrn_reg(hw,
1741 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1742 data & ~(1 << 0));
1743 if (ret_val)
1744 return ret_val;
1745 ret_val = e1000e_read_kmrn_reg(hw,
1746 E1000_KMRNCTRLSTA_HD_CTRL,
1747 &data);
1748 if (ret_val)
1749 return ret_val;
1750 data &= ~(0xF << 8);
1751 data |= (0xB << 8);
1752 ret_val = e1000e_write_kmrn_reg(hw,
1753 E1000_KMRNCTRLSTA_HD_CTRL,
1754 data);
1755 if (ret_val)
1756 return ret_val;
1757
1758 /* Write PHY register values back to h/w defaults */
1759 e1e_rphy(hw, PHY_REG(769, 23), &data);
1760 data &= ~(0x7F << 5);
1761 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1762 if (ret_val)
1763 return ret_val;
1764 e1e_rphy(hw, PHY_REG(769, 16), &data);
1765 data |= (1 << 13);
1766 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1767 if (ret_val)
1768 return ret_val;
1769 e1e_rphy(hw, PHY_REG(776, 20), &data);
1770 data &= ~(0x3FF << 2);
1771 data |= (0x8 << 2);
1772 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1773 if (ret_val)
1774 return ret_val;
1775 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
1776 if (ret_val)
1777 return ret_val;
1778 e1e_rphy(hw, HV_PM_CTRL, &data);
1779 ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
1780 if (ret_val)
1781 return ret_val;
1782 }
1783
1784 /* re-enable Rx path after enabling/disabling workaround */
1785 return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1786 }
1787
1788 /**
1789 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1790 * done after every PHY reset.
1791 **/
1792 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1793 {
1794 s32 ret_val = 0;
1795
1796 if (hw->mac.type != e1000_pch2lan)
1797 return 0;
1798
1799 /* Set MDIO slow mode before any other MDIO access */
1800 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1801
1802 ret_val = hw->phy.ops.acquire(hw);
1803 if (ret_val)
1804 return ret_val;
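/*
 * The 82579 EMI registers are accessed indirectly: the EMI register offset
 * is written to I82579_EMI_ADDR and the value is then transferred through
 * I82579_EMI_DATA, as done for the MSE threshold and link-down count below.
 */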
1805 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
1806 if (ret_val)
1807 goto release;
1808 /* set MSE higher to enable link to stay up when noise is high */
1809 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
1810 if (ret_val)
1811 goto release;
1812 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
1813 if (ret_val)
1814 goto release;
1815 /* drop link after 5 times MSE threshold was reached */
1816 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
1817 release:
1818 hw->phy.ops.release(hw);
1819
1820 return ret_val;
1821 }
1822
1823 /**
1824 * e1000_k1_workaround_lv - K1 Si workaround
1825 * @hw: pointer to the HW structure
1826 *
1827 * Workaround to set the K1 beacon duration for 82579 parts
1828 **/
1829 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1830 {
1831 s32 ret_val = 0;
1832 u16 status_reg = 0;
1833 u32 mac_reg;
1834 u16 phy_reg;
1835
1836 if (hw->mac.type != e1000_pch2lan)
1837 return 0;
1838
1839 /* Set K1 beacon duration based on whether the link speed is 1Gbps */
1840 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1841 if (ret_val)
1842 return ret_val;
1843
1844 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1845 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1846 mac_reg = er32(FEXTNVM4);
1847 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1848
1849 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
1850 if (ret_val)
1851 return ret_val;
1852
1853 if (status_reg & HV_M_STATUS_SPEED_1000) {
1854 u16 pm_phy_reg;
1855
1856 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1857 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1858 /* LV 1Gbps packet drop issue workaround */
1859 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
1860 if (ret_val)
1861 return ret_val;
1862 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
1863 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
1864 if (ret_val)
1865 return ret_val;
1866 } else {
1867 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1868 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1869 }
1870 ew32(FEXTNVM4, mac_reg);
1871 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
1872 }
1873
1874 return ret_val;
1875 }
1876
1877 /**
1878 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1879 * @hw: pointer to the HW structure
1880 * @gate: boolean set to true to gate, false to ungate
1881 *
1882 * Gate/ungate the automatic PHY configuration via hardware; perform
1883 * the configuration via software instead.
1884 **/
1885 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1886 {
1887 u32 extcnf_ctrl;
1888
1889 if (hw->mac.type != e1000_pch2lan)
1890 return;
1891
1892 extcnf_ctrl = er32(EXTCNF_CTRL);
1893
1894 if (gate)
1895 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1896 else
1897 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1898
1899 ew32(EXTCNF_CTRL, extcnf_ctrl);
1900 }
1901
1902 /**
1903 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1904 * @hw: pointer to the HW structure
1905 *
1906 * Check the appropriate indication the MAC has finished configuring the
1907 * PHY after a software reset.
1908 **/
1909 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1910 {
1911 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1912
1913 	/* Wait for basic configuration to complete before proceeding */
1914 do {
1915 data = er32(STATUS);
1916 data &= E1000_STATUS_LAN_INIT_DONE;
1917 udelay(100);
1918 } while ((!data) && --loop);
1919
1920 /*
1921 * If basic configuration is incomplete before the above loop
1922 * count reaches 0, loading the configuration from NVM will
1923 * leave the PHY in a bad state possibly resulting in no link.
1924 */
1925 if (loop == 0)
1926 e_dbg("LAN_INIT_DONE not set, increase timeout\n");
1927
1928 /* Clear the Init Done bit for the next init event */
1929 data = er32(STATUS);
1930 data &= ~E1000_STATUS_LAN_INIT_DONE;
1931 ew32(STATUS, data);
1932 }
1933
1934 /**
1935 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1936 * @hw: pointer to the HW structure
1937 **/
1938 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1939 {
1940 s32 ret_val = 0;
1941 u16 reg;
1942
1943 if (hw->phy.ops.check_reset_block(hw))
1944 return 0;
1945
1946 /* Allow time for h/w to get to quiescent state after reset */
1947 usleep_range(10000, 20000);
1948
1949 /* Perform any necessary post-reset workarounds */
1950 switch (hw->mac.type) {
1951 case e1000_pchlan:
1952 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1953 if (ret_val)
1954 return ret_val;
1955 break;
1956 case e1000_pch2lan:
1957 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1958 if (ret_val)
1959 return ret_val;
1960 break;
1961 default:
1962 break;
1963 }
1964
1965 /* Clear the host wakeup bit after lcd reset */
1966 if (hw->mac.type >= e1000_pchlan) {
1967 e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
1968 reg &= ~BM_WUC_HOST_WU_BIT;
1969 e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
1970 }
1971
1972 /* Configure the LCD with the extended configuration region in NVM */
1973 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1974 if (ret_val)
1975 return ret_val;
1976
1977 /* Configure the LCD with the OEM bits in NVM */
1978 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1979
1980 if (hw->mac.type == e1000_pch2lan) {
1981 /* Ungate automatic PHY configuration on non-managed 82579 */
1982 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1983 usleep_range(10000, 20000);
1984 e1000_gate_hw_phy_config_ich8lan(hw, false);
1985 }
1986
1987 /* Set EEE LPI Update Timer to 200usec */
1988 ret_val = hw->phy.ops.acquire(hw);
1989 if (ret_val)
1990 return ret_val;
1991 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
1992 I82579_LPI_UPDATE_TIMER);
1993 if (!ret_val)
1994 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
1995 hw->phy.ops.release(hw);
1996 }
1997
1998 return ret_val;
1999 }
2000
2001 /**
2002 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2003 * @hw: pointer to the HW structure
2004 *
2005 * Resets the PHY
2006 * This is a function pointer entry point called by drivers
2007 * or other shared routines.
2008 **/
2009 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2010 {
2011 s32 ret_val = 0;
2012
2013 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2014 if ((hw->mac.type == e1000_pch2lan) &&
2015 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2016 e1000_gate_hw_phy_config_ich8lan(hw, true);
2017
2018 ret_val = e1000e_phy_hw_reset_generic(hw);
2019 if (ret_val)
2020 return ret_val;
2021
2022 return e1000_post_phy_reset_ich8lan(hw);
2023 }
2024
2025 /**
2026 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2027 * @hw: pointer to the HW structure
2028 * @active: true to enable LPLU, false to disable
2029 *
2030  * Sets the LPLU state according to the active flag. For PCH, if the OEM write
2031  * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2032  * the PHY speed. This function will manually set the LPLU bit and restart
2033 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2034 * since it configures the same bit.
2035 **/
2036 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2037 {
2038 s32 ret_val = 0;
2039 u16 oem_reg;
2040
2041 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2042 if (ret_val)
2043 return ret_val;
2044
2045 if (active)
2046 oem_reg |= HV_OEM_BITS_LPLU;
2047 else
2048 oem_reg &= ~HV_OEM_BITS_LPLU;
2049
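	/* Restart auto-neg, as hw would, so the new LPLU setting takes effect,
	 * unless PHY resets are currently blocked
	 */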
2050 if (!hw->phy.ops.check_reset_block(hw))
2051 oem_reg |= HV_OEM_BITS_RESTART_AN;
2052
2053 return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2054 }
2055
2056 /**
2057 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2058 * @hw: pointer to the HW structure
2059 * @active: true to enable LPLU, false to disable
2060 *
2061 * Sets the LPLU D0 state according to the active flag. When
2062 * activating LPLU this function also disables smart speed
2063 * and vice versa. LPLU will not be activated unless the
2064 * device autonegotiation advertisement meets standards of
2065 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2066 * This is a function pointer entry point only called by
2067 * PHY setup routines.
2068 **/
2069 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2070 {
2071 struct e1000_phy_info *phy = &hw->phy;
2072 u32 phy_ctrl;
2073 s32 ret_val = 0;
2074 u16 data;
2075
2076 if (phy->type == e1000_phy_ife)
2077 return 0;
2078
2079 phy_ctrl = er32(PHY_CTRL);
2080
2081 if (active) {
2082 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2083 ew32(PHY_CTRL, phy_ctrl);
2084
2085 if (phy->type != e1000_phy_igp_3)
2086 return 0;
2087
2088 /*
2089 * Call gig speed drop workaround on LPLU before accessing
2090 * any PHY registers
2091 */
2092 if (hw->mac.type == e1000_ich8lan)
2093 e1000e_gig_downshift_workaround_ich8lan(hw);
2094
2095 /* When LPLU is enabled, we should disable SmartSpeed */
2096 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2097 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2098 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2099 if (ret_val)
2100 return ret_val;
2101 } else {
2102 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2103 ew32(PHY_CTRL, phy_ctrl);
2104
2105 if (phy->type != e1000_phy_igp_3)
2106 return 0;
2107
2108 /*
2109 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2110 * during Dx states where the power conservation is most
2111 * important. During driver activity we should enable
2112 * SmartSpeed, so performance is maintained.
2113 */
2114 if (phy->smart_speed == e1000_smart_speed_on) {
2115 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2116 &data);
2117 if (ret_val)
2118 return ret_val;
2119
2120 data |= IGP01E1000_PSCFR_SMART_SPEED;
2121 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2122 data);
2123 if (ret_val)
2124 return ret_val;
2125 } else if (phy->smart_speed == e1000_smart_speed_off) {
2126 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2127 &data);
2128 if (ret_val)
2129 return ret_val;
2130
2131 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2132 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2133 data);
2134 if (ret_val)
2135 return ret_val;
2136 }
2137 }
2138
2139 return 0;
2140 }
2141
2142 /**
2143 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2144 * @hw: pointer to the HW structure
2145 * @active: true to enable LPLU, false to disable
2146 *
2147 * Sets the LPLU D3 state according to the active flag. When
2148 * activating LPLU this function also disables smart speed
2149 * and vice versa. LPLU will not be activated unless the
2150 * device autonegotiation advertisement meets standards of
2151 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2152 * This is a function pointer entry point only called by
2153 * PHY setup routines.
2154 **/
2155 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2156 {
2157 struct e1000_phy_info *phy = &hw->phy;
2158 u32 phy_ctrl;
2159 s32 ret_val = 0;
2160 u16 data;
2161
2162 phy_ctrl = er32(PHY_CTRL);
2163
2164 if (!active) {
2165 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2166 ew32(PHY_CTRL, phy_ctrl);
2167
2168 if (phy->type != e1000_phy_igp_3)
2169 return 0;
2170
2171 /*
2172 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2173 * during Dx states where the power conservation is most
2174 * important. During driver activity we should enable
2175 * SmartSpeed, so performance is maintained.
2176 */
2177 if (phy->smart_speed == e1000_smart_speed_on) {
2178 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2179 &data);
2180 if (ret_val)
2181 return ret_val;
2182
2183 data |= IGP01E1000_PSCFR_SMART_SPEED;
2184 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2185 data);
2186 if (ret_val)
2187 return ret_val;
2188 } else if (phy->smart_speed == e1000_smart_speed_off) {
2189 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2190 &data);
2191 if (ret_val)
2192 return ret_val;
2193
2194 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2195 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2196 data);
2197 if (ret_val)
2198 return ret_val;
2199 }
2200 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2201 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2202 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2203 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2204 ew32(PHY_CTRL, phy_ctrl);
2205
2206 if (phy->type != e1000_phy_igp_3)
2207 return 0;
2208
2209 /*
2210 * Call gig speed drop workaround on LPLU before accessing
2211 * any PHY registers
2212 */
2213 if (hw->mac.type == e1000_ich8lan)
2214 e1000e_gig_downshift_workaround_ich8lan(hw);
2215
2216 /* When LPLU is enabled, we should disable SmartSpeed */
2217 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2218 if (ret_val)
2219 return ret_val;
2220
2221 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2222 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2223 }
2224
2225 return ret_val;
2226 }
2227
2228 /**
2229 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2230 * @hw: pointer to the HW structure
2231 * @bank: pointer to the variable that returns the active bank
2232 *
2233 * Reads signature byte from the NVM using the flash access registers.
2234 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2235 **/
2236 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2237 {
2238 u32 eecd;
2239 struct e1000_nvm_info *nvm = &hw->nvm;
2240 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
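	/* the bank-valid signature lives in the upper byte of the signature
	 * word (0x13), hence byte offset = word offset * 2 + 1
	 */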
2241 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2242 u8 sig_byte = 0;
2243 s32 ret_val;
2244
2245 switch (hw->mac.type) {
2246 case e1000_ich8lan:
2247 case e1000_ich9lan:
2248 eecd = er32(EECD);
2249 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2250 E1000_EECD_SEC1VAL_VALID_MASK) {
2251 if (eecd & E1000_EECD_SEC1VAL)
2252 *bank = 1;
2253 else
2254 *bank = 0;
2255
2256 return 0;
2257 }
2258 e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2259 /* fall-thru */
2260 default:
2261 /* set bank to 0 in case flash read fails */
2262 *bank = 0;
2263
2264 /* Check bank 0 */
2265 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2266 &sig_byte);
2267 if (ret_val)
2268 return ret_val;
2269 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2270 E1000_ICH_NVM_SIG_VALUE) {
2271 *bank = 0;
2272 return 0;
2273 }
2274
2275 /* Check bank 1 */
2276 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2277 bank1_offset,
2278 &sig_byte);
2279 if (ret_val)
2280 return ret_val;
2281 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2282 E1000_ICH_NVM_SIG_VALUE) {
2283 *bank = 1;
2284 return 0;
2285 }
2286
2287 e_dbg("ERROR: No valid NVM bank present\n");
2288 return -E1000_ERR_NVM;
2289 }
2290 }
2291
2292 /**
2293 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2294 * @hw: pointer to the HW structure
2295  * @offset: The offset (in words) of the word(s) to read.
2296 * @words: Size of data to read in words
2297 * @data: Pointer to the word(s) to read at offset.
2298 *
2299 * Reads a word(s) from the NVM using the flash access registers.
2300 **/
2301 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2302 u16 *data)
2303 {
2304 struct e1000_nvm_info *nvm = &hw->nvm;
2305 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2306 u32 act_offset;
2307 s32 ret_val = 0;
2308 u32 bank = 0;
2309 u16 i, word;
2310
2311 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2312 (words == 0)) {
2313 e_dbg("nvm parameter(s) out of bounds\n");
2314 ret_val = -E1000_ERR_NVM;
2315 goto out;
2316 }
2317
2318 nvm->ops.acquire(hw);
2319
2320 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2321 if (ret_val) {
2322 e_dbg("Could not detect valid bank, assuming bank 0\n");
2323 bank = 0;
2324 }
2325
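	/* words in bank 1 start one full bank of words into the flash */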
2326 act_offset = (bank) ? nvm->flash_bank_size : 0;
2327 act_offset += offset;
2328
2329 ret_val = 0;
2330 for (i = 0; i < words; i++) {
2331 if (dev_spec->shadow_ram[offset+i].modified) {
2332 data[i] = dev_spec->shadow_ram[offset+i].value;
2333 } else {
2334 ret_val = e1000_read_flash_word_ich8lan(hw,
2335 act_offset + i,
2336 &word);
2337 if (ret_val)
2338 break;
2339 data[i] = word;
2340 }
2341 }
2342
2343 nvm->ops.release(hw);
2344
2345 out:
2346 if (ret_val)
2347 e_dbg("NVM read error: %d\n", ret_val);
2348
2349 return ret_val;
2350 }
2351
2352 /**
2353 * e1000_flash_cycle_init_ich8lan - Initialize flash
2354 * @hw: pointer to the HW structure
2355 *
2356 * This function does initial flash setup so that a new read/write/erase cycle
2357 * can be started.
2358 **/
2359 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2360 {
2361 union ich8_hws_flash_status hsfsts;
2362 s32 ret_val = -E1000_ERR_NVM;
2363
2364 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2365
2366 /* Check if the flash descriptor is valid */
2367 if (!hsfsts.hsf_status.fldesvalid) {
2368 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
2369 return -E1000_ERR_NVM;
2370 }
2371
2372 /* Clear FCERR and DAEL in hw status by writing 1 */
2373 hsfsts.hsf_status.flcerr = 1;
2374 hsfsts.hsf_status.dael = 1;
2375
2376 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2377
2378 /*
2379 	 * Either we should have a hardware SPI cycle-in-progress
2380 	 * bit to check against in order to start a new cycle, or the
2381 	 * FDONE bit should come out of hardware reset set to 1 so
2382 	 * that it can then be used as an
2383 * indication whether a cycle is in progress or has been
2384 * completed.
2385 */
2386
2387 if (!hsfsts.hsf_status.flcinprog) {
2388 /*
2389 * There is no cycle running at present,
2390 * so we can start a cycle.
2391 * Begin by setting Flash Cycle Done.
2392 */
2393 hsfsts.hsf_status.flcdone = 1;
2394 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2395 ret_val = 0;
2396 } else {
2397 s32 i;
2398
2399 /*
2400 		 * Otherwise poll for some time so the current
2401 * cycle has a chance to end before giving up.
2402 */
2403 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2404 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2405 if (!hsfsts.hsf_status.flcinprog) {
2406 ret_val = 0;
2407 break;
2408 }
2409 udelay(1);
2410 }
2411 if (!ret_val) {
2412 /*
2413 			 * Successful in waiting for the previous cycle to finish,
2414 * now set the Flash Cycle Done.
2415 */
2416 hsfsts.hsf_status.flcdone = 1;
2417 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2418 } else {
2419 e_dbg("Flash controller busy, cannot get access\n");
2420 }
2421 }
2422
2423 return ret_val;
2424 }
2425
2426 /**
2427 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2428 * @hw: pointer to the HW structure
2429 * @timeout: maximum time to wait for completion
2430 *
2431 * This function starts a flash cycle and waits for its completion.
2432 **/
2433 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2434 {
2435 union ich8_hws_flash_ctrl hsflctl;
2436 union ich8_hws_flash_status hsfsts;
2437 u32 i = 0;
2438
2439 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2440 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2441 hsflctl.hsf_ctrl.flcgo = 1;
2442 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2443
2444 /* wait till FDONE bit is set to 1 */
2445 do {
2446 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2447 if (hsfsts.hsf_status.flcdone)
2448 break;
2449 udelay(1);
2450 } while (i++ < timeout);
2451
2452 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2453 return 0;
2454
2455 return -E1000_ERR_NVM;
2456 }
2457
2458 /**
2459 * e1000_read_flash_word_ich8lan - Read word from flash
2460 * @hw: pointer to the HW structure
2461 * @offset: offset to data location
2462 * @data: pointer to the location for storing the data
2463 *
2464 * Reads the flash word at offset into data. Offset is converted
2465 * to bytes before read.
2466 **/
2467 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2468 u16 *data)
2469 {
2470 /* Must convert offset into bytes. */
2471 offset <<= 1;
2472
2473 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2474 }
2475
2476 /**
2477 * e1000_read_flash_byte_ich8lan - Read byte from flash
2478 * @hw: pointer to the HW structure
2479 * @offset: The offset of the byte to read.
2480 * @data: Pointer to a byte to store the value read.
2481 *
2482 * Reads a single byte from the NVM using the flash access registers.
2483 **/
2484 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2485 u8 *data)
2486 {
2487 s32 ret_val;
2488 u16 word = 0;
2489
2490 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2491 if (ret_val)
2492 return ret_val;
2493
2494 *data = (u8)word;
2495
2496 return 0;
2497 }
2498
2499 /**
2500 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2501 * @hw: pointer to the HW structure
2502 * @offset: The offset (in bytes) of the byte or word to read.
2503 * @size: Size of data to read, 1=byte 2=word
2504 * @data: Pointer to the word to store the value read.
2505 *
2506 * Reads a byte or word from the NVM using the flash access registers.
2507 **/
2508 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2509 u8 size, u16 *data)
2510 {
2511 union ich8_hws_flash_status hsfsts;
2512 union ich8_hws_flash_ctrl hsflctl;
2513 u32 flash_linear_addr;
2514 u32 flash_data = 0;
2515 s32 ret_val = -E1000_ERR_NVM;
2516 u8 count = 0;
2517
2518 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2519 return -E1000_ERR_NVM;
2520
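	/* the offset is relative to the GbE region; add its base address
	 * within the flash part to form the linear flash address
	 */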
2521 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2522 hw->nvm.flash_base_addr;
2523
2524 do {
2525 udelay(1);
2526 /* Steps */
2527 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2528 if (ret_val)
2529 break;
2530
2531 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2532 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2533 hsflctl.hsf_ctrl.fldbcount = size - 1;
2534 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2535 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2536
2537 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2538
2539 ret_val = e1000_flash_cycle_ich8lan(hw,
2540 ICH_FLASH_READ_COMMAND_TIMEOUT);
2541
2542 /*
2543 * Check if FCERR is set to 1, if set to 1, clear it
2544 * and try the whole sequence a few more times, else
2545 * read in (shift in) the Flash Data0, the order is
2546 		 * least significant byte first
2547 */
2548 if (!ret_val) {
2549 flash_data = er32flash(ICH_FLASH_FDATA0);
2550 if (size == 1)
2551 *data = (u8)(flash_data & 0x000000FF);
2552 else if (size == 2)
2553 *data = (u16)(flash_data & 0x0000FFFF);
2554 break;
2555 } else {
2556 /*
2557 * If we've gotten here, then things are probably
2558 * completely hosed, but if the error condition is
2559 * detected, it won't hurt to give it another try...
2560 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2561 */
2562 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2563 if (hsfsts.hsf_status.flcerr) {
2564 /* Repeat for some time before giving up. */
2565 continue;
2566 } else if (!hsfsts.hsf_status.flcdone) {
2567 e_dbg("Timeout error - flash cycle did not complete.\n");
2568 break;
2569 }
2570 }
2571 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2572
2573 return ret_val;
2574 }
2575
2576 /**
2577 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2578 * @hw: pointer to the HW structure
2579  * @offset: The offset (in words) of the word(s) to write.
2580 * @words: Size of data to write in words
2581 * @data: Pointer to the word(s) to write at offset.
2582 *
2583  * Writes the word(s) to the NVM's shadow RAM; the changes are committed to
 *  the flash by e1000_update_nvm_checksum_ich8lan().
2584 **/
2585 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2586 u16 *data)
2587 {
2588 struct e1000_nvm_info *nvm = &hw->nvm;
2589 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2590 u16 i;
2591
2592 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2593 (words == 0)) {
2594 e_dbg("nvm parameter(s) out of bounds\n");
2595 return -E1000_ERR_NVM;
2596 }
2597
2598 nvm->ops.acquire(hw);
2599
2600 for (i = 0; i < words; i++) {
2601 dev_spec->shadow_ram[offset+i].modified = true;
2602 dev_spec->shadow_ram[offset+i].value = data[i];
2603 }
2604
2605 nvm->ops.release(hw);
2606
2607 return 0;
2608 }
2609
2610 /**
2611 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2612 * @hw: pointer to the HW structure
2613 *
2614 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2615 * which writes the checksum to the shadow ram. The changes in the shadow
2616 * ram are then committed to the EEPROM by processing each bank at a time
2617 * checking for the modified bit and writing only the pending changes.
2618 * After a successful commit, the shadow ram is cleared and is ready for
2619 * future writes.
2620 **/
2621 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2622 {
2623 struct e1000_nvm_info *nvm = &hw->nvm;
2624 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2625 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2626 s32 ret_val;
2627 u16 data;
2628
2629 ret_val = e1000e_update_nvm_checksum_generic(hw);
2630 if (ret_val)
2631 goto out;
2632
2633 if (nvm->type != e1000_nvm_flash_sw)
2634 goto out;
2635
2636 nvm->ops.acquire(hw);
2637
2638 /*
2639 * We're writing to the opposite bank so if we're on bank 1,
2640 * write to bank 0 etc. We also need to erase the segment that
2641 * is going to be written
2642 */
2643 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2644 if (ret_val) {
2645 e_dbg("Could not detect valid bank, assuming bank 0\n");
2646 bank = 0;
2647 }
2648
2649 if (bank == 0) {
2650 new_bank_offset = nvm->flash_bank_size;
2651 old_bank_offset = 0;
2652 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2653 if (ret_val)
2654 goto release;
2655 } else {
2656 old_bank_offset = nvm->flash_bank_size;
2657 new_bank_offset = 0;
2658 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2659 if (ret_val)
2660 goto release;
2661 }
2662
2663 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2664 /*
2665 * Determine whether to write the value stored
2666 * in the other NVM bank or a modified value stored
2667 * in the shadow RAM
2668 */
2669 if (dev_spec->shadow_ram[i].modified) {
2670 data = dev_spec->shadow_ram[i].value;
2671 } else {
2672 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2673 old_bank_offset,
2674 &data);
2675 if (ret_val)
2676 break;
2677 }
2678
2679 /*
2680 		 * If this is the signature word (0x13), make sure the signature bits
2681 * (15:14) are 11b until the commit has completed.
2682 * This will allow us to write 10b which indicates the
2683 * signature is valid. We want to do this after the write
2684 * has completed so that we don't mark the segment valid
2685 * while the write is still in progress
2686 */
2687 if (i == E1000_ICH_NVM_SIG_WORD)
2688 data |= E1000_ICH_NVM_SIG_MASK;
2689
2690 /* Convert offset to bytes. */
2691 act_offset = (i + new_bank_offset) << 1;
2692
2693 udelay(100);
2694 /* Write the bytes to the new bank. */
2695 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2696 act_offset,
2697 (u8)data);
2698 if (ret_val)
2699 break;
2700
2701 udelay(100);
2702 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2703 act_offset + 1,
2704 (u8)(data >> 8));
2705 if (ret_val)
2706 break;
2707 }
2708
2709 /*
2710 * Don't bother writing the segment valid bits if sector
2711 * programming failed.
2712 */
2713 if (ret_val) {
2714 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2715 e_dbg("Flash commit failed.\n");
2716 goto release;
2717 }
2718
2719 /*
2720 	 * Finally validate the new segment by setting bits 15:14
2721 	 * to 10b in word 0x13; this can be done without an
2722 * erase as well since these bits are 11 to start with
2723 * and we need to change bit 14 to 0b
2724 */
2725 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2726 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2727 if (ret_val)
2728 goto release;
2729
2730 data &= 0xBFFF;
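	/* only the upper byte, which holds the signature bits 15:14, needs
	 * to be rewritten
	 */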
2731 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2732 act_offset * 2 + 1,
2733 (u8)(data >> 8));
2734 if (ret_val)
2735 goto release;
2736
2737 /*
2738 * And invalidate the previously valid segment by setting
2739 	 * its signature word (0x13) high byte to 0. This can be
2740 * done without an erase because flash erase sets all bits
2741 * to 1's. We can write 1's to 0's without an erase
2742 */
2743 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2744 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2745 if (ret_val)
2746 goto release;
2747
2748 /* Great! Everything worked, we can now clear the cached entries. */
2749 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2750 dev_spec->shadow_ram[i].modified = false;
2751 dev_spec->shadow_ram[i].value = 0xFFFF;
2752 }
2753
2754 release:
2755 nvm->ops.release(hw);
2756
2757 /*
2758 * Reload the EEPROM, or else modifications will not appear
2759 * until after the next adapter reset.
2760 */
2761 if (!ret_val) {
2762 nvm->ops.reload(hw);
2763 usleep_range(10000, 20000);
2764 }
2765
2766 out:
2767 if (ret_val)
2768 e_dbg("NVM update error: %d\n", ret_val);
2769
2770 return ret_val;
2771 }
2772
2773 /**
2774 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2775 * @hw: pointer to the HW structure
2776 *
2777 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2778  * If the bit is 0, then the EEPROM had been modified, but the checksum was not
2779 * calculated, in which case we need to calculate the checksum and set bit 6.
2780 **/
2781 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2782 {
2783 s32 ret_val;
2784 u16 data;
2785
2786 /*
2787 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2788 * needs to be fixed. This bit is an indication that the NVM
2789 * was prepared by OEM software and did not calculate the
2790 * checksum...a likely scenario.
2791 */
2792 ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
2793 if (ret_val)
2794 return ret_val;
2795
2796 if (!(data & 0x40)) {
2797 data |= 0x40;
2798 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2799 if (ret_val)
2800 return ret_val;
2801 ret_val = e1000e_update_nvm_checksum(hw);
2802 if (ret_val)
2803 return ret_val;
2804 }
2805
2806 return e1000e_validate_nvm_checksum_generic(hw);
2807 }
2808
2809 /**
2810 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
2811 * @hw: pointer to the HW structure
2812 *
2813 * To prevent malicious write/erase of the NVM, set it to be read-only
2814 * so that the hardware ignores all write/erase cycles of the NVM via
2815 * the flash control registers. The shadow-ram copy of the NVM will
2816 * still be updated, however any updates to this copy will not stick
2817 * across driver reloads.
2818 **/
2819 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
2820 {
2821 struct e1000_nvm_info *nvm = &hw->nvm;
2822 union ich8_flash_protected_range pr0;
2823 union ich8_hws_flash_status hsfsts;
2824 u32 gfpreg;
2825
2826 nvm->ops.acquire(hw);
2827
2828 gfpreg = er32flash(ICH_FLASH_GFPREG);
2829
2830 /* Write-protect GbE Sector of NVM */
2831 pr0.regval = er32flash(ICH_FLASH_PR0);
2832 pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
2833 pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
2834 pr0.range.wpe = true;
2835 ew32flash(ICH_FLASH_PR0, pr0.regval);
2836
2837 /*
2838 * Lock down a subset of GbE Flash Control Registers, e.g.
2839 * PR0 to prevent the write-protection from being lifted.
2840 * Once FLOCKDN is set, the registers protected by it cannot
2841 * be written until FLOCKDN is cleared by a hardware reset.
2842 */
2843 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2844 hsfsts.hsf_status.flockdn = true;
2845 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2846
2847 nvm->ops.release(hw);
2848 }
2849
2850 /**
2851 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2852 * @hw: pointer to the HW structure
2853 * @offset: The offset (in bytes) of the byte/word to read.
2854 * @size: Size of data to read, 1=byte 2=word
2855 * @data: The byte(s) to write to the NVM.
2856 *
2857 * Writes one/two bytes to the NVM using the flash access registers.
2858 **/
2859 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2860 u8 size, u16 data)
2861 {
2862 union ich8_hws_flash_status hsfsts;
2863 union ich8_hws_flash_ctrl hsflctl;
2864 u32 flash_linear_addr;
2865 u32 flash_data = 0;
2866 s32 ret_val;
2867 u8 count = 0;
2868
2869 if (size < 1 || size > 2 || data > size * 0xff ||
2870 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2871 return -E1000_ERR_NVM;
2872
2873 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2874 hw->nvm.flash_base_addr;
2875
2876 do {
2877 udelay(1);
2878 /* Steps */
2879 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2880 if (ret_val)
2881 break;
2882
2883 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2884 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2885 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2886 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2887 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2888
2889 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2890
2891 if (size == 1)
2892 flash_data = (u32)data & 0x00FF;
2893 else
2894 flash_data = (u32)data;
2895
2896 ew32flash(ICH_FLASH_FDATA0, flash_data);
2897
2898 /*
2899 		 * Check if FCERR is set to 1; if so, clear it and try
2900 		 * the whole sequence a few more times, else we are done
2901 */
2902 ret_val = e1000_flash_cycle_ich8lan(hw,
2903 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2904 if (!ret_val)
2905 break;
2906
2907 /*
2908 * If we're here, then things are most likely
2909 * completely hosed, but if the error condition
2910 * is detected, it won't hurt to give it another
2911 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2912 */
2913 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2914 if (hsfsts.hsf_status.flcerr)
2915 /* Repeat for some time before giving up. */
2916 continue;
2917 if (!hsfsts.hsf_status.flcdone) {
2918 e_dbg("Timeout error - flash cycle did not complete.\n");
2919 break;
2920 }
2921 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2922
2923 return ret_val;
2924 }
2925
2926 /**
2927 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2928 * @hw: pointer to the HW structure
2929 * @offset: The index of the byte to read.
2930 * @data: The byte to write to the NVM.
2931 *
2932 * Writes a single byte to the NVM using the flash access registers.
2933 **/
2934 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2935 u8 data)
2936 {
2937 u16 word = (u16)data;
2938
2939 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2940 }
2941
2942 /**
2943 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2944 * @hw: pointer to the HW structure
2945 * @offset: The offset of the byte to write.
2946 * @byte: The byte to write to the NVM.
2947 *
2948 * Writes a single byte to the NVM using the flash access registers.
2949 * Goes through a retry algorithm before giving up.
2950 **/
2951 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2952 u32 offset, u8 byte)
2953 {
2954 s32 ret_val;
2955 u16 program_retries;
2956
2957 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2958 if (!ret_val)
2959 return ret_val;
2960
2961 for (program_retries = 0; program_retries < 100; program_retries++) {
2962 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
2963 udelay(100);
2964 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2965 if (!ret_val)
2966 break;
2967 }
2968 if (program_retries == 100)
2969 return -E1000_ERR_NVM;
2970
2971 return 0;
2972 }
2973
2974 /**
2975 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2976 * @hw: pointer to the HW structure
2977 * @bank: 0 for first bank, 1 for second bank, etc.
2978 *
2979 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2980 * bank N is 4096 * N + flash_reg_addr.
2981 **/
2982 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2983 {
2984 struct e1000_nvm_info *nvm = &hw->nvm;
2985 union ich8_hws_flash_status hsfsts;
2986 union ich8_hws_flash_ctrl hsflctl;
2987 u32 flash_linear_addr;
2988 /* bank size is in 16bit words - adjust to bytes */
2989 u32 flash_bank_size = nvm->flash_bank_size * 2;
2990 s32 ret_val;
2991 s32 count = 0;
2992 s32 j, iteration, sector_size;
2993
2994 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2995
2996 /*
2997 * Determine HW Sector size: Read BERASE bits of hw flash status
2998 * register
2999 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3000 * consecutive sectors. The start index for the nth Hw sector
3001 * can be calculated as = bank * 4096 + n * 256
3002 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3003 * The start index for the nth Hw sector can be calculated
3004 * as = bank * 4096
3005 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3006 * (ich9 only, otherwise error condition)
3007 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3008 */
3009 switch (hsfsts.hsf_status.berasesz) {
3010 case 0:
3011 /* Hw sector size 256 */
3012 sector_size = ICH_FLASH_SEG_SIZE_256;
3013 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3014 break;
3015 case 1:
3016 sector_size = ICH_FLASH_SEG_SIZE_4K;
3017 iteration = 1;
3018 break;
3019 case 2:
3020 sector_size = ICH_FLASH_SEG_SIZE_8K;
3021 iteration = 1;
3022 break;
3023 case 3:
3024 sector_size = ICH_FLASH_SEG_SIZE_64K;
3025 iteration = 1;
3026 break;
3027 default:
3028 return -E1000_ERR_NVM;
3029 }
3030
3031 /* Start with the base address, then add the sector offset. */
3032 flash_linear_addr = hw->nvm.flash_base_addr;
3033 flash_linear_addr += (bank) ? flash_bank_size : 0;
3034
3035 for (j = 0; j < iteration ; j++) {
3036 do {
3037 /* Steps */
3038 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3039 if (ret_val)
3040 return ret_val;
3041
3042 /*
3043 * Write a value 11 (block Erase) in Flash
3044 * Cycle field in hw flash control
3045 */
3046 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3047 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3048 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3049
3050 /*
3051 * Write the last 24 bits of an index within the
3052 * block into Flash Linear address field in Flash
3053 * Address.
3054 */
3055 flash_linear_addr += (j * sector_size);
3056 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3057
3058 ret_val = e1000_flash_cycle_ich8lan(hw,
3059 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3060 if (!ret_val)
3061 break;
3062
3063 /*
3064 * Check if FCERR is set to 1. If 1,
3065 * clear it and try the whole sequence
3066 * a few more times else Done
3067 */
3068 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3069 if (hsfsts.hsf_status.flcerr)
3070 /* repeat for some time before giving up */
3071 continue;
3072 else if (!hsfsts.hsf_status.flcdone)
3073 return ret_val;
3074 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3075 }
3076
3077 return 0;
3078 }
3079
3080 /**
3081 * e1000_valid_led_default_ich8lan - Set the default LED settings
3082 * @hw: pointer to the HW structure
3083 * @data: Pointer to the LED settings
3084 *
3085 * Reads the LED default settings from the NVM to data. If the NVM LED
3086 * settings is all 0's or F's, set the LED default to a valid LED default
3087 * setting.
3088 **/
3089 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3090 {
3091 s32 ret_val;
3092
3093 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
3094 if (ret_val) {
3095 e_dbg("NVM Read Error\n");
3096 return ret_val;
3097 }
3098
3099 if (*data == ID_LED_RESERVED_0000 ||
3100 *data == ID_LED_RESERVED_FFFF)
3101 *data = ID_LED_DEFAULT_ICH8LAN;
3102
3103 return 0;
3104 }
3105
3106 /**
3107 * e1000_id_led_init_pchlan - store LED configurations
3108 * @hw: pointer to the HW structure
3109 *
3110 * PCH does not control LEDs via the LEDCTL register, rather it uses
3111 * the PHY LED configuration register.
3112 *
3113 * PCH also does not have an "always on" or "always off" mode which
3114 * complicates the ID feature. Instead of using the "on" mode to indicate
3115 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
3116 * use "link_up" mode. The LEDs will still ID on request if there is no
3117 * link based on logic in e1000_led_[on|off]_pchlan().
3118 **/
3119 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3120 {
3121 struct e1000_mac_info *mac = &hw->mac;
3122 s32 ret_val;
3123 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3124 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3125 u16 data, i, temp, shift;
3126
3127 /* Get default ID LED modes */
3128 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3129 if (ret_val)
3130 return ret_val;
3131
3132 mac->ledctl_default = er32(LEDCTL);
3133 mac->ledctl_mode1 = mac->ledctl_default;
3134 mac->ledctl_mode2 = mac->ledctl_default;
3135
3136 for (i = 0; i < 4; i++) {
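		/* 4 NVM bits per LED select its mode; each PHY LED config
		 * field below is 5 bits wide
		 */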
3137 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3138 shift = (i * 5);
3139 switch (temp) {
3140 case ID_LED_ON1_DEF2:
3141 case ID_LED_ON1_ON2:
3142 case ID_LED_ON1_OFF2:
3143 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3144 mac->ledctl_mode1 |= (ledctl_on << shift);
3145 break;
3146 case ID_LED_OFF1_DEF2:
3147 case ID_LED_OFF1_ON2:
3148 case ID_LED_OFF1_OFF2:
3149 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3150 mac->ledctl_mode1 |= (ledctl_off << shift);
3151 break;
3152 default:
3153 /* Do nothing */
3154 break;
3155 }
3156 switch (temp) {
3157 case ID_LED_DEF1_ON2:
3158 case ID_LED_ON1_ON2:
3159 case ID_LED_OFF1_ON2:
3160 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3161 mac->ledctl_mode2 |= (ledctl_on << shift);
3162 break;
3163 case ID_LED_DEF1_OFF2:
3164 case ID_LED_ON1_OFF2:
3165 case ID_LED_OFF1_OFF2:
3166 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3167 mac->ledctl_mode2 |= (ledctl_off << shift);
3168 break;
3169 default:
3170 /* Do nothing */
3171 break;
3172 }
3173 }
3174
3175 return 0;
3176 }
3177
3178 /**
3179 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3180 * @hw: pointer to the HW structure
3181 *
3182  * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3183  * register, so the bus width is hard coded.
3184 **/
3185 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3186 {
3187 struct e1000_bus_info *bus = &hw->bus;
3188 s32 ret_val;
3189
3190 ret_val = e1000e_get_bus_info_pcie(hw);
3191
3192 /*
3193 * ICH devices are "PCI Express"-ish. They have
3194 * a configuration space, but do not contain
3195 * PCI Express Capability registers, so bus width
3196 * must be hardcoded.
3197 */
3198 if (bus->width == e1000_bus_width_unknown)
3199 bus->width = e1000_bus_width_pcie_x1;
3200
3201 return ret_val;
3202 }
3203
3204 /**
3205 * e1000_reset_hw_ich8lan - Reset the hardware
3206 * @hw: pointer to the HW structure
3207 *
3208 * Does a full reset of the hardware which includes a reset of the PHY and
3209 * MAC.
3210 **/
3211 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3212 {
3213 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3214 u16 kum_cfg;
3215 u32 ctrl, reg;
3216 s32 ret_val;
3217
3218 /*
3219 * Prevent the PCI-E bus from sticking if there is no TLP connection
3220 * on the last TLP read/write transaction when MAC is reset.
3221 */
3222 ret_val = e1000e_disable_pcie_master(hw);
3223 if (ret_val)
3224 e_dbg("PCI-E Master disable polling has failed.\n");
3225
3226 e_dbg("Masking off all interrupts\n");
3227 ew32(IMC, 0xffffffff);
3228
3229 /*
3230 * Disable the Transmit and Receive units. Then delay to allow
3231 * any pending transactions to complete before we hit the MAC
3232 * with the global reset.
3233 */
3234 ew32(RCTL, 0);
3235 ew32(TCTL, E1000_TCTL_PSP);
3236 e1e_flush();
3237
3238 usleep_range(10000, 20000);
3239
3240 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3241 if (hw->mac.type == e1000_ich8lan) {
3242 /* Set Tx and Rx buffer allocation to 8k apiece. */
3243 ew32(PBA, E1000_PBA_8K);
3244 /* Set Packet Buffer Size to 16k. */
3245 ew32(PBS, E1000_PBS_16K);
3246 }
3247
3248 if (hw->mac.type == e1000_pchlan) {
3249 /* Save the NVM K1 bit setting */
3250 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3251 if (ret_val)
3252 return ret_val;
3253
3254 if (kum_cfg & E1000_NVM_K1_ENABLE)
3255 dev_spec->nvm_k1_enabled = true;
3256 else
3257 dev_spec->nvm_k1_enabled = false;
3258 }
3259
3260 ctrl = er32(CTRL);
3261
3262 if (!hw->phy.ops.check_reset_block(hw)) {
3263 /*
3264 * Full-chip reset requires MAC and PHY reset at the same
3265 * time to make sure the interface between MAC and the
3266 * external PHY is reset.
3267 */
3268 ctrl |= E1000_CTRL_PHY_RST;
3269
3270 /*
3271 * Gate automatic PHY configuration by hardware on
3272 * non-managed 82579
3273 */
3274 if ((hw->mac.type == e1000_pch2lan) &&
3275 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3276 e1000_gate_hw_phy_config_ich8lan(hw, true);
3277 }
3278 ret_val = e1000_acquire_swflag_ich8lan(hw);
3279 e_dbg("Issuing a global reset to ich8lan\n");
3280 ew32(CTRL, (ctrl | E1000_CTRL_RST));
3281 /* cannot issue a flush here because it hangs the hardware */
3282 msleep(20);
3283
3284 /* Set Phy Config Counter to 50msec */
3285 if (hw->mac.type == e1000_pch2lan) {
3286 reg = er32(FEXTNVM3);
3287 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3288 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3289 ew32(FEXTNVM3, reg);
3290 }
3291
3292 if (!ret_val)
3293 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3294
3295 if (ctrl & E1000_CTRL_PHY_RST) {
3296 ret_val = hw->phy.ops.get_cfg_done(hw);
3297 if (ret_val)
3298 return ret_val;
3299
3300 ret_val = e1000_post_phy_reset_ich8lan(hw);
3301 if (ret_val)
3302 return ret_val;
3303 }
3304
3305 /*
3306 * For PCH, this write will make sure that any noise
3307 * will be detected as a CRC error and be dropped rather than show up
3308 * as a bad packet to the DMA engine.
3309 */
3310 if (hw->mac.type == e1000_pchlan)
3311 ew32(CRC_OFFSET, 0x65656565);
3312
3313 ew32(IMC, 0xffffffff);
3314 er32(ICR);
3315
3316 reg = er32(KABGTXD);
3317 reg |= E1000_KABGTXD_BGSQLBIAS;
3318 ew32(KABGTXD, reg);
3319
3320 return 0;
3321 }
3322
3323 /**
3324 * e1000_init_hw_ich8lan - Initialize the hardware
3325 * @hw: pointer to the HW structure
3326 *
3327 * Prepares the hardware for transmit and receive by doing the following:
3328 * - initialize hardware bits
3329 * - initialize LED identification
3330 * - setup receive address registers
3331 * - setup flow control
3332 * - setup transmit descriptors
3333 * - clear statistics
3334 **/
3335 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3336 {
3337 struct e1000_mac_info *mac = &hw->mac;
3338 u32 ctrl_ext, txdctl, snoop;
3339 s32 ret_val;
3340 u16 i;
3341
3342 e1000_initialize_hw_bits_ich8lan(hw);
3343
3344 /* Initialize identification LED */
3345 ret_val = mac->ops.id_led_init(hw);
3346 if (ret_val)
3347 e_dbg("Error initializing identification LED\n");
3348 /* This is not fatal and we should not stop init due to this */
3349
3350 /* Setup the receive address. */
3351 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
3352
3353 /* Zero out the Multicast HASH table */
3354 e_dbg("Zeroing the MTA\n");
3355 for (i = 0; i < mac->mta_reg_count; i++)
3356 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3357
3358 /*
3359 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3360 * the ME. Disable wakeup by clearing the host wakeup bit.
3361 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3362 */
3363 if (hw->phy.type == e1000_phy_82578) {
3364 e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
3365 i &= ~BM_WUC_HOST_WU_BIT;
3366 e1e_wphy(hw, BM_PORT_GEN_CFG, i);
3367 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3368 if (ret_val)
3369 return ret_val;
3370 }
3371
3372 /* Setup link and flow control */
3373 ret_val = mac->ops.setup_link(hw);
3374
3375 /* Set the transmit descriptor write-back policy for both queues */
3376 txdctl = er32(TXDCTL(0));
3377 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3378 E1000_TXDCTL_FULL_TX_DESC_WB;
3379 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3380 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3381 ew32(TXDCTL(0), txdctl);
3382 txdctl = er32(TXDCTL(1));
3383 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3384 E1000_TXDCTL_FULL_TX_DESC_WB;
3385 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3386 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3387 ew32(TXDCTL(1), txdctl);
3388
3389 /*
3390 * ICH8 has opposite polarity of no_snoop bits.
3391 * By default, we should use snoop behavior.
3392 */
3393 if (mac->type == e1000_ich8lan)
3394 snoop = PCIE_ICH8_SNOOP_ALL;
3395 else
3396 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3397 e1000e_set_pcie_no_snoop(hw, snoop);
3398
3399 ctrl_ext = er32(CTRL_EXT);
3400 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3401 ew32(CTRL_EXT, ctrl_ext);
3402
3403 /*
3404 * Clear all of the statistics registers (clear on read). It is
3405 * important that we do this after we have tried to establish link
3406 * because the symbol error count will increment wildly if there
3407 * is no link.
3408 */
3409 e1000_clear_hw_cntrs_ich8lan(hw);
3410
3411 return ret_val;
3412 }
3413 /**
3414 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3415 * @hw: pointer to the HW structure
3416 *
3417 * Sets/Clears required hardware bits necessary for correctly setting up the
3418 * hardware for transmit and receive.
3419 **/
3420 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3421 {
3422 u32 reg;
3423
3424 /* Extended Device Control */
3425 reg = er32(CTRL_EXT);
3426 reg |= (1 << 22);
3427 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3428 if (hw->mac.type >= e1000_pchlan)
3429 reg |= E1000_CTRL_EXT_PHYPDEN;
3430 ew32(CTRL_EXT, reg);
3431
3432 /* Transmit Descriptor Control 0 */
3433 reg = er32(TXDCTL(0));
3434 reg |= (1 << 22);
3435 ew32(TXDCTL(0), reg);
3436
3437 /* Transmit Descriptor Control 1 */
3438 reg = er32(TXDCTL(1));
3439 reg |= (1 << 22);
3440 ew32(TXDCTL(1), reg);
3441
3442 /* Transmit Arbitration Control 0 */
3443 reg = er32(TARC(0));
3444 if (hw->mac.type == e1000_ich8lan)
3445 reg |= (1 << 28) | (1 << 29);
3446 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3447 ew32(TARC(0), reg);
3448
3449 /* Transmit Arbitration Control 1 */
3450 reg = er32(TARC(1));
3451 if (er32(TCTL) & E1000_TCTL_MULR)
3452 reg &= ~(1 << 28);
3453 else
3454 reg |= (1 << 28);
3455 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3456 ew32(TARC(1), reg);
3457
3458 /* Device Status */
3459 if (hw->mac.type == e1000_ich8lan) {
3460 reg = er32(STATUS);
3461 reg &= ~(1 << 31);
3462 ew32(STATUS, reg);
3463 }
3464
3465 /*
3466 * work-around descriptor data corruption issue during nfs v2 udp
3467 * traffic, just disable the nfs filtering capability
3468 */
3469 reg = er32(RFCTL);
3470 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3471
3472 /*
3473 * Disable IPv6 extension header parsing because some malformed
3474 * IPv6 headers can hang the Rx.
3475 */
3476 if (hw->mac.type == e1000_ich8lan)
3477 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3478 ew32(RFCTL, reg);
3479 }
3480
3481 /**
3482 * e1000_setup_link_ich8lan - Setup flow control and link settings
3483 * @hw: pointer to the HW structure
3484 *
3485 * Determines which flow control settings to use, then configures flow
3486 * control. Calls the appropriate media-specific link configuration
3487 * function. Assuming the adapter has a valid link partner, a valid link
3488 * should be established. Assumes the hardware has previously been reset
3489 * and the transmitter and receiver are not enabled.
3490 **/
3491 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3492 {
3493 s32 ret_val;
3494
3495 if (hw->phy.ops.check_reset_block(hw))
3496 return 0;
3497
3498 /*
3499 * ICH parts do not have a word in the NVM to determine
3500 * the default flow control setting, so we explicitly
3501 * set it to full.
3502 */
3503 if (hw->fc.requested_mode == e1000_fc_default) {
3504 /* Workaround h/w hang when Tx flow control enabled */
3505 if (hw->mac.type == e1000_pchlan)
3506 hw->fc.requested_mode = e1000_fc_rx_pause;
3507 else
3508 hw->fc.requested_mode = e1000_fc_full;
3509 }
3510
3511 /*
3512 * Save off the requested flow control mode for use later. Depending
3513 * on the link partner's capabilities, we may or may not use this mode.
3514 */
3515 hw->fc.current_mode = hw->fc.requested_mode;
3516
3517 e_dbg("After fix-ups FlowControl is now = %x\n",
3518 hw->fc.current_mode);
3519
3520 /* Continue to configure the copper link. */
3521 ret_val = hw->mac.ops.setup_physical_interface(hw);
3522 if (ret_val)
3523 return ret_val;
3524
3525 ew32(FCTTV, hw->fc.pause_time);
3526 if ((hw->phy.type == e1000_phy_82578) ||
3527 (hw->phy.type == e1000_phy_82579) ||
3528 (hw->phy.type == e1000_phy_82577)) {
3529 ew32(FCRTV_PCH, hw->fc.refresh_time);
3530
3531 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3532 hw->fc.pause_time);
3533 if (ret_val)
3534 return ret_val;
3535 }
3536
3537 return e1000e_set_fc_watermarks(hw);
3538 }
3539
3540 /**
3541 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3542 * @hw: pointer to the HW structure
3543 *
3544  * Configures the Kumeran interface to the PHY to wait the appropriate time
3545  * when polling the PHY, then calls the generic setup_copper_link to finish
3546 * configuring the copper link.
3547 **/
3548 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3549 {
3550 u32 ctrl;
3551 s32 ret_val;
3552 u16 reg_data;
3553
3554 ctrl = er32(CTRL);
3555 ctrl |= E1000_CTRL_SLU;
3556 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3557 ew32(CTRL, ctrl);
3558
3559 /*
3560 * Set the mac to wait the maximum time between each iteration
3561 * and increase the max iterations when polling the phy;
3562 * this fixes erroneous timeouts at 10Mbps.
3563 */
3564 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3565 if (ret_val)
3566 return ret_val;
3567 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3568 &reg_data);
3569 if (ret_val)
3570 return ret_val;
3571 reg_data |= 0x3F;
3572 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3573 reg_data);
3574 if (ret_val)
3575 return ret_val;
3576
3577 switch (hw->phy.type) {
3578 case e1000_phy_igp_3:
3579 ret_val = e1000e_copper_link_setup_igp(hw);
3580 if (ret_val)
3581 return ret_val;
3582 break;
3583 case e1000_phy_bm:
3584 case e1000_phy_82578:
3585 ret_val = e1000e_copper_link_setup_m88(hw);
3586 if (ret_val)
3587 return ret_val;
3588 break;
3589 case e1000_phy_82577:
3590 case e1000_phy_82579:
3591 ret_val = e1000_copper_link_setup_82577(hw);
3592 if (ret_val)
3593 return ret_val;
3594 break;
3595 case e1000_phy_ife:
3596 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3597 if (ret_val)
3598 return ret_val;
3599
3600 reg_data &= ~IFE_PMC_AUTO_MDIX;
3601
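		/* 1 forces MDI, 2 forces MDI-X, anything else selects auto
		 * crossover
		 */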
3602 switch (hw->phy.mdix) {
3603 case 1:
3604 reg_data &= ~IFE_PMC_FORCE_MDIX;
3605 break;
3606 case 2:
3607 reg_data |= IFE_PMC_FORCE_MDIX;
3608 break;
3609 case 0:
3610 default:
3611 reg_data |= IFE_PMC_AUTO_MDIX;
3612 break;
3613 }
3614 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3615 if (ret_val)
3616 return ret_val;
3617 break;
3618 default:
3619 break;
3620 }
3621
3622 return e1000e_setup_copper_link(hw);
3623 }
3624
3625 /**
3626 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3627 * @hw: pointer to the HW structure
3628 * @speed: pointer to store current link speed
3629 * @duplex: pointer to store the current link duplex
3630 *
3631 * Calls the generic get_speed_and_duplex to retrieve the current link
3632 * information and then calls the Kumeran lock loss workaround for links at
3633 * gigabit speeds.
3634 **/
3635 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3636 u16 *duplex)
3637 {
3638 s32 ret_val;
3639
3640 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
3641 if (ret_val)
3642 return ret_val;
3643
3644 if ((hw->mac.type == e1000_ich8lan) &&
3645 (hw->phy.type == e1000_phy_igp_3) &&
3646 (*speed == SPEED_1000)) {
3647 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3648 }
3649
3650 return ret_val;
3651 }
3652
3653 /**
3654 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3655 * @hw: pointer to the HW structure
3656 *
3657 * Work-around for 82566 Kumeran PCS lock loss:
3658  * On link status change (i.e. PCI reset, speed change), when link is up and
3659  * the speed is gigabit:
3660 * 0) if workaround is optionally disabled do nothing
3661 * 1) wait 1ms for Kumeran link to come up
3662 * 2) check Kumeran Diagnostic register PCS lock loss bit
3663 * 3) if not set the link is locked (all is good), otherwise...
3664 * 4) reset the PHY
3665 * 5) repeat up to 10 times
3666 * Note: this is only called for IGP3 copper when speed is 1gb.
3667 **/
3668 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3669 {
3670 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3671 u32 phy_ctrl;
3672 s32 ret_val;
3673 u16 i, data;
3674 bool link;
3675
3676 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3677 return 0;
3678
3679 /*
3680 * Make sure link is up before proceeding. If not just return.
3681 * Attempting this while link is negotiating fouled up link
3682 * stability
3683 */
3684 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
3685 if (!link)
3686 return 0;
3687
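	/* retry up to 10 times: if the PCS lock was lost, reset the PHY
	 * and check again
	 */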
3688 for (i = 0; i < 10; i++) {
3689 /* read once to clear */
3690 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3691 if (ret_val)
3692 return ret_val;
3693 /* and again to get new status */
3694 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3695 if (ret_val)
3696 return ret_val;
3697
3698 /* check for PCS lock */
3699 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3700 return 0;
3701
3702 /* Issue PHY reset */
3703 e1000_phy_hw_reset(hw);
3704 mdelay(5);
3705 }
3706 /* Disable GigE link negotiation */
3707 phy_ctrl = er32(PHY_CTRL);
3708 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3709 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3710 ew32(PHY_CTRL, phy_ctrl);
3711
3712 /*
3713 * Call gig speed drop workaround on Gig disable before accessing
3714 * any PHY registers
3715 */
3716 e1000e_gig_downshift_workaround_ich8lan(hw);
3717
3718 /* unable to acquire PCS lock */
3719 return -E1000_ERR_PHY;
3720 }
3721
3722 /**
3723 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3724 * @hw: pointer to the HW structure
3725 * @state: boolean value used to set the current Kumeran workaround state
3726 *
3727 * If ICH8, set the current Kumeran workaround state (enabled - true
3728 * /disabled - false).
3729 **/
3730 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3731 bool state)
3732 {
3733 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3734
3735 if (hw->mac.type != e1000_ich8lan) {
3736 e_dbg("Workaround applies to ICH8 only.\n");
3737 return;
3738 }
3739
3740 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3741 }
3742
3743 /**
3744 * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3745 * @hw: pointer to the HW structure
3746 *
3747 * Workaround for 82566 power-down on D3 entry:
3748 * 1) disable gigabit link
3749 * 2) write VR power-down enable
3750 * 3) read it back
3751 * Continue if successful, else issue an LCD (LAN Connected Device/PHY) reset and repeat
3752 **/
3753 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3754 {
3755 u32 reg;
3756 u16 data;
3757 u8 retry = 0;
3758
3759 if (hw->phy.type != e1000_phy_igp_3)
3760 return;
3761
3762 /* Try the workaround twice (if needed) */
3763 do {
3764 /* Disable link */
3765 reg = er32(PHY_CTRL);
3766 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3767 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3768 ew32(PHY_CTRL, reg);
3769
3770 /*
3771 * Call gig speed drop workaround on Gig disable before
3772 * accessing any PHY registers
3773 */
3774 if (hw->mac.type == e1000_ich8lan)
3775 e1000e_gig_downshift_workaround_ich8lan(hw);
3776
3777 /* Write VR power-down enable */
3778 e1e_rphy(hw, IGP3_VR_CTRL, &data);
3779 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3780 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3781
3782 /* Read it back and test */
3783 e1e_rphy(hw, IGP3_VR_CTRL, &data);
3784 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3785 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3786 break;
3787
3788 /* Issue PHY reset and repeat at most one more time */
3789 reg = er32(CTRL);
3790 ew32(CTRL, reg | E1000_CTRL_PHY_RST);
3791 retry++;
3792 } while (retry);
3793 }
3794
3795 /**
3796 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3797 * @hw: pointer to the HW structure
3798 *
3799 * Steps to take when dropping from 1 Gb/s (e.g. link cable removal (LSC),
3800 * LPLU, Gig disable, MDIC PHY reset):
3801 * 1) Set Kumeran Near-end loopback
3802 * 2) Clear Kumeran Near-end loopback
3803 * Should only be called for ICH8[M] devices with any 1 Gb/s PHY.
3804 **/
3805 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3806 {
3807 s32 ret_val;
3808 u16 reg_data;
3809
3810 if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
3811 return;
3812
3813 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3814 &reg_data);
3815 if (ret_val)
3816 return;
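	/* Step 1 of the workaround: set Kumeran near-end loopback */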
3817 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3818 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3819 reg_data);
3820 if (ret_val)
3821 return;
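	/* Step 2: clear near-end loopback again to complete the sequence */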
3822 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3823 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3824 reg_data);
3825 }
3826
3827 /**
3828 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3829 * @hw: pointer to the HW structure
3830 *
3831 * During S0 to Sx transition, it is possible the link remains at gig
3832 * instead of negotiating to a lower speed. Before going to Sx, set
3833 * 'Gig Disable' to force link speed negotiation to a lower speed based on
3834 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3835 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3836 * needs to be written.
3837 **/
3838 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3839 {
3840 u32 phy_ctrl;
3841 s32 ret_val;
3842
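	/* Disable gigabit so the link renegotiates to a lower speed before Sx */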
3843 phy_ctrl = er32(PHY_CTRL);
3844 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
3845 ew32(PHY_CTRL, phy_ctrl);
3846
3847 if (hw->mac.type == e1000_ich8lan)
3848 e1000e_gig_downshift_workaround_ich8lan(hw);
3849
3850 if (hw->mac.type >= e1000_pchlan) {
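		/* Write the OEM bits (LED, GbE disable and LPLU config) for Sx */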
3851 e1000_oem_bits_config_ich8lan(hw, false);
3852
3853 /* Reset PHY to activate OEM bits on 82577/8 */
3854 if (hw->mac.type == e1000_pchlan)
3855 e1000e_phy_hw_reset_generic(hw);
3856
3857 ret_val = hw->phy.ops.acquire(hw);
3858 if (ret_val)
3859 return;
3860 e1000_write_smbus_addr(hw);
3861 hw->phy.ops.release(hw);
3862 }
3863 }
3864
3865 /**
3866 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3867 * @hw: pointer to the HW structure
3868 *
3869 * During Sx to S0 transitions on non-managed devices or managed devices
3870 * on which PHY resets are not blocked, if the PHY registers cannot be
3871 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
3872 * the PHY.
3873 **/
3874 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3875 {
3876 s32 ret_val;
3877
3878 if (hw->mac.type < e1000_pch2lan)
3879 return;
3880
3881 ret_val = e1000_init_phy_workarounds_pchlan(hw);
3882 if (ret_val) {
3883 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
3884 return;
3885 }
3886 }
3887
3888 /**
3889 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3890 * @hw: pointer to the HW structure
3891 *
3892 * Return the LED back to the default configuration.
3893 **/
3894 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3895 {
3896 if (hw->phy.type == e1000_phy_ife)
3897 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
3898
3899 ew32(LEDCTL, hw->mac.ledctl_default);
3900 return 0;
3901 }
3902
3903 /**
3904 * e1000_led_on_ich8lan - Turn LEDs on
3905 * @hw: pointer to the HW structure
3906 *
3907 * Turn on the LEDs.
3908 **/
3909 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3910 {
3911 if (hw->phy.type == e1000_phy_ife)
3912 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3913 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3914
3915 ew32(LEDCTL, hw->mac.ledctl_mode2);
3916 return 0;
3917 }
3918
3919 /**
3920 * e1000_led_off_ich8lan - Turn LEDs off
3921 * @hw: pointer to the HW structure
3922 *
3923 * Turn off the LEDs.
3924 **/
3925 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3926 {
3927 if (hw->phy.type == e1000_phy_ife)
3928 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3929 (IFE_PSCL_PROBE_MODE |
3930 IFE_PSCL_PROBE_LEDS_OFF));
3931
3932 ew32(LEDCTL, hw->mac.ledctl_mode1);
3933 return 0;
3934 }
3935
3936 /**
3937 * e1000_setup_led_pchlan - Configures SW controllable LED
3938 * @hw: pointer to the HW structure
3939 *
3940 * This prepares the SW controllable LED for use.
3941 **/
3942 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3943 {
3944 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
3945 }
3946
3947 /**
3948 * e1000_cleanup_led_pchlan - Restore the default LED operation
3949 * @hw: pointer to the HW structure
3950 *
3951 * Return the LED back to the default configuration.
3952 **/
3953 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3954 {
3955 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
3956 }
3957
3958 /**
3959 * e1000_led_on_pchlan - Turn LEDs on
3960 * @hw: pointer to the HW structure
3961 *
3962 * Turn on the LEDs.
3963 **/
3964 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3965 {
3966 u16 data = (u16)hw->mac.ledctl_mode2;
3967 u32 i, led;
3968
3969 /*
3970 * If no link, then turn LED on by setting the invert bit
3971 * for each LED whose mode is "link_up" in ledctl_mode2.
3972 */
3973 if (!(er32(STATUS) & E1000_STATUS_LU)) {
3974 for (i = 0; i < 3; i++) {
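			/* each LED is configured in its own 5-bit field */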
3975 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3976 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3977 E1000_LEDCTL_MODE_LINK_UP)
3978 continue;
3979 if (led & E1000_PHY_LED0_IVRT)
3980 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3981 else
3982 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3983 }
3984 }
3985
3986 return e1e_wphy(hw, HV_LED_CONFIG, data);
3987 }
3988
3989 /**
3990 * e1000_led_off_pchlan - Turn LEDs off
3991 * @hw: pointer to the HW structure
3992 *
3993 * Turn off the LEDs.
3994 **/
3995 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3996 {
3997 u16 data = (u16)hw->mac.ledctl_mode1;
3998 u32 i, led;
3999
4000 /*
4001 * If no link, then turn LED off by clearing the invert bit
4002 * for each LED whose mode is "link_up" in ledctl_mode1.
4003 */
4004 if (!(er32(STATUS) & E1000_STATUS_LU)) {
4005 for (i = 0; i < 3; i++) {
4006 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4007 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4008 E1000_LEDCTL_MODE_LINK_UP)
4009 continue;
4010 if (led & E1000_PHY_LED0_IVRT)
4011 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4012 else
4013 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4014 }
4015 }
4016
4017 return e1e_wphy(hw, HV_LED_CONFIG, data);
4018 }
4019
4020 /**
4021 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4022 * @hw: pointer to the HW structure
4023 *
4024 * Read appropriate register for the config done bit for completion status
4025 * and configure the PHY through s/w for EEPROM-less parts.
4026 *
4027 * NOTE: some EEPROM-less silicon will fail trying to read the config done
4028 * bit; in that case only an error is logged and execution continues. If we
4029 * were to return with an error, EEPROM-less silicon would not be able to be
4030 * reset or change link.
4031 **/
4032 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4033 {
4034 s32 ret_val = 0;
4035 u32 bank = 0;
4036 u32 status;
4037
4038 e1000e_get_cfg_done(hw);
4039
4040 /* Wait for indication from h/w that it has completed basic config */
4041 if (hw->mac.type >= e1000_ich10lan) {
4042 e1000_lan_init_done_ich8lan(hw);
4043 } else {
4044 ret_val = e1000e_get_auto_rd_done(hw);
4045 if (ret_val) {
4046 /*
4047 * When auto config read does not complete, do not
4048 * return with an error. This can happen in situations
4049 * where there is no EEPROM; returning an error would prevent link.
4050 */
4051 e_dbg("Auto Read Done did not complete\n");
4052 ret_val = 0;
4053 }
4054 }
4055
4056 /* Clear PHY Reset Asserted bit */
4057 status = er32(STATUS);
4058 if (status & E1000_STATUS_PHYRA)
4059 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
4060 else
4061 e_dbg("PHY Reset Asserted not set - needs delay\n");
4062
4063 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4064 if (hw->mac.type <= e1000_ich9lan) {
4065 if (!(er32(EECD) & E1000_EECD_PRES) &&
4066 (hw->phy.type == e1000_phy_igp_3)) {
4067 e1000e_phy_init_script_igp3(hw);
4068 }
4069 } else {
4070 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4071 /* Maybe we should do a basic PHY config */
4072 e_dbg("EEPROM not present\n");
4073 ret_val = -E1000_ERR_CONFIG;
4074 }
4075 }
4076
4077 return ret_val;
4078 }
4079
4080 /**
4081 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4082 * @hw: pointer to the HW structure
4083 *
4084 * In the case of a PHY power down to save power, or to turn off link during a
4085 * driver unload, or when wake on LAN is not enabled, remove the link.
4086 **/
4087 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4088 {
4089 /* If the management interface is not enabled, then power down */
4090 if (!(hw->mac.ops.check_mng_mode(hw) ||
4091 hw->phy.ops.check_reset_block(hw)))
4092 e1000_power_down_phy_copper(hw);
4093 }
4094
4095 /**
4096 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4097 * @hw: pointer to the HW structure
4098 *
4099 * Clears hardware counters specific to the silicon family and calls
4100 * clear_hw_cntrs_generic to clear all general purpose counters.
4101 **/
4102 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4103 {
4104 u16 phy_data;
4105 s32 ret_val;
4106
4107 e1000e_clear_hw_cntrs_base(hw);
4108
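	/* reading these device-specific statistics registers clears them */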
4109 er32(ALGNERRC);
4110 er32(RXERRC);
4111 er32(TNCRS);
4112 er32(CEXTERR);
4113 er32(TSCTC);
4114 er32(TSCTFC);
4115
4116 er32(MGTPRC);
4117 er32(MGTPDC);
4118 er32(MGTPTC);
4119
4120 er32(IAC);
4121 er32(ICRXOC);
4122
4123 /* Clear PHY statistics registers */
4124 if ((hw->phy.type == e1000_phy_82578) ||
4125 (hw->phy.type == e1000_phy_82579) ||
4126 (hw->phy.type == e1000_phy_82577)) {
4127 ret_val = hw->phy.ops.acquire(hw);
4128 if (ret_val)
4129 return;
4130 ret_val = hw->phy.ops.set_page(hw,
4131 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4132 if (ret_val)
4133 goto release;
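		/* read each PHY statistics register pair (upper/lower) to clear it */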
4134 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4135 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4136 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4137 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4138 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4139 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4140 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4141 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4142 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4143 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4144 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4145 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4146 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4147 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4148 release:
4149 hw->phy.ops.release(hw);
4150 }
4151 }
4152
4153 static const struct e1000_mac_operations ich8_mac_ops = {
4154 /* check_mng_mode dependent on mac type */
4155 .check_for_link = e1000_check_for_copper_link_ich8lan,
4156 /* cleanup_led dependent on mac type */
4157 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
4158 .get_bus_info = e1000_get_bus_info_ich8lan,
4159 .set_lan_id = e1000_set_lan_id_single_port,
4160 .get_link_up_info = e1000_get_link_up_info_ich8lan,
4161 /* led_on dependent on mac type */
4162 /* led_off dependent on mac type */
4163 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
4164 .reset_hw = e1000_reset_hw_ich8lan,
4165 .init_hw = e1000_init_hw_ich8lan,
4166 .setup_link = e1000_setup_link_ich8lan,
4167 .setup_physical_interface = e1000_setup_copper_link_ich8lan,
4168 /* id_led_init dependent on mac type */
4169 .config_collision_dist = e1000e_config_collision_dist_generic,
4170 .rar_set = e1000e_rar_set_generic,
4171 };
4172
4173 static const struct e1000_phy_operations ich8_phy_ops = {
4174 .acquire = e1000_acquire_swflag_ich8lan,
4175 .check_reset_block = e1000_check_reset_block_ich8lan,
4176 .commit = NULL,
4177 .get_cfg_done = e1000_get_cfg_done_ich8lan,
4178 .get_cable_length = e1000e_get_cable_length_igp_2,
4179 .read_reg = e1000e_read_phy_reg_igp,
4180 .release = e1000_release_swflag_ich8lan,
4181 .reset = e1000_phy_hw_reset_ich8lan,
4182 .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
4183 .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
4184 .write_reg = e1000e_write_phy_reg_igp,
4185 };
4186
4187 static const struct e1000_nvm_operations ich8_nvm_ops = {
4188 .acquire = e1000_acquire_nvm_ich8lan,
4189 .read = e1000_read_nvm_ich8lan,
4190 .release = e1000_release_nvm_ich8lan,
4191 .reload = e1000e_reload_nvm_generic,
4192 .update = e1000_update_nvm_checksum_ich8lan,
4193 .valid_led_default = e1000_valid_led_default_ich8lan,
4194 .validate = e1000_validate_nvm_checksum_ich8lan,
4195 .write = e1000_write_nvm_ich8lan,
4196 };
4197
4198 const struct e1000_info e1000_ich8_info = {
4199 .mac = e1000_ich8lan,
4200 .flags = FLAG_HAS_WOL
4201 | FLAG_IS_ICH
4202 | FLAG_HAS_CTRLEXT_ON_LOAD
4203 | FLAG_HAS_AMT
4204 | FLAG_HAS_FLASH
4205 | FLAG_APME_IN_WUC,
4206 .pba = 8,
4207 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
4208 .get_variants = e1000_get_variants_ich8lan,
4209 .mac_ops = &ich8_mac_ops,
4210 .phy_ops = &ich8_phy_ops,
4211 .nvm_ops = &ich8_nvm_ops,
4212 };
4213
4214 const struct e1000_info e1000_ich9_info = {
4215 .mac = e1000_ich9lan,
4216 .flags = FLAG_HAS_JUMBO_FRAMES
4217 | FLAG_IS_ICH
4218 | FLAG_HAS_WOL
4219 | FLAG_HAS_CTRLEXT_ON_LOAD
4220 | FLAG_HAS_AMT
4221 | FLAG_HAS_FLASH
4222 | FLAG_APME_IN_WUC,
4223 .pba = 18,
4224 .max_hw_frame_size = DEFAULT_JUMBO,
4225 .get_variants = e1000_get_variants_ich8lan,
4226 .mac_ops = &ich8_mac_ops,
4227 .phy_ops = &ich8_phy_ops,
4228 .nvm_ops = &ich8_nvm_ops,
4229 };
4230
4231 const struct e1000_info e1000_ich10_info = {
4232 .mac = e1000_ich10lan,
4233 .flags = FLAG_HAS_JUMBO_FRAMES
4234 | FLAG_IS_ICH
4235 | FLAG_HAS_WOL
4236 | FLAG_HAS_CTRLEXT_ON_LOAD
4237 | FLAG_HAS_AMT
4238 | FLAG_HAS_FLASH
4239 | FLAG_APME_IN_WUC,
4240 .pba = 18,
4241 .max_hw_frame_size = DEFAULT_JUMBO,
4242 .get_variants = e1000_get_variants_ich8lan,
4243 .mac_ops = &ich8_mac_ops,
4244 .phy_ops = &ich8_phy_ops,
4245 .nvm_ops = &ich8_nvm_ops,
4246 };
4247
4248 const struct e1000_info e1000_pch_info = {
4249 .mac = e1000_pchlan,
4250 .flags = FLAG_IS_ICH
4251 | FLAG_HAS_WOL
4252 | FLAG_HAS_CTRLEXT_ON_LOAD
4253 | FLAG_HAS_AMT
4254 | FLAG_HAS_FLASH
4255 | FLAG_HAS_JUMBO_FRAMES
4256 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
4257 | FLAG_APME_IN_WUC,
4258 .flags2 = FLAG2_HAS_PHY_STATS,
4259 .pba = 26,
4260 .max_hw_frame_size = 4096,
4261 .get_variants = e1000_get_variants_ich8lan,
4262 .mac_ops = &ich8_mac_ops,
4263 .phy_ops = &ich8_phy_ops,
4264 .nvm_ops = &ich8_nvm_ops,
4265 };
4266
4267 const struct e1000_info e1000_pch2_info = {
4268 .mac = e1000_pch2lan,
4269 .flags = FLAG_IS_ICH
4270 | FLAG_HAS_WOL
4271 | FLAG_HAS_CTRLEXT_ON_LOAD
4272 | FLAG_HAS_AMT
4273 | FLAG_HAS_FLASH
4274 | FLAG_HAS_JUMBO_FRAMES
4275 | FLAG_APME_IN_WUC,
4276 .flags2 = FLAG2_HAS_PHY_STATS
4277 | FLAG2_HAS_EEE,
4278 .pba = 26,
4279 .max_hw_frame_size = DEFAULT_JUMBO,
4280 .get_variants = e1000_get_variants_ich8lan,
4281 .mac_ops = &ich8_mac_ops,
4282 .phy_ops = &ich8_phy_ops,
4283 .nvm_ops = &ich8_nvm_ops,
4284 };