Commit | Line | Data |
---|---|---|
f96a8a0b CW |
1 | /******************************************************************************* |
2 | ||
3 | Intel(R) Gigabit Ethernet Linux driver | |
4b9ea462 | 4 | Copyright(c) 2007-2013 Intel Corporation. |
f96a8a0b CW |
5 | |
6 | This program is free software; you can redistribute it and/or modify it | |
7 | under the terms and conditions of the GNU General Public License, | |
8 | version 2, as published by the Free Software Foundation. | |
9 | ||
10 | This program is distributed in the hope it will be useful, but WITHOUT | |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License along with | |
16 | this program; if not, write to the Free Software Foundation, Inc., | |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | ||
19 | The full GNU General Public License is included in this distribution in | |
20 | the file called "COPYING". | |
21 | ||
22 | Contact Information: | |
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
25 | ||
26 | ******************************************************************************/ | |
27 | ||
28 | /* e1000_i210 | |
29 | * e1000_i211 | |
30 | */ | |
31 | ||
32 | #include <linux/types.h> | |
33 | #include <linux/if_ether.h> | |
34 | ||
35 | #include "e1000_hw.h" | |
36 | #include "e1000_i210.h" | |
37 | ||
7916a53d CW |
/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.
 *  Returns E1000_SUCCESS when both the SW (SMBI) and FW (SWESMBI)
 *  semaphore bits are owned, -E1000_ERR_NVM on timeout.
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Worst-case wait scales with NVM size: one 50 usec poll per word */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			/* One-shot recovery: never force-release twice */
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched (FW may clear it back) */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
f96a8a0b CW |
104 | |
105 | /** | |
106 | * igb_acquire_nvm_i210 - Request for access to EEPROM | |
107 | * @hw: pointer to the HW structure | |
108 | * | |
109 | * Acquire the necessary semaphores for exclusive access to the EEPROM. | |
110 | * Set the EEPROM access request bit and wait for EEPROM access grant bit. | |
111 | * Return successful if access grant bit set, else clear the request for | |
112 | * EEPROM access and return -E1000_ERR_NVM (-1). | |
113 | **/ | |
114 | s32 igb_acquire_nvm_i210(struct e1000_hw *hw) | |
115 | { | |
116 | return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); | |
117 | } | |
118 | ||
119 | /** | |
120 | * igb_release_nvm_i210 - Release exclusive access to EEPROM | |
121 | * @hw: pointer to the HW structure | |
122 | * | |
123 | * Stop any current commands to the EEPROM and clear the EEPROM request bit, | |
124 | * then release the semaphores acquired. | |
125 | **/ | |
126 | void igb_release_nvm_i210(struct e1000_hw *hw) | |
127 | { | |
128 | igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); | |
129 | } | |
130 | ||
/**
 *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 *  Returns E1000_SUCCESS when the requested resource bit(s) are set,
 *  -E1000_ERR_SWFW_SYNC on HW semaphore failure or timeout.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware's bits live in the upper half */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* SW_FW_SYNC may only be read/modified while holding the
		 * HW semaphore.
		 */
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		/* Resource free when neither SW nor FW bit is set */
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource; we still hold the HW semaphore here */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}
176 | ||
177 | /** | |
178 | * igb_release_swfw_sync_i210 - Release SW/FW semaphore | |
179 | * @hw: pointer to the HW structure | |
180 | * @mask: specifies which semaphore to acquire | |
181 | * | |
182 | * Release the SW/FW semaphore used to access the PHY or NVM. The mask | |
183 | * will also specify which port we're releasing the lock for. | |
184 | **/ | |
185 | void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) | |
186 | { | |
187 | u32 swfw_sync; | |
188 | ||
189 | while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS) | |
190 | ; /* Empty */ | |
191 | ||
192 | swfw_sync = rd32(E1000_SW_FW_SYNC); | |
193 | swfw_sync &= ~mask; | |
194 | wr32(E1000_SW_FW_SYNC, swfw_sync); | |
195 | ||
d44e7a9a | 196 | igb_put_hw_semaphore(hw); |
f96a8a0b CW |
197 | } |
198 | ||
f96a8a0b CW |
199 | /** |
200 | * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register | |
201 | * @hw: pointer to the HW structure | |
202 | * @offset: offset of word in the Shadow Ram to read | |
203 | * @words: number of words to read | |
204 | * @data: word read from the Shadow Ram | |
205 | * | |
206 | * Reads a 16 bit word from the Shadow Ram using the EERD register. | |
207 | * Uses necessary synchronization semaphores. | |
208 | **/ | |
209 | s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, | |
210 | u16 *data) | |
211 | { | |
212 | s32 status = E1000_SUCCESS; | |
213 | u16 i, count; | |
214 | ||
215 | /* We cannot hold synchronization semaphores for too long, | |
216 | * because of forceful takeover procedure. However it is more efficient | |
b980ac18 JK |
217 | * to read in bursts than synchronizing access for each word. |
218 | */ | |
f96a8a0b CW |
219 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { |
220 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
221 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
222 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
223 | status = igb_read_nvm_eerd(hw, offset, count, | |
224 | data + i); | |
225 | hw->nvm.ops.release(hw); | |
226 | } else { | |
227 | status = E1000_ERR_SWFW_SYNC; | |
228 | } | |
229 | ||
230 | if (status != E1000_SUCCESS) | |
231 | break; | |
232 | } | |
233 | ||
234 | return status; | |
235 | } | |
236 | ||
f96a8a0b CW |
/**
 *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If igb_update_nvm_checksum is not called after this function , the
 *  Shadow Ram will most likely contain an invalid checksum.
 *
 *  NOTE(review): callers in this file take the NVM semaphore before
 *  calling this helper - presumably that is required; confirm before
 *  adding new call sites.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;	/* x 5 usec poll = 0.5 s max per word */
	s32 ret_val = E1000_SUCCESS;

	/* A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Pack word address, data and the start-command bit */
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		/* Poll until hardware latches the DONE bit for this word */
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			udelay(5);
		}

		/* Abort on timeout; words written so far are left in place */
		if (ret_val != E1000_SUCCESS) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
292 | ||
7916a53d CW |
293 | /** |
294 | * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR | |
295 | * @hw: pointer to the HW structure | |
296 | * @offset: offset within the Shadow RAM to be written to | |
297 | * @words: number of words to write | |
298 | * @data: 16 bit word(s) to be written to the Shadow RAM | |
299 | * | |
300 | * Writes data to Shadow RAM at offset using EEWR register. | |
301 | * | |
302 | * If e1000_update_nvm_checksum is not called after this function , the | |
303 | * data will not be committed to FLASH and also Shadow RAM will most likely | |
304 | * contain an invalid checksum. | |
305 | * | |
306 | * If error code is returned, data and Shadow RAM may be inconsistent - buffer | |
307 | * partially written. | |
b980ac18 | 308 | **/ |
7916a53d CW |
309 | s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, |
310 | u16 *data) | |
311 | { | |
312 | s32 status = E1000_SUCCESS; | |
313 | u16 i, count; | |
314 | ||
315 | /* We cannot hold synchronization semaphores for too long, | |
316 | * because of forceful takeover procedure. However it is more efficient | |
317 | * to write in bursts than synchronizing access for each word. | |
318 | */ | |
319 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | |
320 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
321 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
322 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
323 | status = igb_write_nvm_srwr(hw, offset, count, | |
324 | data + i); | |
325 | hw->nvm.ops.release(hw); | |
326 | } else { | |
327 | status = E1000_ERR_SWFW_SYNC; | |
328 | } | |
329 | ||
330 | if (status != E1000_SUCCESS) | |
331 | break; | |
332 | } | |
333 | ||
334 | return status; | |
335 | } | |
336 | ||
f96a8a0b CW |
337 | /** |
338 | * igb_read_nvm_i211 - Read NVM wrapper function for I211 | |
339 | * @hw: pointer to the HW structure | |
5c17a203 | 340 | * @words: number of words to read |
f96a8a0b CW |
341 | * @data: pointer to the data read |
342 | * | |
343 | * Wrapper function to return data formerly found in the NVM. | |
344 | **/ | |
345 | s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, | |
346 | u16 *data) | |
347 | { | |
348 | s32 ret_val = E1000_SUCCESS; | |
349 | ||
350 | /* Only the MAC addr is required to be present in the iNVM */ | |
351 | switch (offset) { | |
352 | case NVM_MAC_ADDR: | |
353 | ret_val = igb_read_invm_i211(hw, offset, &data[0]); | |
354 | ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]); | |
355 | ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]); | |
356 | if (ret_val != E1000_SUCCESS) | |
357 | hw_dbg("MAC Addr not found in iNVM\n"); | |
358 | break; | |
f96a8a0b | 359 | case NVM_INIT_CTRL_2: |
1720ee3e CW |
360 | ret_val = igb_read_invm_i211(hw, (u8)offset, data); |
361 | if (ret_val != E1000_SUCCESS) { | |
362 | *data = NVM_INIT_CTRL_2_DEFAULT_I211; | |
363 | ret_val = E1000_SUCCESS; | |
364 | } | |
365 | break; | |
f96a8a0b | 366 | case NVM_INIT_CTRL_4: |
1720ee3e CW |
367 | ret_val = igb_read_invm_i211(hw, (u8)offset, data); |
368 | if (ret_val != E1000_SUCCESS) { | |
369 | *data = NVM_INIT_CTRL_4_DEFAULT_I211; | |
370 | ret_val = E1000_SUCCESS; | |
371 | } | |
372 | break; | |
f96a8a0b | 373 | case NVM_LED_1_CFG: |
1720ee3e CW |
374 | ret_val = igb_read_invm_i211(hw, (u8)offset, data); |
375 | if (ret_val != E1000_SUCCESS) { | |
376 | *data = NVM_LED_1_CFG_DEFAULT_I211; | |
377 | ret_val = E1000_SUCCESS; | |
378 | } | |
379 | break; | |
f96a8a0b CW |
380 | case NVM_LED_0_2_CFG: |
381 | igb_read_invm_i211(hw, offset, data); | |
1720ee3e CW |
382 | if (ret_val != E1000_SUCCESS) { |
383 | *data = NVM_LED_0_2_CFG_DEFAULT_I211; | |
384 | ret_val = E1000_SUCCESS; | |
385 | } | |
f96a8a0b | 386 | break; |
1720ee3e CW |
387 | case NVM_ID_LED_SETTINGS: |
388 | ret_val = igb_read_invm_i211(hw, (u8)offset, data); | |
389 | if (ret_val != E1000_SUCCESS) { | |
390 | *data = ID_LED_RESERVED_FFFF; | |
391 | ret_val = E1000_SUCCESS; | |
392 | } | |
f96a8a0b CW |
393 | case NVM_SUB_DEV_ID: |
394 | *data = hw->subsystem_device_id; | |
395 | break; | |
396 | case NVM_SUB_VEN_ID: | |
397 | *data = hw->subsystem_vendor_id; | |
398 | break; | |
399 | case NVM_DEV_ID: | |
400 | *data = hw->device_id; | |
401 | break; | |
402 | case NVM_VEN_ID: | |
403 | *data = hw->vendor_id; | |
404 | break; | |
405 | default: | |
406 | hw_dbg("NVM word 0x%02x is not mapped.\n", offset); | |
407 | *data = NVM_RESERVED_WORD; | |
408 | break; | |
409 | } | |
410 | return ret_val; | |
411 | } | |
412 | ||
/**
 *  igb_read_invm_i211 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 *  Returns E1000_SUCCESS when found, -E1000_ERR_INVM_VALUE_NOT_FOUND
 *  otherwise.
 **/
s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	/* Walk the iNVM dword by dword, decoding each record header */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		/* An uninitialized record marks the end of programmed data */
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Skip over the payload dwords of multi-dword structures */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			/* Word-autoload record: match on the 8-bit address */
			if (word_address == (u8)address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
454 | ||
09e77287 CW |
/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 *  Returns E1000_SUCCESS when both fields could be decoded,
 *  -E1000_ERR_INVM_VALUE_NOT_FOUND otherwise.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver) {
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of dwords to scan; the trailing ULT area is excluded
	 * (presumably reserved for other data - confirm against datasheet).
	 */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number: scan from the end of the region backwards,
	 * looking for the last version slot that was programmed.
	 */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	/* Split the 16-bit version word into major/minor components */
	if (status == E1000_SUCCESS) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type: same backwards scan over the snapshot */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
548 | ||
f96a8a0b CW |
549 | /** |
550 | * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum | |
551 | * @hw: pointer to the HW structure | |
552 | * | |
553 | * Calculates the EEPROM checksum by reading/adding each word of the EEPROM | |
554 | * and then verifies that the sum of the EEPROM is equal to 0xBABA. | |
555 | **/ | |
556 | s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) | |
557 | { | |
558 | s32 status = E1000_SUCCESS; | |
559 | s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); | |
560 | ||
561 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
562 | ||
b980ac18 | 563 | /* Replace the read function with semaphore grabbing with |
f96a8a0b CW |
564 | * the one that skips this for a while. |
565 | * We have semaphore taken already here. | |
566 | */ | |
567 | read_op_ptr = hw->nvm.ops.read; | |
568 | hw->nvm.ops.read = igb_read_nvm_eerd; | |
569 | ||
570 | status = igb_validate_nvm_checksum(hw); | |
571 | ||
572 | /* Revert original read operation. */ | |
573 | hw->nvm.ops.read = read_op_ptr; | |
574 | ||
575 | hw->nvm.ops.release(hw); | |
576 | } else { | |
577 | status = E1000_ERR_SWFW_SYNC; | |
578 | } | |
579 | ||
580 | return status; | |
581 | } | |
582 | ||
f96a8a0b CW |
/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  Next commit EEPROM data onto the Flash.
 *  Returns E1000_SUCCESS, a read/write error, or -E1000_ERR_SWFW_SYNC
 *  if the NVM semaphore could not be taken.
 **/
s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word before the checksum slot */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Release before bailing out - we hold it */
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Store the complement so the full image sums to NVM_SUM */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the shadow RAM to flash without holding the lock */
		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
640 | ||
7916a53d CW |
641 | /** |
642 | * igb_pool_flash_update_done_i210 - Pool FLUDONE status. | |
643 | * @hw: pointer to the HW structure | |
644 | * | |
b980ac18 | 645 | **/ |
7916a53d CW |
646 | static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) |
647 | { | |
648 | s32 ret_val = -E1000_ERR_NVM; | |
649 | u32 i, reg; | |
650 | ||
651 | for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { | |
652 | reg = rd32(E1000_EECD); | |
653 | if (reg & E1000_EECD_FLUDONE_I210) { | |
654 | ret_val = E1000_SUCCESS; | |
655 | break; | |
656 | } | |
657 | udelay(5); | |
658 | } | |
659 | ||
660 | return ret_val; | |
661 | } | |
662 | ||
f96a8a0b CW |
663 | /** |
664 | * igb_update_flash_i210 - Commit EEPROM to the flash | |
665 | * @hw: pointer to the HW structure | |
666 | * | |
667 | **/ | |
668 | s32 igb_update_flash_i210(struct e1000_hw *hw) | |
669 | { | |
670 | s32 ret_val = E1000_SUCCESS; | |
671 | u32 flup; | |
672 | ||
673 | ret_val = igb_pool_flash_update_done_i210(hw); | |
674 | if (ret_val == -E1000_ERR_NVM) { | |
675 | hw_dbg("Flash update time out\n"); | |
676 | goto out; | |
677 | } | |
678 | ||
679 | flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; | |
680 | wr32(E1000_EECD, flup); | |
681 | ||
682 | ret_val = igb_pool_flash_update_done_i210(hw); | |
683 | if (ret_val == E1000_SUCCESS) | |
684 | hw_dbg("Flash update complete\n"); | |
685 | else | |
686 | hw_dbg("Flash update time out\n"); | |
687 | ||
688 | out: | |
689 | return ret_val; | |
690 | } | |
691 | ||
f96a8a0b CW |
692 | /** |
693 | * igb_valid_led_default_i210 - Verify a valid default LED config | |
694 | * @hw: pointer to the HW structure | |
695 | * @data: pointer to the NVM (EEPROM) | |
696 | * | |
697 | * Read the EEPROM for the current default LED configuration. If the | |
698 | * LED configuration is not valid, set to a valid LED configuration. | |
699 | **/ | |
700 | s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) | |
701 | { | |
702 | s32 ret_val; | |
703 | ||
704 | ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); | |
705 | if (ret_val) { | |
706 | hw_dbg("NVM Read Error\n"); | |
707 | goto out; | |
708 | } | |
709 | ||
710 | if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { | |
711 | switch (hw->phy.media_type) { | |
712 | case e1000_media_type_internal_serdes: | |
713 | *data = ID_LED_DEFAULT_I210_SERDES; | |
714 | break; | |
715 | case e1000_media_type_copper: | |
716 | default: | |
717 | *data = ID_LED_DEFAULT_I210; | |
718 | break; | |
719 | } | |
720 | } | |
721 | out: | |
722 | return ret_val; | |
723 | } | |
87371b9d MV |
724 | |
725 | /** | |
726 | * __igb_access_xmdio_reg - Read/write XMDIO register | |
727 | * @hw: pointer to the HW structure | |
728 | * @address: XMDIO address to program | |
729 | * @dev_addr: device address to program | |
730 | * @data: pointer to value to read/write from/to the XMDIO address | |
731 | * @read: boolean flag to indicate read or write | |
732 | **/ | |
733 | static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, | |
734 | u8 dev_addr, u16 *data, bool read) | |
735 | { | |
736 | s32 ret_val = E1000_SUCCESS; | |
737 | ||
738 | ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); | |
739 | if (ret_val) | |
740 | return ret_val; | |
741 | ||
742 | ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); | |
743 | if (ret_val) | |
744 | return ret_val; | |
745 | ||
746 | ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | | |
747 | dev_addr); | |
748 | if (ret_val) | |
749 | return ret_val; | |
750 | ||
751 | if (read) | |
752 | ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); | |
753 | else | |
754 | ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); | |
755 | if (ret_val) | |
756 | return ret_val; | |
757 | ||
758 | /* Recalibrate the device back to 0 */ | |
759 | ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); | |
760 | if (ret_val) | |
761 | return ret_val; | |
762 | ||
763 | return ret_val; | |
764 | } | |
765 | ||
766 | /** | |
767 | * igb_read_xmdio_reg - Read XMDIO register | |
768 | * @hw: pointer to the HW structure | |
769 | * @addr: XMDIO address to program | |
770 | * @dev_addr: device address to program | |
771 | * @data: value to be read from the EMI address | |
772 | **/ | |
773 | s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) | |
774 | { | |
775 | return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); | |
776 | } | |
777 | ||
778 | /** | |
779 | * igb_write_xmdio_reg - Write XMDIO register | |
780 | * @hw: pointer to the HW structure | |
781 | * @addr: XMDIO address to program | |
782 | * @dev_addr: device address to program | |
783 | * @data: value to be written to the XMDIO address | |
784 | **/ | |
785 | s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) | |
786 | { | |
787 | return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); | |
788 | } |