Commit | Line | Data |
---|---|---|
11afc1b1 PW |
1 | /******************************************************************************* |
2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | |
8c47eaa7 | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
11afc1b1 PW |
5 | |
6 | This program is free software; you can redistribute it and/or modify it | |
7 | under the terms and conditions of the GNU General Public License, | |
8 | version 2, as published by the Free Software Foundation. | |
9 | ||
10 | This program is distributed in the hope it will be useful, but WITHOUT | |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License along with | |
16 | this program; if not, write to the Free Software Foundation, Inc., | |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | ||
19 | The full GNU General Public License is included in this distribution in | |
20 | the file called "COPYING". | |
21 | ||
22 | Contact Information: | |
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
25 | ||
26 | *******************************************************************************/ | |
27 | ||
28 | #include <linux/pci.h> | |
29 | #include <linux/delay.h> | |
30 | #include <linux/sched.h> | |
31 | ||
32 | #include "ixgbe.h" | |
33 | #include "ixgbe_phy.h" | |
096a58fd | 34 | #include "ixgbe_mbx.h" |
11afc1b1 PW |
35 | |
36 | #define IXGBE_82599_MAX_TX_QUEUES 128 | |
37 | #define IXGBE_82599_MAX_RX_QUEUES 128 | |
38 | #define IXGBE_82599_RAR_ENTRIES 128 | |
39 | #define IXGBE_82599_MC_TBL_SIZE 128 | |
40 | #define IXGBE_82599_VFT_TBL_SIZE 128 | |
41 | ||
5d5b7c39 ET |
42 | static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
43 | static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | |
44 | static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | |
45 | static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | |
46 | ixgbe_link_speed speed, | |
47 | bool autoneg, | |
48 | bool autoneg_wait_to_complete); | |
cd7e1f0b DS |
49 | static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, |
50 | ixgbe_link_speed speed, | |
51 | bool autoneg, | |
52 | bool autoneg_wait_to_complete); | |
5d5b7c39 ET |
53 | static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, |
54 | bool autoneg_wait_to_complete); | |
55 | static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, | |
8620a103 MC |
56 | ixgbe_link_speed speed, |
57 | bool autoneg, | |
58 | bool autoneg_wait_to_complete); | |
8620a103 MC |
59 | static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, |
60 | ixgbe_link_speed speed, | |
61 | bool autoneg, | |
62 | bool autoneg_wait_to_complete); | |
794caeb2 | 63 | static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); |
11afc1b1 | 64 | |
7b25cdba | 65 | static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) |
11afc1b1 PW |
66 | { |
67 | struct ixgbe_mac_info *mac = &hw->mac; | |
c6ecf39a DS |
68 | |
69 | /* enable the laser control functions for SFP+ fiber */ | |
70 | if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { | |
61fac744 PW |
71 | mac->ops.disable_tx_laser = |
72 | &ixgbe_disable_tx_laser_multispeed_fiber; | |
73 | mac->ops.enable_tx_laser = | |
74 | &ixgbe_enable_tx_laser_multispeed_fiber; | |
1097cd17 | 75 | mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; |
11afc1b1 | 76 | } else { |
61fac744 PW |
77 | mac->ops.disable_tx_laser = NULL; |
78 | mac->ops.enable_tx_laser = NULL; | |
1097cd17 | 79 | mac->ops.flap_tx_laser = NULL; |
c6ecf39a DS |
80 | } |
81 | ||
82 | if (hw->phy.multispeed_fiber) { | |
83 | /* Set up dual speed SFP+ support */ | |
84 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; | |
85 | } else { | |
cd7e1f0b DS |
86 | if ((mac->ops.get_media_type(hw) == |
87 | ixgbe_media_type_backplane) && | |
88 | (hw->phy.smart_speed == ixgbe_smart_speed_auto || | |
89 | hw->phy.smart_speed == ixgbe_smart_speed_on)) | |
90 | mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; | |
91 | else | |
92 | mac->ops.setup_link = &ixgbe_setup_mac_link_82599; | |
11afc1b1 PW |
93 | } |
94 | } | |
95 | ||
/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * If an SFP module has been identified, replays the module's init sequence
 * from the EEPROM into the CORECTL register, then restarts the DSP and
 * waits for autonegotiation to leave state 0 before returning to SFI mode.
 * Returns 0 on success or an IXGBE_ERR_* code on failure.
 */
static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 reg_anlp1 = 0;
	u32 i = 0;
	u16 list_offset, data_offset, data_value;

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		/* SFP modules need no PHY reset; drop the callback */
		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
		                                              &data_offset);

		if (ret_val != 0)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != 0) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/*
		 * Stream the EEPROM init words into CORECTL until the
		 * 0xffff end-of-list sentinel is reached.
		 * NOTE(review): eeprom.ops.read() return values are not
		 * checked here — a read failure would feed stale data_value.
		 */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}

		/* Release the semaphore */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msleep(hw->eeprom.semaphore_delay);

		/* Now restart DSP by setting Restart_AN and clearing LMS */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
		                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
		                IXGBE_AUTOC_AN_RESTART));

		/* Wait for AN to leave state 0 (up to 10 x 4ms) */
		for (i = 0; i < 10; i++) {
			msleep(4);
			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
				break;
		}
		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
			hw_dbg(hw, "sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

		/* Restart DSP by setting Restart_AN and return to SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
		                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
		                IXGBE_AUTOC_AN_RESTART));
	}

setup_sfp_out:
	return ret_val;
}
160 | ||
11afc1b1 PW |
161 | static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) |
162 | { | |
163 | struct ixgbe_mac_info *mac = &hw->mac; | |
11afc1b1 | 164 | |
04f165ef | 165 | ixgbe_init_mac_link_ops_82599(hw); |
11afc1b1 | 166 | |
04f165ef PW |
167 | mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; |
168 | mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; | |
169 | mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; | |
170 | mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; | |
171 | mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; | |
21ce849b | 172 | mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); |
11afc1b1 | 173 | |
04f165ef PW |
174 | return 0; |
175 | } | |
11afc1b1 | 176 | |
04f165ef PW |
177 | /** |
178 | * ixgbe_init_phy_ops_82599 - PHY/SFP specific init | |
179 | * @hw: pointer to hardware structure | |
180 | * | |
181 | * Initialize any function pointers that were not able to be | |
182 | * set during get_invariants because the PHY/SFP type was | |
183 | * not known. Perform the SFP init if necessary. | |
184 | * | |
185 | **/ | |
7b25cdba | 186 | static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) |
04f165ef PW |
187 | { |
188 | struct ixgbe_mac_info *mac = &hw->mac; | |
189 | struct ixgbe_phy_info *phy = &hw->phy; | |
190 | s32 ret_val = 0; | |
11afc1b1 | 191 | |
04f165ef PW |
192 | /* Identify the PHY or SFP module */ |
193 | ret_val = phy->ops.identify(hw); | |
194 | ||
195 | /* Setup function pointers based on detected SFP module and speeds */ | |
196 | ixgbe_init_mac_link_ops_82599(hw); | |
11afc1b1 PW |
197 | |
198 | /* If copper media, overwrite with copper function pointers */ | |
199 | if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { | |
200 | mac->ops.setup_link = &ixgbe_setup_copper_link_82599; | |
11afc1b1 | 201 | mac->ops.get_link_capabilities = |
a391f1d5 | 202 | &ixgbe_get_copper_link_capabilities_generic; |
11afc1b1 PW |
203 | } |
204 | ||
04f165ef | 205 | /* Set necessary function pointers based on phy type */ |
11afc1b1 PW |
206 | switch (hw->phy.type) { |
207 | case ixgbe_phy_tn: | |
208 | phy->ops.check_link = &ixgbe_check_phy_link_tnx; | |
209 | phy->ops.get_firmware_version = | |
04f165ef | 210 | &ixgbe_get_phy_firmware_version_tnx; |
11afc1b1 | 211 | break; |
fe15e8e1 DS |
212 | case ixgbe_phy_aq: |
213 | phy->ops.get_firmware_version = | |
214 | &ixgbe_get_phy_firmware_version_generic; | |
215 | break; | |
11afc1b1 PW |
216 | default: |
217 | break; | |
218 | } | |
219 | ||
11afc1b1 PW |
220 | return ret_val; |
221 | } | |
222 | ||
223 | /** | |
224 | * ixgbe_get_link_capabilities_82599 - Determines link capabilities | |
225 | * @hw: pointer to hardware structure | |
226 | * @speed: pointer to link speed | |
227 | * @negotiation: true when autoneg or autotry is enabled | |
228 | * | |
229 | * Determines the link capabilities by reading the AUTOC register. | |
230 | **/ | |
7b25cdba DS |
231 | static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, |
232 | ixgbe_link_speed *speed, | |
233 | bool *negotiation) | |
11afc1b1 PW |
234 | { |
235 | s32 status = 0; | |
1eb99d5a | 236 | u32 autoc = 0; |
11afc1b1 | 237 | |
cb836a97 DS |
238 | /* Determine 1G link capabilities off of SFP+ type */ |
239 | if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || | |
240 | hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { | |
241 | *speed = IXGBE_LINK_SPEED_1GB_FULL; | |
242 | *negotiation = true; | |
243 | goto out; | |
244 | } | |
245 | ||
1eb99d5a PW |
246 | /* |
247 | * Determine link capabilities based on the stored value of AUTOC, | |
248 | * which represents EEPROM defaults. If AUTOC value has not been | |
249 | * stored, use the current register value. | |
250 | */ | |
251 | if (hw->mac.orig_link_settings_stored) | |
252 | autoc = hw->mac.orig_autoc; | |
253 | else | |
254 | autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | |
255 | ||
256 | switch (autoc & IXGBE_AUTOC_LMS_MASK) { | |
11afc1b1 PW |
257 | case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: |
258 | *speed = IXGBE_LINK_SPEED_1GB_FULL; | |
259 | *negotiation = false; | |
260 | break; | |
261 | ||
262 | case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: | |
263 | *speed = IXGBE_LINK_SPEED_10GB_FULL; | |
264 | *negotiation = false; | |
265 | break; | |
266 | ||
267 | case IXGBE_AUTOC_LMS_1G_AN: | |
268 | *speed = IXGBE_LINK_SPEED_1GB_FULL; | |
269 | *negotiation = true; | |
270 | break; | |
271 | ||
272 | case IXGBE_AUTOC_LMS_10G_SERIAL: | |
273 | *speed = IXGBE_LINK_SPEED_10GB_FULL; | |
274 | *negotiation = false; | |
275 | break; | |
276 | ||
277 | case IXGBE_AUTOC_LMS_KX4_KX_KR: | |
278 | case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: | |
279 | *speed = IXGBE_LINK_SPEED_UNKNOWN; | |
1eb99d5a | 280 | if (autoc & IXGBE_AUTOC_KR_SUPP) |
11afc1b1 | 281 | *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
1eb99d5a | 282 | if (autoc & IXGBE_AUTOC_KX4_SUPP) |
11afc1b1 | 283 | *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
1eb99d5a | 284 | if (autoc & IXGBE_AUTOC_KX_SUPP) |
11afc1b1 PW |
285 | *speed |= IXGBE_LINK_SPEED_1GB_FULL; |
286 | *negotiation = true; | |
287 | break; | |
288 | ||
289 | case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: | |
290 | *speed = IXGBE_LINK_SPEED_100_FULL; | |
1eb99d5a | 291 | if (autoc & IXGBE_AUTOC_KR_SUPP) |
11afc1b1 | 292 | *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
1eb99d5a | 293 | if (autoc & IXGBE_AUTOC_KX4_SUPP) |
11afc1b1 | 294 | *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
1eb99d5a | 295 | if (autoc & IXGBE_AUTOC_KX_SUPP) |
11afc1b1 PW |
296 | *speed |= IXGBE_LINK_SPEED_1GB_FULL; |
297 | *negotiation = true; | |
298 | break; | |
299 | ||
300 | case IXGBE_AUTOC_LMS_SGMII_1G_100M: | |
301 | *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; | |
302 | *negotiation = false; | |
303 | break; | |
304 | ||
305 | default: | |
306 | status = IXGBE_ERR_LINK_SETUP; | |
307 | goto out; | |
308 | break; | |
309 | } | |
310 | ||
311 | if (hw->phy.multispeed_fiber) { | |
312 | *speed |= IXGBE_LINK_SPEED_10GB_FULL | | |
313 | IXGBE_LINK_SPEED_1GB_FULL; | |
314 | *negotiation = true; | |
315 | } | |
316 | ||
317 | out: | |
318 | return status; | |
319 | } | |
320 | ||
11afc1b1 PW |
321 | /** |
322 | * ixgbe_get_media_type_82599 - Get media type | |
323 | * @hw: pointer to hardware structure | |
324 | * | |
325 | * Returns the media type (fiber, copper, backplane) | |
326 | **/ | |
7b25cdba | 327 | static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) |
11afc1b1 PW |
328 | { |
329 | enum ixgbe_media_type media_type; | |
330 | ||
331 | /* Detect if there is a copper PHY attached. */ | |
21cc5b4f ET |
332 | switch (hw->phy.type) { |
333 | case ixgbe_phy_cu_unknown: | |
334 | case ixgbe_phy_tn: | |
335 | case ixgbe_phy_aq: | |
11afc1b1 PW |
336 | media_type = ixgbe_media_type_copper; |
337 | goto out; | |
21cc5b4f ET |
338 | default: |
339 | break; | |
11afc1b1 PW |
340 | } |
341 | ||
342 | switch (hw->device_id) { | |
11afc1b1 | 343 | case IXGBE_DEV_ID_82599_KX4: |
dbfec662 | 344 | case IXGBE_DEV_ID_82599_KX4_MEZZ: |
312eb931 | 345 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: |
74757d49 | 346 | case IXGBE_DEV_ID_82599_KR: |
dbffcb21 | 347 | case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: |
1fcf03e6 | 348 | case IXGBE_DEV_ID_82599_XAUI_LOM: |
11afc1b1 PW |
349 | /* Default device ID is mezzanine card KX/KX4 */ |
350 | media_type = ixgbe_media_type_backplane; | |
351 | break; | |
352 | case IXGBE_DEV_ID_82599_SFP: | |
dbffcb21 | 353 | case IXGBE_DEV_ID_82599_SFP_FCOE: |
38ad1c8e | 354 | case IXGBE_DEV_ID_82599_SFP_EM: |
11afc1b1 PW |
355 | media_type = ixgbe_media_type_fiber; |
356 | break; | |
8911184f | 357 | case IXGBE_DEV_ID_82599_CX4: |
6b1be199 | 358 | media_type = ixgbe_media_type_cx4; |
8911184f | 359 | break; |
21cc5b4f ET |
360 | case IXGBE_DEV_ID_82599_T3_LOM: |
361 | media_type = ixgbe_media_type_copper; | |
362 | break; | |
11afc1b1 PW |
363 | default: |
364 | media_type = ixgbe_media_type_unknown; | |
365 | break; | |
366 | } | |
367 | out: | |
368 | return media_type; | |
369 | } | |
370 | ||
371 | /** | |
8620a103 | 372 | * ixgbe_start_mac_link_82599 - Setup MAC link settings |
11afc1b1 | 373 | * @hw: pointer to hardware structure |
8620a103 | 374 | * @autoneg_wait_to_complete: true when waiting for completion is needed |
11afc1b1 PW |
375 | * |
376 | * Configures link settings based on values in the ixgbe_hw struct. | |
377 | * Restarts the link. Performs autonegotiation if needed. | |
378 | **/ | |
5d5b7c39 | 379 | static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, |
8620a103 | 380 | bool autoneg_wait_to_complete) |
11afc1b1 PW |
381 | { |
382 | u32 autoc_reg; | |
383 | u32 links_reg; | |
384 | u32 i; | |
385 | s32 status = 0; | |
386 | ||
387 | /* Restart link */ | |
388 | autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | |
389 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; | |
390 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | |
391 | ||
392 | /* Only poll for autoneg to complete if specified to do so */ | |
8620a103 | 393 | if (autoneg_wait_to_complete) { |
11afc1b1 PW |
394 | if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == |
395 | IXGBE_AUTOC_LMS_KX4_KX_KR || | |
396 | (autoc_reg & IXGBE_AUTOC_LMS_MASK) == | |
397 | IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || | |
398 | (autoc_reg & IXGBE_AUTOC_LMS_MASK) == | |
399 | IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { | |
400 | links_reg = 0; /* Just in case Autoneg time = 0 */ | |
401 | for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { | |
402 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); | |
403 | if (links_reg & IXGBE_LINKS_KX_AN_COMP) | |
404 | break; | |
405 | msleep(100); | |
406 | } | |
407 | if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { | |
408 | status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; | |
409 | hw_dbg(hw, "Autoneg did not complete.\n"); | |
410 | } | |
411 | } | |
412 | } | |
413 | ||
11afc1b1 PW |
414 | /* Add delay to filter out noises during initial link setup */ |
415 | msleep(50); | |
416 | ||
417 | return status; | |
418 | } | |
419 | ||
8c7bea32 ET |
420 | /** |
421 | * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser | |
422 | * @hw: pointer to hardware structure | |
423 | * | |
424 | * The base drivers may require better control over SFP+ module | |
425 | * PHY states. This includes selectively shutting down the Tx | |
426 | * laser on the PHY, effectively halting physical link. | |
427 | **/ | |
5d5b7c39 | 428 | static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
61fac744 PW |
429 | { |
430 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | |
431 | ||
432 | /* Disable tx laser; allow 100us to go dark per spec */ | |
433 | esdp_reg |= IXGBE_ESDP_SDP3; | |
434 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | |
435 | IXGBE_WRITE_FLUSH(hw); | |
436 | udelay(100); | |
437 | } | |
438 | ||
439 | /** | |
440 | * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser | |
441 | * @hw: pointer to hardware structure | |
442 | * | |
443 | * The base drivers may require better control over SFP+ module | |
444 | * PHY states. This includes selectively turning on the Tx | |
445 | * laser on the PHY, effectively starting physical link. | |
446 | **/ | |
5d5b7c39 | 447 | static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
61fac744 PW |
448 | { |
449 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | |
450 | ||
451 | /* Enable tx laser; allow 100ms to light up */ | |
452 | esdp_reg &= ~IXGBE_ESDP_SDP3; | |
453 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | |
454 | IXGBE_WRITE_FLUSH(hw); | |
455 | msleep(100); | |
456 | } | |
457 | ||
1097cd17 MC |
458 | /** |
459 | * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser | |
460 | * @hw: pointer to hardware structure | |
461 | * | |
462 | * When the driver changes the link speeds that it can support, | |
463 | * it sets autotry_restart to true to indicate that we need to | |
464 | * initiate a new autotry session with the link partner. To do | |
465 | * so, we set the speed then disable and re-enable the tx laser, to | |
466 | * alert the link partner that it also needs to restart autotry on its | |
467 | * end. This is consistent with true clause 37 autoneg, which also | |
468 | * involves a loss of signal. | |
469 | **/ | |
5d5b7c39 | 470 | static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
1097cd17 | 471 | { |
1097cd17 MC |
472 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); |
473 | ||
474 | if (hw->mac.autotry_restart) { | |
61fac744 PW |
475 | ixgbe_disable_tx_laser_multispeed_fiber(hw); |
476 | ixgbe_enable_tx_laser_multispeed_fiber(hw); | |
1097cd17 MC |
477 | hw->mac.autotry_restart = false; |
478 | } | |
479 | } | |
480 | ||
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 *
 * Tries each requested speed in software, highest first (10G then 1G),
 * because 10G fiber does not support speed autonegotiation.  For each
 * attempt the module rate-select pins (SDP5) are driven, the MAC link is
 * set up, the tx laser is flapped to signal the partner, and link is
 * polled.  If no speed links up, recurses once with only the highest
 * speed that was requested.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
                                          bool autoneg_wait_to_complete)
{
	s32 status = 0;
	ixgbe_link_speed phy_link_speed;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	bool link_up = false;
	bool negotiation;
	int i;

	/* Mask off requested but non-supported speeds */
	hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
	speed &= phy_link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);

		if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 high selects 10G) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msleep(40);

		status = ixgbe_setup_mac_link_82599(hw,
		                                    IXGBE_LINK_SPEED_10GB_FULL,
		                                    autoneg,
		                                    autoneg_wait_to_complete);
		if (status != 0)
			return status;

		/* Flap the tx laser if it has not already been done */
		hw->mac.ops.flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msleep(100);

			/* If we have link, just jump out */
			hw->mac.ops.check_link(hw, &phy_link_speed,
			                       &link_up, false);
			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);

		if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 low selects 1G) */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msleep(40);

		status = ixgbe_setup_mac_link_82599(hw,
		                                    IXGBE_LINK_SPEED_1GB_FULL,
		                                    autoneg,
		                                    autoneg_wait_to_complete);
		if (status != 0)
			return status;

		/* Flap the tx laser if it has not already been done */
		hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msleep(100);

		/* If we have link, just jump out */
		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
		                                               highest_link_speed,
		                                               autoneg,
		                                               autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
619 | ||
cd7e1f0b DS |
/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm: first try full advertisement
 * several times; if link never comes up and both KR and KX4/KX were
 * advertised, disable KR (smart_speed_active) and retry, falling back to
 * full advertisement if that also fails.
 **/
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed, bool autoneg,
                                           bool autoneg_wait_to_complete)
{
	s32 status = 0;
	ixgbe_link_speed link_speed;
	s32 i, j;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	/* presumably referenced by the e_info() macro below — do not remove */
	struct ixgbe_adapter *adapter = hw->back;

	hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = false;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
		                                    autoneg_wait_to_complete);
		if (status)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			mdelay(100);

			/* If we have link, just jump out */
			hw->mac.ops.check_link(hw, &link_speed,
			                       &link_up, false);
			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = true;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
	                                    autoneg_wait_to_complete);
	if (status)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		mdelay(100);

		/* If we have link, just jump out */
		hw->mac.ops.check_link(hw, &link_speed,
		                       &link_up, false);
		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = false;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
	                                    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		e_info(hw, "Smartspeed has downgraded the link speed from "
		       "the maximum advertised\n");
	return status;
}
728 | ||
11afc1b1 | 729 | /** |
8620a103 | 730 | * ixgbe_setup_mac_link_82599 - Set MAC link speed |
11afc1b1 PW |
731 | * @hw: pointer to hardware structure |
732 | * @speed: new link speed | |
733 | * @autoneg: true if autonegotiation enabled | |
734 | * @autoneg_wait_to_complete: true when waiting for completion is needed | |
735 | * | |
736 | * Set the link speed in the AUTOC register and restarts link. | |
737 | **/ | |
5d5b7c39 | 738 | static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, |
8620a103 MC |
739 | ixgbe_link_speed speed, bool autoneg, |
740 | bool autoneg_wait_to_complete) | |
11afc1b1 PW |
741 | { |
742 | s32 status = 0; | |
743 | u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | |
744 | u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | |
50ac58ba | 745 | u32 start_autoc = autoc; |
1eb99d5a | 746 | u32 orig_autoc = 0; |
11afc1b1 PW |
747 | u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; |
748 | u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; | |
749 | u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; | |
750 | u32 links_reg; | |
751 | u32 i; | |
752 | ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; | |
753 | ||
754 | /* Check to see if speed passed in is supported. */ | |
755 | hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); | |
756 | speed &= link_capabilities; | |
757 | ||
50ac58ba PWJ |
758 | if (speed == IXGBE_LINK_SPEED_UNKNOWN) { |
759 | status = IXGBE_ERR_LINK_SETUP; | |
760 | goto out; | |
761 | } | |
762 | ||
1eb99d5a PW |
763 | /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ |
764 | if (hw->mac.orig_link_settings_stored) | |
765 | orig_autoc = hw->mac.orig_autoc; | |
766 | else | |
767 | orig_autoc = autoc; | |
768 | ||
50ac58ba PWJ |
769 | if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || |
770 | link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || | |
771 | link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { | |
11afc1b1 PW |
772 | /* Set KX4/KX/KR support according to speed requested */ |
773 | autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); | |
774 | if (speed & IXGBE_LINK_SPEED_10GB_FULL) | |
1eb99d5a | 775 | if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) |
11afc1b1 | 776 | autoc |= IXGBE_AUTOC_KX4_SUPP; |
cd7e1f0b DS |
777 | if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && |
778 | (hw->phy.smart_speed_active == false)) | |
11afc1b1 PW |
779 | autoc |= IXGBE_AUTOC_KR_SUPP; |
780 | if (speed & IXGBE_LINK_SPEED_1GB_FULL) | |
781 | autoc |= IXGBE_AUTOC_KX_SUPP; | |
782 | } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && | |
783 | (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || | |
784 | link_mode == IXGBE_AUTOC_LMS_1G_AN)) { | |
785 | /* Switch from 1G SFI to 10G SFI if requested */ | |
786 | if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && | |
787 | (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { | |
788 | autoc &= ~IXGBE_AUTOC_LMS_MASK; | |
789 | autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; | |
790 | } | |
791 | } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && | |
792 | (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { | |
793 | /* Switch from 10G SFI to 1G SFI if requested */ | |
794 | if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && | |
795 | (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { | |
796 | autoc &= ~IXGBE_AUTOC_LMS_MASK; | |
797 | if (autoneg) | |
798 | autoc |= IXGBE_AUTOC_LMS_1G_AN; | |
799 | else | |
800 | autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; | |
801 | } | |
802 | } | |
803 | ||
50ac58ba | 804 | if (autoc != start_autoc) { |
11afc1b1 PW |
805 | /* Restart link */ |
806 | autoc |= IXGBE_AUTOC_AN_RESTART; | |
807 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); | |
808 | ||
809 | /* Only poll for autoneg to complete if specified to do so */ | |
810 | if (autoneg_wait_to_complete) { | |
811 | if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || | |
812 | link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || | |
813 | link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { | |
814 | links_reg = 0; /*Just in case Autoneg time=0*/ | |
815 | for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { | |
816 | links_reg = | |
817 | IXGBE_READ_REG(hw, IXGBE_LINKS); | |
818 | if (links_reg & IXGBE_LINKS_KX_AN_COMP) | |
819 | break; | |
820 | msleep(100); | |
821 | } | |
822 | if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { | |
823 | status = | |
824 | IXGBE_ERR_AUTONEG_NOT_COMPLETE; | |
825 | hw_dbg(hw, "Autoneg did not " | |
826 | "complete.\n"); | |
827 | } | |
828 | } | |
829 | } | |
830 | ||
11afc1b1 PW |
831 | /* Add delay to filter out noises during initial link setup */ |
832 | msleep(50); | |
833 | } | |
834 | ||
50ac58ba | 835 | out: |
11afc1b1 PW |
836 | return status; |
837 | } | |
838 | ||
839 | /** | |
8620a103 | 840 | * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field |
11afc1b1 PW |
841 | * @hw: pointer to hardware structure |
842 | * @speed: new link speed | |
843 | * @autoneg: true if autonegotiation enabled | |
844 | * @autoneg_wait_to_complete: true if waiting is needed to complete | |
845 | * | |
846 | * Restarts link on PHY and MAC based on settings passed in. | |
847 | **/ | |
8620a103 MC |
848 | static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, |
849 | ixgbe_link_speed speed, | |
850 | bool autoneg, | |
851 | bool autoneg_wait_to_complete) | |
11afc1b1 PW |
852 | { |
853 | s32 status; | |
854 | ||
855 | /* Setup the PHY according to input speed */ | |
856 | status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, | |
857 | autoneg_wait_to_complete); | |
858 | /* Set up MAC */ | |
8620a103 | 859 | ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); |
11afc1b1 PW |
860 | |
861 | return status; | |
862 | } | |
863 | ||
/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_SUPPORTED if the fitted SFP
 * module is unusable, or IXGBE_ERR_RESET_FAILED if the reset bit never
 * self-clears.
 **/
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
        s32 status = 0;
        u32 ctrl;
        u32 i;
        u32 autoc;
        u32 autoc2;

        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);

        /* PHY ops must be identified and initialized prior to reset */

        /* Init PHY and function pointers, perform SFP setup */
        status = hw->phy.ops.init(hw);

        /* An unsupported SFP module is fatal - bail before touching the MAC */
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
                goto reset_hw_out;

        /* Setup SFP module if there is one present. */
        if (hw->phy.sfp_setup_needed) {
                status = hw->mac.ops.setup_sfp(hw);
                hw->phy.sfp_setup_needed = false;
        }

        /* Reset PHY, unless a PHY reset has been explicitly disabled */
        if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
                hw->phy.ops.reset(hw);

        /*
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
        ixgbe_disable_pcie_master(hw);

mac_reset_top:
        /*
         * Issue global reset to the MAC.  This needs to be a SW reset.
         * If link reset is used, it might reset the MAC when mng is using it
         */
        ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
        IXGBE_WRITE_FLUSH(hw);

        /* Poll for reset bit to self-clear indicating reset is complete */
        for (i = 0; i < 10; i++) {
                udelay(1);
                ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
                if (!(ctrl & IXGBE_CTRL_RST))
                        break;
        }
        if (ctrl & IXGBE_CTRL_RST) {
                status = IXGBE_ERR_RESET_FAILED;
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }

        /*
         * Double resets are required for recovery from certain error
         * conditions.  Between resets, it is necessary to stall to allow time
         * for any pending HW events to complete.  We use 1usec since that is
         * what is needed for ixgbe_disable_pcie_master().  The second reset
         * then clears out any effects of those events.
         */
        if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
                hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
                udelay(1);
                goto mac_reset_top;
        }

        /* Allow hardware to settle after the (final) reset */
        msleep(50);

        /*
         * Store the original AUTOC/AUTOC2 values if they have not been
         * stored off yet.  Otherwise restore the stored original
         * values since the reset operation sets back to defaults.
         */
        autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
        if (hw->mac.orig_link_settings_stored == false) {
                hw->mac.orig_autoc = autoc;
                hw->mac.orig_autoc2 = autoc2;
                hw->mac.orig_link_settings_stored = true;
        } else {
                if (autoc != hw->mac.orig_autoc)
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
                                        IXGBE_AUTOC_AN_RESTART));

                /* only the upper bits of AUTOC2 are restored */
                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
                        autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
                        autoc2 |= (hw->mac.orig_autoc2 &
                                   IXGBE_AUTOC2_UPPER_MASK);
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
                }
        }

        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table.  Also reset num_rar_entries to 128,
         * since we modify this value when programming the SAN MAC address.
         */
        hw->mac.num_rar_entries = 128;
        hw->mac.ops.init_rx_addrs(hw);

        /* Store the permanent SAN mac address */
        hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
                                    hw->mac.san_addr, 0, IXGBE_RAH_AV);

                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
        }

        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
                                   &hw->mac.wwpn_prefix);

reset_hw_out:
        return status;
}
998 | ||
ffff4772 PWJ |
/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Returns 0 on success, or IXGBE_ERR_FDIR_REINIT_FAILED when either the
 * previous FDIRCMD command fails to complete or the re-written FDIRCTRL
 * never reports INIT_DONE within the polling window.
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
        int i;
        u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

        /*
         * Before starting reinitialization process,
         * FDIRCMD.CMD must be zero.
         */
        for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
                if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
                      IXGBE_FDIRCMD_CMD_MASK))
                        break;
                udelay(10);
        }
        if (i >= IXGBE_FDIRCMD_CMD_POLL) {
                hw_dbg(hw, "Flow Director previous command isn't complete, "
                       "aborting table re-initialization.\n");
                return IXGBE_ERR_FDIR_REINIT_FAILED;
        }

        IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
        IXGBE_WRITE_FLUSH(hw);
        /*
         * 82599 adapters flow director init flow cannot be restarted,
         * Workaround 82599 silicon errata by performing the following steps
         * before re-writing the FDIRCTRL control register with the same value.
         * - write 1 to bit 8 of FDIRCMD register &
         * - write 0 to bit 8 of FDIRCMD register
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
                         IXGBE_FDIRCMD_CLEARHT));
        IXGBE_WRITE_FLUSH(hw);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
                         ~IXGBE_FDIRCMD_CLEARHT));
        IXGBE_WRITE_FLUSH(hw);
        /*
         * Clear FDIR Hash register to clear any leftover hashes
         * waiting to be programmed.
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
        IXGBE_WRITE_FLUSH(hw);

        /* Re-write FDIRCTRL with INIT_DONE cleared to restart the init flow */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
        IXGBE_WRITE_FLUSH(hw);

        /* Poll init-done after we write FDIRCTRL register */
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
                                   IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                udelay(10);
        }
        if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
                hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
                return IXGBE_ERR_FDIR_REINIT_FAILED;
        }

        /* Clear FDIR statistics registers (read to clear) */
        IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
        IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
        IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
        IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
        IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

        return 0;
}
1073 | ||
1074 | /** | |
1075 | * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters | |
1076 | * @hw: pointer to hardware structure | |
1077 | * @pballoc: which mode to allocate filters with | |
1078 | **/ | |
1079 | s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) | |
1080 | { | |
1081 | u32 fdirctrl = 0; | |
1082 | u32 pbsize; | |
1083 | int i; | |
1084 | ||
1085 | /* | |
1086 | * Before enabling Flow Director, the Rx Packet Buffer size | |
1087 | * must be reduced. The new value is the current size minus | |
1088 | * flow director memory usage size. | |
1089 | */ | |
1090 | pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); | |
1091 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), | |
1092 | (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); | |
1093 | ||
1094 | /* | |
1095 | * The defaults in the HW for RX PB 1-7 are not zero and so should be | |
b595076a | 1096 | * initialized to zero for non DCB mode otherwise actual total RX PB |
ffff4772 PWJ |
1097 | * would be bigger than programmed and filter space would run into |
1098 | * the PB 0 region. | |
1099 | */ | |
1100 | for (i = 1; i < 8; i++) | |
1101 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); | |
1102 | ||
1103 | /* Send interrupt when 64 filters are left */ | |
1104 | fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; | |
1105 | ||
1106 | /* Set the maximum length per hash bucket to 0xA filters */ | |
1107 | fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; | |
1108 | ||
1109 | switch (pballoc) { | |
1110 | case IXGBE_FDIR_PBALLOC_64K: | |
1111 | /* 8k - 1 signature filters */ | |
1112 | fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; | |
1113 | break; | |
1114 | case IXGBE_FDIR_PBALLOC_128K: | |
1115 | /* 16k - 1 signature filters */ | |
1116 | fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; | |
1117 | break; | |
1118 | case IXGBE_FDIR_PBALLOC_256K: | |
1119 | /* 32k - 1 signature filters */ | |
1120 | fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; | |
1121 | break; | |
1122 | default: | |
1123 | /* bad value */ | |
1124 | return IXGBE_ERR_CONFIG; | |
1125 | }; | |
1126 | ||
1127 | /* Move the flexible bytes to use the ethertype - shift 6 words */ | |
1128 | fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); | |
1129 | ||
ffff4772 PWJ |
1130 | |
1131 | /* Prime the keys for hashing */ | |
905e4a41 AD |
1132 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); |
1133 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); | |
ffff4772 PWJ |
1134 | |
1135 | /* | |
1136 | * Poll init-done after we write the register. Estimated times: | |
1137 | * 10G: PBALLOC = 11b, timing is 60us | |
1138 | * 1G: PBALLOC = 11b, timing is 600us | |
1139 | * 100M: PBALLOC = 11b, timing is 6ms | |
1140 | * | |
1141 | * Multiple these timings by 4 if under full Rx load | |
1142 | * | |
1143 | * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for | |
1144 | * 1 msec per poll time. If we're at line rate and drop to 100M, then | |
1145 | * this might not finish in our poll time, but we can live with that | |
1146 | * for now. | |
1147 | */ | |
1148 | IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); | |
1149 | IXGBE_WRITE_FLUSH(hw); | |
1150 | for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { | |
1151 | if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & | |
1152 | IXGBE_FDIRCTRL_INIT_DONE) | |
1153 | break; | |
1154 | msleep(1); | |
1155 | } | |
1156 | if (i >= IXGBE_FDIR_INIT_DONE_POLL) | |
1157 | hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); | |
1158 | ||
1159 | return 0; | |
1160 | } | |
1161 | ||
1162 | /** | |
1163 | * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters | |
1164 | * @hw: pointer to hardware structure | |
1165 | * @pballoc: which mode to allocate filters with | |
1166 | **/ | |
1167 | s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) | |
1168 | { | |
1169 | u32 fdirctrl = 0; | |
1170 | u32 pbsize; | |
1171 | int i; | |
1172 | ||
1173 | /* | |
1174 | * Before enabling Flow Director, the Rx Packet Buffer size | |
1175 | * must be reduced. The new value is the current size minus | |
1176 | * flow director memory usage size. | |
1177 | */ | |
1178 | pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); | |
1179 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), | |
1180 | (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); | |
1181 | ||
1182 | /* | |
1183 | * The defaults in the HW for RX PB 1-7 are not zero and so should be | |
b595076a | 1184 | * initialized to zero for non DCB mode otherwise actual total RX PB |
ffff4772 PWJ |
1185 | * would be bigger than programmed and filter space would run into |
1186 | * the PB 0 region. | |
1187 | */ | |
1188 | for (i = 1; i < 8; i++) | |
1189 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); | |
1190 | ||
1191 | /* Send interrupt when 64 filters are left */ | |
1192 | fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; | |
1193 | ||
9a713e7c PW |
1194 | /* Initialize the drop queue to Rx queue 127 */ |
1195 | fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT); | |
1196 | ||
ffff4772 PWJ |
1197 | switch (pballoc) { |
1198 | case IXGBE_FDIR_PBALLOC_64K: | |
1199 | /* 2k - 1 perfect filters */ | |
1200 | fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; | |
1201 | break; | |
1202 | case IXGBE_FDIR_PBALLOC_128K: | |
1203 | /* 4k - 1 perfect filters */ | |
1204 | fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; | |
1205 | break; | |
1206 | case IXGBE_FDIR_PBALLOC_256K: | |
1207 | /* 8k - 1 perfect filters */ | |
1208 | fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; | |
1209 | break; | |
1210 | default: | |
1211 | /* bad value */ | |
1212 | return IXGBE_ERR_CONFIG; | |
1213 | }; | |
1214 | ||
1215 | /* Turn perfect match filtering on */ | |
1216 | fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; | |
1217 | fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; | |
1218 | ||
1219 | /* Move the flexible bytes to use the ethertype - shift 6 words */ | |
1220 | fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); | |
1221 | ||
1222 | /* Prime the keys for hashing */ | |
905e4a41 AD |
1223 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); |
1224 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); | |
ffff4772 PWJ |
1225 | |
1226 | /* | |
1227 | * Poll init-done after we write the register. Estimated times: | |
1228 | * 10G: PBALLOC = 11b, timing is 60us | |
1229 | * 1G: PBALLOC = 11b, timing is 600us | |
1230 | * 100M: PBALLOC = 11b, timing is 6ms | |
1231 | * | |
1232 | * Multiple these timings by 4 if under full Rx load | |
1233 | * | |
1234 | * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for | |
1235 | * 1 msec per poll time. If we're at line rate and drop to 100M, then | |
1236 | * this might not finish in our poll time, but we can live with that | |
1237 | * for now. | |
1238 | */ | |
1239 | ||
1240 | /* Set the maximum length per hash bucket to 0xA filters */ | |
1241 | fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); | |
1242 | ||
1243 | IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); | |
1244 | IXGBE_WRITE_FLUSH(hw); | |
1245 | for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { | |
1246 | if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & | |
1247 | IXGBE_FDIRCTRL_INIT_DONE) | |
1248 | break; | |
1249 | msleep(1); | |
1250 | } | |
1251 | if (i >= IXGBE_FDIR_INIT_DONE_POLL) | |
1252 | hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n"); | |
1253 | ||
1254 | return 0; | |
1255 | } | |
1256 | ||
1257 | ||
1258 | /** | |
1259 | * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR | |
1260 | * @stream: input bitstream to compute the hash on | |
1261 | * @key: 32-bit hash key | |
1262 | **/ | |
905e4a41 AD |
1263 | static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, |
1264 | u32 key) | |
ffff4772 PWJ |
1265 | { |
1266 | /* | |
1267 | * The algorithm is as follows: | |
1268 | * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 | |
1269 | * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] | |
1270 | * and A[n] x B[n] is bitwise AND between same length strings | |
1271 | * | |
1272 | * K[n] is 16 bits, defined as: | |
1273 | * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] | |
1274 | * for n modulo 32 < 15, K[n] = | |
1275 | * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] | |
1276 | * | |
1277 | * S[n] is 16 bits, defined as: | |
1278 | * for n >= 15, S[n] = S[n:n - 15] | |
1279 | * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] | |
1280 | * | |
1281 | * To simplify for programming, the algorithm is implemented | |
1282 | * in software this way: | |
1283 | * | |
905e4a41 AD |
1284 | * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] |
1285 | * | |
1286 | * for (i = 0; i < 352; i+=32) | |
1287 | * hi_hash_dword[31:0] ^= Stream[(i+31):i]; | |
1288 | * | |
1289 | * lo_hash_dword[15:0] ^= Stream[15:0]; | |
1290 | * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; | |
1291 | * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; | |
ffff4772 | 1292 | * |
905e4a41 | 1293 | * hi_hash_dword[31:0] ^= Stream[351:320]; |
ffff4772 | 1294 | * |
905e4a41 AD |
1295 | * if(key[0]) |
1296 | * hash[15:0] ^= Stream[15:0]; | |
1297 | * | |
1298 | * for (i = 0; i < 16; i++) { | |
1299 | * if (key[i]) | |
1300 | * hash[15:0] ^= lo_hash_dword[(i+15):i]; | |
1301 | * if (key[i + 16]) | |
1302 | * hash[15:0] ^= hi_hash_dword[(i+15):i]; | |
ffff4772 | 1303 | * } |
905e4a41 | 1304 | * |
ffff4772 | 1305 | */ |
905e4a41 AD |
1306 | __be32 common_hash_dword = 0; |
1307 | u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; | |
1308 | u32 hash_result = 0; | |
1309 | u8 i; | |
ffff4772 | 1310 | |
905e4a41 AD |
1311 | /* record the flow_vm_vlan bits as they are a key part to the hash */ |
1312 | flow_vm_vlan = ntohl(atr_input->dword_stream[0]); | |
ffff4772 | 1313 | |
905e4a41 AD |
1314 | /* generate common hash dword */ |
1315 | for (i = 10; i; i -= 2) | |
1316 | common_hash_dword ^= atr_input->dword_stream[i] ^ | |
1317 | atr_input->dword_stream[i - 1]; | |
ffff4772 | 1318 | |
905e4a41 | 1319 | hi_hash_dword = ntohl(common_hash_dword); |
ffff4772 | 1320 | |
905e4a41 AD |
1321 | /* low dword is word swapped version of common */ |
1322 | lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); | |
ffff4772 | 1323 | |
905e4a41 AD |
1324 | /* apply flow ID/VM pool/VLAN ID bits to hash words */ |
1325 | hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); | |
ffff4772 | 1326 | |
905e4a41 AD |
1327 | /* Process bits 0 and 16 */ |
1328 | if (key & 0x0001) hash_result ^= lo_hash_dword; | |
1329 | if (key & 0x00010000) hash_result ^= hi_hash_dword; | |
ffff4772 PWJ |
1330 | |
1331 | /* | |
905e4a41 AD |
1332 | * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to |
1333 | * delay this because bit 0 of the stream should not be processed | |
1334 | * so we do not add the vlan until after bit 0 was processed | |
ffff4772 | 1335 | */ |
905e4a41 | 1336 | lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); |
ffff4772 | 1337 | |
905e4a41 AD |
1338 | |
1339 | /* process the remaining 30 bits in the key 2 bits at a time */ | |
1340 | for (i = 15; i; i-- ) { | |
1341 | if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; | |
1342 | if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; | |
ffff4772 PWJ |
1343 | } |
1344 | ||
905e4a41 | 1345 | return hash_result & IXGBE_ATR_HASH_MASK; |
ffff4772 PWJ |
1346 | } |
1347 | ||
69830529 AD |
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15.  Relies on locals common_hash, bucket_hash,
 * sig_hash, lo_hash_dword and hi_hash_dword being in scope at the call site.
 *
 * Note: no semicolon after "while (0)" - the caller supplies it, keeping
 * each expansion a single statement (safe in unbraced if/else bodies).
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
                (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
        u32 n = (_n); \
        if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
                common_hash ^= lo_hash_dword >> n; \
        else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
                bucket_hash ^= lo_hash_dword >> n; \
        else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
                sig_hash ^= lo_hash_dword << (16 - n); \
        if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
                common_hash ^= hi_hash_dword >> n; \
        else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
                bucket_hash ^= hi_hash_dword >> n; \
        else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
                sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1371 | ||
/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: unique input dword (flow_type/vlan fields)
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Returns the signature hash in the upper 16 bits combined with the
 * bucket hash in the lower 16 bits.
 **/
static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
                                            union ixgbe_atr_hash_dword common)
{
        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
        u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

        /* record the flow_vm_vlan bits as they are a key part to the hash */
        flow_vm_vlan = ntohl(input.dword);

        /* generate common hash dword */
        hi_hash_dword = ntohl(common.dword);

        /* low dword is word swapped version of common */
        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

        /* apply flow ID/VM pool/VLAN ID bits to hash words */
        hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

        /* Process bits 0 and 16 */
        IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

        /*
         * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
         * delay this because bit 0 of the stream should not be processed
         * so we do not add the vlan until after bit 0 was processed
         */
        lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

        /* Process remaining 30 bit of the key */
        IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
        IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

        /* combine common_hash result with signature and bucket hashes */
        bucket_hash ^= common_hash;
        bucket_hash &= IXGBE_ATR_HASH_MASK;

        sig_hash ^= common_hash << 16;
        sig_hash &= IXGBE_ATR_HASH_MASK << 16;

        /* return completed signature hash */
        return sig_hash ^ bucket_hash;
}
1437 | ||
ffff4772 PWJ |
/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Returns 0 on success, or IXGBE_ERR_CONFIG for an unsupported flow type.
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                          union ixgbe_atr_hash_dword input,
                                          union ixgbe_atr_hash_dword common,
                                          u8 queue)
{
        u64 fdirhashcmd;
        u32 fdircmd;

        /*
         * Get the flow_type in order to program FDIRCMD properly
         * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
         */
        switch (input.formatted.flow_type) {
        case IXGBE_ATR_FLOW_TYPE_TCPV4:
        case IXGBE_ATR_FLOW_TYPE_UDPV4:
        case IXGBE_ATR_FLOW_TYPE_SCTPV4:
        case IXGBE_ATR_FLOW_TYPE_TCPV6:
        case IXGBE_ATR_FLOW_TYPE_UDPV6:
        case IXGBE_ATR_FLOW_TYPE_SCTPV6:
                break;
        default:
                hw_dbg(hw, " Error on flow type input\n");
                return IXGBE_ERR_CONFIG;
        }

        /* configure FDIRCMD register */
        fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
                  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

        /*
         * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
         * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
         */
        fdirhashcmd = (u64)fdircmd << 32;
        fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);

        IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

        hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

        return 0;
}
1489 | ||
45b9f509 AD |
1490 | /** |
1491 | * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks | |
1492 | * @input_mask: mask to be bit swapped | |
1493 | * | |
1494 | * The source and destination port masks for flow director are bit swapped | |
1495 | * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to | |
1496 | * generate a correctly swapped value we need to bit swap the mask and that | |
1497 | * is what is accomplished by this function. | |
1498 | **/ | |
1499 | static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) | |
1500 | { | |
1501 | u32 mask = ntohs(input_masks->dst_port_mask); | |
1502 | mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; | |
1503 | mask |= ntohs(input_masks->src_port_mask); | |
1504 | mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); | |
1505 | mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); | |
1506 | mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); | |
1507 | return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); | |
1508 | } | |
1509 | ||
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 *
 * NOTE: _value is evaluated multiple times - do not pass expressions
 * with side effects.
 */
#define IXGBE_STORE_AS_BE32(_value) \
        (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
         (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
        IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

/* swap the two bytes of a 16-bit value */
#define IXGBE_STORE_AS_BE16(_value) \
        (((u16)(_value) >> 8) | ((u16)(_value) << 8))
1526 | ||
ffff4772 PWJ |
/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_masks: bitwise masks for relevant fields
 * @soft_id: software index into the silicon hash tables for filter storage
 * @queue: queue index to direct traffic to
 *
 * Programs the flow-director mask registers from @input_masks, applies the
 * masks to @input, then writes the filter fields (VLAN, flex bytes, ports,
 * addresses) and issues the ADD_FLOW command.
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
                                        union ixgbe_atr_input *input,
                                        struct ixgbe_atr_input_masks *input_masks,
                                        u16 soft_id, u8 queue)
{
	u32 fdirhash;
	u32 fdircmd;
	u32 fdirport, fdirtcpm;
	u32 fdirvlan;
	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;
		/* fall through - raw IP has no ports, so the port-mask check
		 * below applies to it as well */
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
			hw_dbg(hw, " Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		break;
	default:
		hw_dbg(hw, " Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* Program FDIRM - only the four exact mask combinations the hardware
	 * supports are accepted; anything else is a configuration error */
	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
	case 0xEFFF:
		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xE000:
		/* Unmask VLAN prio - bit 1 */
		fdirm &= ~IXGBE_FDIRM_VLANP;
		break;
	case 0x0FFF:
		/* Unmask VLAN ID - bit 0 */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		break;
	case 0x0000:
		/* do nothing, vlans already masked */
		break;
	default:
		hw_dbg(hw, " Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* flex bytes are all-or-nothing: either fully unmasked or left masked */
	if (input_masks->flex_mask & 0xFFFF) {
		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
			hw_dbg(hw, " Error on flexible byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* Unmask Flex Bytes - bit 4 */
		fdirm &= ~IXGBE_FDIRM_FLEX;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_masks->src_ip_mask[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_masks->dst_ip_mask[0]);

	/* Apply masks to input data so the programmed filter matches exactly
	 * what the hardware will compare after masking */
	input->formatted.vlan_id &= input_masks->vlan_id_mask;
	input->formatted.flex_bytes &= input_masks->flex_mask;
	input->formatted.src_port &= input_masks->src_port_mask;
	input->formatted.dst_port &= input_masks->dst_port_mask;
	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan =
		IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* record source and destination port (little-endian)*/
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/* we only want the bucket hash so drop the upper 16 bits */
	fdirhash = ixgbe_atr_compute_hash_82599(input,
						IXGBE_ATR_BUCKET_HASH_KEY);
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;

	/* FDIRHASH must be written before FDIRCMD; the CMD write kicks off
	 * the filter add in hardware */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return 0;
}
45b9f509 | 1671 | |
11afc1b1 PW |
1672 | /** |
1673 | * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register | |
1674 | * @hw: pointer to hardware structure | |
1675 | * @reg: analog register to read | |
1676 | * @val: read value | |
1677 | * | |
1678 | * Performs read operation to Omer analog register specified. | |
1679 | **/ | |
7b25cdba | 1680 | static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) |
11afc1b1 PW |
1681 | { |
1682 | u32 core_ctl; | |
1683 | ||
1684 | IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | | |
1685 | (reg << 8)); | |
1686 | IXGBE_WRITE_FLUSH(hw); | |
1687 | udelay(10); | |
1688 | core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); | |
1689 | *val = (u8)core_ctl; | |
1690 | ||
1691 | return 0; | |
1692 | } | |
1693 | ||
1694 | /** | |
1695 | * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register | |
1696 | * @hw: pointer to hardware structure | |
1697 | * @reg: atlas register to write | |
1698 | * @val: value to write | |
1699 | * | |
1700 | * Performs write operation to Omer analog register specified. | |
1701 | **/ | |
7b25cdba | 1702 | static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) |
11afc1b1 PW |
1703 | { |
1704 | u32 core_ctl; | |
1705 | ||
1706 | core_ctl = (reg << 8) | val; | |
1707 | IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); | |
1708 | IXGBE_WRITE_FLUSH(hw); | |
1709 | udelay(10); | |
1710 | ||
1711 | return 0; | |
1712 | } | |
1713 | ||
1714 | /** | |
1715 | * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx | |
1716 | * @hw: pointer to hardware structure | |
1717 | * | |
1718 | * Starts the hardware using the generic start_hw function. | |
1719 | * Then performs device-specific: | |
1720 | * Clears the rate limiter registers. | |
1721 | **/ | |
7b25cdba | 1722 | static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) |
11afc1b1 PW |
1723 | { |
1724 | u32 q_num; | |
794caeb2 | 1725 | s32 ret_val; |
11afc1b1 | 1726 | |
794caeb2 | 1727 | ret_val = ixgbe_start_hw_generic(hw); |
11afc1b1 PW |
1728 | |
1729 | /* Clear the rate limiters */ | |
1730 | for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) { | |
1731 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num); | |
1732 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); | |
1733 | } | |
1734 | IXGBE_WRITE_FLUSH(hw); | |
1735 | ||
50ac58ba PWJ |
1736 | /* We need to run link autotry after the driver loads */ |
1737 | hw->mac.autotry_restart = true; | |
1738 | ||
794caeb2 PWJ |
1739 | if (ret_val == 0) |
1740 | ret_val = ixgbe_verify_fw_version_82599(hw); | |
1741 | ||
1742 | return ret_val; | |
11afc1b1 PW |
1743 | } |
1744 | ||
1745 | /** | |
1746 | * ixgbe_identify_phy_82599 - Get physical layer module | |
1747 | * @hw: pointer to hardware structure | |
1748 | * | |
1749 | * Determines the physical layer module found on the current adapter. | |
21cc5b4f ET |
1750 | * If PHY already detected, maintains current PHY type in hw struct, |
1751 | * otherwise executes the PHY detection routine. | |
11afc1b1 | 1752 | **/ |
21cc5b4f | 1753 | s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) |
11afc1b1 PW |
1754 | { |
1755 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; | |
21cc5b4f ET |
1756 | |
1757 | /* Detect PHY if not unknown - returns success if already detected. */ | |
11afc1b1 | 1758 | status = ixgbe_identify_phy_generic(hw); |
21cc5b4f ET |
1759 | if (status != 0) { |
1760 | /* 82599 10GBASE-T requires an external PHY */ | |
1761 | if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) | |
1762 | goto out; | |
1763 | else | |
1764 | status = ixgbe_identify_sfp_module_generic(hw); | |
1765 | } | |
1766 | ||
1767 | /* Set PHY type none if no PHY detected */ | |
1768 | if (hw->phy.type == ixgbe_phy_unknown) { | |
1769 | hw->phy.type = ixgbe_phy_none; | |
1770 | status = 0; | |
1771 | } | |
1772 | ||
1773 | /* Return error if SFP module has been detected but is not supported */ | |
1774 | if (hw->phy.type == ixgbe_phy_sfp_unsupported) | |
1775 | status = IXGBE_ERR_SFP_NOT_SUPPORTED; | |
1776 | ||
1777 | out: | |
11afc1b1 PW |
1778 | return status; |
1779 | } | |
1780 | ||
1781 | /** | |
1782 | * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type | |
1783 | * @hw: pointer to hardware structure | |
1784 | * | |
1785 | * Determines physical layer capabilities of the current configuration. | |
1786 | **/ | |
7b25cdba | 1787 | static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) |
11afc1b1 PW |
1788 | { |
1789 | u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | |
04193058 PWJ |
1790 | u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
1791 | u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | |
1792 | u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; | |
1793 | u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; | |
1794 | u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; | |
1795 | u16 ext_ability = 0; | |
1339b9e9 | 1796 | u8 comp_codes_10g = 0; |
cb836a97 | 1797 | u8 comp_codes_1g = 0; |
11afc1b1 | 1798 | |
04193058 PWJ |
1799 | hw->phy.ops.identify(hw); |
1800 | ||
21cc5b4f ET |
1801 | switch (hw->phy.type) { |
1802 | case ixgbe_phy_tn: | |
1803 | case ixgbe_phy_aq: | |
1804 | case ixgbe_phy_cu_unknown: | |
6b73e10d | 1805 | hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, |
21cc5b4f | 1806 | &ext_ability); |
6b73e10d | 1807 | if (ext_ability & MDIO_PMA_EXTABLE_10GBT) |
04193058 | 1808 | physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; |
6b73e10d | 1809 | if (ext_ability & MDIO_PMA_EXTABLE_1000BT) |
04193058 | 1810 | physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; |
6b73e10d | 1811 | if (ext_ability & MDIO_PMA_EXTABLE_100BTX) |
04193058 PWJ |
1812 | physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; |
1813 | goto out; | |
21cc5b4f ET |
1814 | default: |
1815 | break; | |
04193058 PWJ |
1816 | } |
1817 | ||
1818 | switch (autoc & IXGBE_AUTOC_LMS_MASK) { | |
1819 | case IXGBE_AUTOC_LMS_1G_AN: | |
1820 | case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: | |
1821 | if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { | |
1822 | physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | | |
1823 | IXGBE_PHYSICAL_LAYER_1000BASE_BX; | |
1824 | goto out; | |
1825 | } else | |
1826 | /* SFI mode so read SFP module */ | |
1827 | goto sfp_check; | |
11afc1b1 | 1828 | break; |
04193058 PWJ |
1829 | case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: |
1830 | if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) | |
1831 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; | |
1832 | else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) | |
1833 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; | |
1fcf03e6 PWJ |
1834 | else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) |
1835 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; | |
04193058 PWJ |
1836 | goto out; |
1837 | break; | |
1838 | case IXGBE_AUTOC_LMS_10G_SERIAL: | |
1839 | if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { | |
1840 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; | |
1841 | goto out; | |
1842 | } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) | |
1843 | goto sfp_check; | |
1844 | break; | |
1845 | case IXGBE_AUTOC_LMS_KX4_KX_KR: | |
1846 | case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: | |
1847 | if (autoc & IXGBE_AUTOC_KX_SUPP) | |
1848 | physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; | |
1849 | if (autoc & IXGBE_AUTOC_KX4_SUPP) | |
1850 | physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; | |
1851 | if (autoc & IXGBE_AUTOC_KR_SUPP) | |
1852 | physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; | |
1853 | goto out; | |
1854 | break; | |
1855 | default: | |
1856 | goto out; | |
1857 | break; | |
1858 | } | |
11afc1b1 | 1859 | |
04193058 PWJ |
1860 | sfp_check: |
1861 | /* SFP check must be done last since DA modules are sometimes used to | |
1862 | * test KR mode - we need to id KR mode correctly before SFP module. | |
1863 | * Call identify_sfp because the pluggable module may have changed */ | |
1864 | hw->phy.ops.identify_sfp(hw); | |
1865 | if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) | |
1866 | goto out; | |
1867 | ||
1868 | switch (hw->phy.type) { | |
ea0a04df DS |
1869 | case ixgbe_phy_sfp_passive_tyco: |
1870 | case ixgbe_phy_sfp_passive_unknown: | |
04193058 PWJ |
1871 | physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; |
1872 | break; | |
ea0a04df DS |
1873 | case ixgbe_phy_sfp_ftl_active: |
1874 | case ixgbe_phy_sfp_active_unknown: | |
1875 | physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; | |
1876 | break; | |
04193058 PWJ |
1877 | case ixgbe_phy_sfp_avago: |
1878 | case ixgbe_phy_sfp_ftl: | |
1879 | case ixgbe_phy_sfp_intel: | |
1880 | case ixgbe_phy_sfp_unknown: | |
cb836a97 DS |
1881 | hw->phy.ops.read_i2c_eeprom(hw, |
1882 | IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); | |
04193058 PWJ |
1883 | hw->phy.ops.read_i2c_eeprom(hw, |
1884 | IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); | |
1885 | if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) | |
11afc1b1 | 1886 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; |
04193058 | 1887 | else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) |
11afc1b1 | 1888 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; |
cb836a97 DS |
1889 | else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) |
1890 | physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; | |
11afc1b1 PW |
1891 | break; |
1892 | default: | |
11afc1b1 PW |
1893 | break; |
1894 | } | |
1895 | ||
04193058 | 1896 | out: |
11afc1b1 PW |
1897 | return physical_layer; |
1898 | } | |
1899 | ||
/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
#define IXGBE_MAX_SECRX_POLL 30
	int i;
	int secrxreg;

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit. Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Poll (up to 30 x 10us) until the security block reports ready */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			udelay(10);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		hw_dbg(hw, "Rx unit being enabled before security "
		       "path fully disabled. Continuing with init.\n");

	/* Enable Rx DMA, then re-enable the security Rx path */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
1944 | ||
04193058 PWJ |
1945 | /** |
1946 | * ixgbe_get_device_caps_82599 - Get additional device capabilities | |
1947 | * @hw: pointer to hardware structure | |
1948 | * @device_caps: the EEPROM word with the extra device capabilities | |
1949 | * | |
1950 | * This function will read the EEPROM location for the device capabilities, | |
1951 | * and return the word through device_caps. | |
1952 | **/ | |
7b25cdba | 1953 | static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps) |
04193058 PWJ |
1954 | { |
1955 | hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); | |
1956 | ||
1957 | return 0; | |
1958 | } | |
1959 | ||
794caeb2 PWJ |
1960 | /** |
1961 | * ixgbe_verify_fw_version_82599 - verify fw version for 82599 | |
1962 | * @hw: pointer to hardware structure | |
1963 | * | |
1964 | * Verifies that installed the firmware version is 0.6 or higher | |
1965 | * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. | |
1966 | * | |
1967 | * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or | |
1968 | * if the FW version is not supported. | |
1969 | **/ | |
1970 | static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) | |
1971 | { | |
1972 | s32 status = IXGBE_ERR_EEPROM_VERSION; | |
1973 | u16 fw_offset, fw_ptp_cfg_offset; | |
1974 | u16 fw_version = 0; | |
1975 | ||
1976 | /* firmware check is only necessary for SFI devices */ | |
1977 | if (hw->phy.media_type != ixgbe_media_type_fiber) { | |
1978 | status = 0; | |
1979 | goto fw_version_out; | |
1980 | } | |
1981 | ||
1982 | /* get the offset to the Firmware Module block */ | |
1983 | hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); | |
1984 | ||
1985 | if ((fw_offset == 0) || (fw_offset == 0xFFFF)) | |
1986 | goto fw_version_out; | |
1987 | ||
1988 | /* get the offset to the Pass Through Patch Configuration block */ | |
1989 | hw->eeprom.ops.read(hw, (fw_offset + | |
1990 | IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), | |
1991 | &fw_ptp_cfg_offset); | |
1992 | ||
1993 | if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) | |
1994 | goto fw_version_out; | |
1995 | ||
1996 | /* get the firmware version */ | |
1997 | hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + | |
1998 | IXGBE_FW_PATCH_VERSION_4), | |
1999 | &fw_version); | |
2000 | ||
2001 | if (fw_version > 0x5) | |
2002 | status = 0; | |
2003 | ||
2004 | fw_version_out: | |
2005 | return status; | |
2006 | } | |
2007 | ||
11afc1b1 PW |
/* MAC operation dispatch table for the 82599: 82599-specific routines
 * (reset, start, link setup, analog register access, SFP setup) mixed
 * with shared generic implementations. */
static struct ixgbe_mac_operations mac_ops_82599 = {
	.init_hw = &ixgbe_init_hw_generic,
	.reset_hw = &ixgbe_reset_hw_82599,
	.start_hw = &ixgbe_start_hw_82599,
	.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
	.get_media_type = &ixgbe_get_media_type_82599,
	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
	.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
	.get_mac_addr = &ixgbe_get_mac_addr_generic,
	.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
	.get_device_caps = &ixgbe_get_device_caps_82599,
	.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
	.stop_adapter = &ixgbe_stop_adapter_generic,
	.get_bus_info = &ixgbe_get_bus_info_generic,
	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
	.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
	.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
	.setup_link = &ixgbe_setup_mac_link_82599,
	.check_link = &ixgbe_check_mac_link_generic,
	.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
	.led_on = &ixgbe_led_on_generic,
	.led_off = &ixgbe_led_off_generic,
	.blink_led_start = &ixgbe_blink_led_start_generic,
	.blink_led_stop = &ixgbe_blink_led_stop_generic,
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_generic,
	.clear_vmdq = &ixgbe_clear_vmdq_generic,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
	.enable_mc = &ixgbe_enable_mc_generic,
	.disable_mc = &ixgbe_disable_mc_generic,
	.clear_vfta = &ixgbe_clear_vfta_generic,
	.set_vfta = &ixgbe_set_vfta_generic,
	.fc_enable = &ixgbe_fc_enable_generic,
	.init_uta_tables = &ixgbe_init_uta_tables_generic,
	.setup_sfp = &ixgbe_setup_sfp_modules_82599,
	.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
};
2048 | ||
/* EEPROM operations for the 82599 — all generic implementations;
 * reads go through the EERD register interface. */
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
	.init_params = &ixgbe_init_eeprom_params_generic,
	.read = &ixgbe_read_eerd_generic,
	.write = &ixgbe_write_eeprom_generic,
	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
2057 | ||
/* PHY operations for the 82599: 82599-specific identify/init plus
 * generic MDIO and I2C (SFP EEPROM) access helpers. */
static struct ixgbe_phy_operations phy_ops_82599 = {
	.identify = &ixgbe_identify_phy_82599,
	.identify_sfp = &ixgbe_identify_sfp_module_generic,
	.init = &ixgbe_init_phy_ops_82599,
	.reset = &ixgbe_reset_phy_generic,
	.read_reg = &ixgbe_read_phy_reg_generic,
	.write_reg = &ixgbe_write_phy_reg_generic,
	.setup_link = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
	.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
	.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
	.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
	.check_overtemp = &ixgbe_tn_check_overtemp,
};
2073 | ||
/* Top-level device descriptor tying the 82599 MAC type to its
 * operation tables; consumed by the probe path. */
struct ixgbe_info ixgbe_82599_info = {
	.mac = ixgbe_mac_82599EB,
	.get_invariants = &ixgbe_get_invariants_82599,
	.mac_ops = &mac_ops_82599,
	.eeprom_ops = &eeprom_ops_82599,
	.phy_ops = &phy_ops_82599,
	.mbx_ops = &mbx_ops_generic,
};