Commit | Line | Data |
---|---|---|
f931551b | 1 | /* |
ecd4b48a BH |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. |
3 | * All rights reserved. | |
f931551b RC |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | */ | |
34 | /* | |
35 | * This file contains all of the code that is specific to the SerDes | |
36 | * on the QLogic_IB 7220 chip. | |
37 | */ | |
38 | ||
39 | #include <linux/pci.h> | |
40 | #include <linux/delay.h> | |
e4dd23d7 | 41 | #include <linux/module.h> |
ecd4b48a | 42 | #include <linux/firmware.h> |
f931551b RC |
43 | |
44 | #include "qib.h" | |
45 | #include "qib_7220.h" | |
46 | ||
ecd4b48a BH |
47 | #define SD7220_FW_NAME "qlogic/sd7220.fw" |
48 | MODULE_FIRMWARE(SD7220_FW_NAME); | |
49 | ||
f931551b RC |
50 | /* |
51 | * Same as in qib_iba7220.c, but just the registers needed here. | |
52 | * Could move whole set to qib_7220.h, but decided better to keep | |
53 | * local. | |
54 | */ | |
55 | #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) | |
56 | #define kr_hwerrclear KREG_IDX(HwErrClear) | |
57 | #define kr_hwerrmask KREG_IDX(HwErrMask) | |
58 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | |
59 | #define kr_ibcstatus KREG_IDX(IBCStatus) | |
60 | #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) | |
61 | #define kr_scratch KREG_IDX(Scratch) | |
62 | #define kr_xgxs_cfg KREG_IDX(XGXSCfg) | |
63 | /* these are used only here, not in qib_iba7220.c */ | |
64 | #define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl) | |
65 | #define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg) | |
66 | #define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg) | |
67 | #define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl) | |
68 | #define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0) | |
69 | ||
70 | /* | |
71 | * The IBSerDesMappTable is a memory that holds values to be stored in | |
72 | * various SerDes registers by IBC. | |
73 | */ | |
74 | #define kr_serdes_maptable KREG_IDX(IBSerDesMappTable) | |
75 | ||
76 | /* | |
77 | * Below used for sdnum parameter, selecting one of the two sections | |
78 | * used for PCIe, or the single SerDes used for IB. | |
79 | */ | |
80 | #define PCIE_SERDES0 0 | |
81 | #define PCIE_SERDES1 1 | |
82 | ||
83 | /* | |
84 | * The EPB requires addressing in a particular form. EPB_LOC() is intended | |
85 | * to make #definitions a little more readable. | |
86 | */ | |
87 | #define EPB_ADDR_SHF 8 | |
88 | #define EPB_LOC(chn, elt, reg) \ | |
89 | (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \ | |
90 | EPB_ADDR_SHF) | |
91 | #define EPB_IB_QUAD0_CS_SHF (25) | |
92 | #define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF) | |
93 | #define EPB_IB_UC_CS_SHF (26) | |
94 | #define EPB_PCIE_UC_CS_SHF (27) | |
95 | #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8)) | |
96 | ||
97 | /* Forward declarations. */ | |
98 | static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, | |
99 | u32 data, u32 mask); | |
100 | static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, | |
101 | int mask); | |
102 | static int qib_sd_trimdone_poll(struct qib_devdata *dd); | |
103 | static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where); | |
104 | static int qib_sd_setvals(struct qib_devdata *dd); | |
105 | static int qib_sd_early(struct qib_devdata *dd); | |
106 | static int qib_sd_dactrim(struct qib_devdata *dd); | |
107 | static int qib_internal_presets(struct qib_devdata *dd); | |
108 | /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */ | |
109 | static int qib_sd_trimself(struct qib_devdata *dd, int val); | |
110 | static int epb_access(struct qib_devdata *dd, int sdnum, int claim); | |
ecd4b48a BH |
111 | static int qib_sd7220_ib_load(struct qib_devdata *dd, |
112 | const struct firmware *fw); | |
113 | static int qib_sd7220_ib_vfy(struct qib_devdata *dd, | |
114 | const struct firmware *fw); | |
f931551b RC |
115 | |
116 | /* | |
117 | * Below keeps track of whether the "once per power-on" initialization has | |
118 | * been done, because uC code Version 1.32.17 or higher allows the uC to | |
119 | * be reset at will, and Automatic Equalization may require it. So the | |
120 | * state of the reset "pin", is no longer valid. Instead, we check for the | |
121 | * actual uC code having been loaded. | |
122 | */ | |
ecd4b48a BH |
123 | static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd, |
124 | const struct firmware *fw) | |
f931551b RC |
125 | { |
126 | struct qib_devdata *dd = ppd->dd; | |
ecd4b48a BH |
127 | |
128 | if (!dd->cspec->serdes_first_init_done && | |
129 | qib_sd7220_ib_vfy(dd, fw) > 0) | |
f931551b RC |
130 | dd->cspec->serdes_first_init_done = 1; |
131 | return dd->cspec->serdes_first_init_done; | |
132 | } | |
133 | ||
134 | /* repeat #define for local use. "Real" #define is in qib_iba7220.c */ | |
135 | #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL | |
136 | #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF)) | |
137 | #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)) | |
138 | #define UC_PAR_CLR_D 8 | |
139 | #define UC_PAR_CLR_M 0xC | |
140 | #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS) | |
141 | #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27) | |
142 | ||
143 | void qib_sd7220_clr_ibpar(struct qib_devdata *dd) | |
144 | { | |
145 | int ret; | |
146 | ||
147 | /* clear, then re-enable parity errs */ | |
148 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, | |
149 | UC_PAR_CLR_D, UC_PAR_CLR_M); | |
150 | if (ret < 0) { | |
151 | qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); | |
152 | goto bail; | |
153 | } | |
154 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, | |
155 | UC_PAR_CLR_M); | |
156 | ||
157 | qib_read_kreg32(dd, kr_scratch); | |
158 | udelay(4); | |
159 | qib_write_kreg(dd, kr_hwerrclear, | |
160 | QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); | |
161 | qib_read_kreg32(dd, kr_scratch); | |
162 | bail: | |
163 | return; | |
164 | } | |
165 | ||
166 | /* | |
167 | * After a reset or other unusual event, the epb interface may need | |
168 | * to be re-synchronized, between the host and the uC. | |
169 | * returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected) | |
170 | */ | |
171 | #define IBSD_RESYNC_TRIES 3 | |
172 | #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS) | |
173 | #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS) | |
174 | ||
175 | static int qib_resync_ibepb(struct qib_devdata *dd) | |
176 | { | |
177 | int ret, pat, tries, chn; | |
178 | u32 loc; | |
179 | ||
180 | ret = -1; | |
181 | chn = 0; | |
182 | for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) { | |
183 | loc = IB_PGUDP(chn); | |
184 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | |
185 | if (ret < 0) { | |
186 | qib_dev_err(dd, "Failed read in resync\n"); | |
187 | continue; | |
188 | } | |
189 | if (ret != 0xF0 && ret != 0x55 && tries == 0) | |
190 | qib_dev_err(dd, "unexpected pattern in resync\n"); | |
191 | pat = ret ^ 0xA5; /* alternate F0 and 55 */ | |
192 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); | |
193 | if (ret < 0) { | |
194 | qib_dev_err(dd, "Failed write in resync\n"); | |
195 | continue; | |
196 | } | |
197 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | |
198 | if (ret < 0) { | |
199 | qib_dev_err(dd, "Failed re-read in resync\n"); | |
200 | continue; | |
201 | } | |
202 | if (ret != pat) { | |
203 | qib_dev_err(dd, "Failed compare1 in resync\n"); | |
204 | continue; | |
205 | } | |
206 | loc = IB_CMUDONE(chn); | |
207 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | |
208 | if (ret < 0) { | |
209 | qib_dev_err(dd, "Failed CMUDONE rd in resync\n"); | |
210 | continue; | |
211 | } | |
212 | if ((ret & 0x70) != ((chn << 4) | 0x40)) { | |
213 | qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", | |
214 | ret, chn); | |
215 | continue; | |
216 | } | |
217 | if (++chn == 4) | |
218 | break; /* Success */ | |
219 | } | |
220 | return (ret > 0) ? 0 : ret; | |
221 | } | |
222 | ||
223 | /* | |
224 | * Localize the stuff that should be done to change IB uC reset | |
225 | * returns <0 for errors. | |
226 | */ | |
227 | static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst) | |
228 | { | |
229 | u64 rst_val; | |
230 | int ret = 0; | |
231 | unsigned long flags; | |
232 | ||
233 | rst_val = qib_read_kreg64(dd, kr_ibserdesctrl); | |
234 | if (assert_rst) { | |
235 | /* | |
236 | * Vendor recommends "interrupting" uC before reset, to | |
237 | * minimize possible glitches. | |
238 | */ | |
239 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); | |
240 | epb_access(dd, IB_7220_SERDES, 1); | |
241 | rst_val |= 1ULL; | |
242 | /* Squelch possible parity error from _asserting_ reset */ | |
243 | qib_write_kreg(dd, kr_hwerrmask, | |
244 | dd->cspec->hwerrmask & | |
245 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); | |
246 | qib_write_kreg(dd, kr_ibserdesctrl, rst_val); | |
247 | /* flush write, delay to ensure it took effect */ | |
248 | qib_read_kreg32(dd, kr_scratch); | |
249 | udelay(2); | |
250 | /* once it's reset, can remove interrupt */ | |
251 | epb_access(dd, IB_7220_SERDES, -1); | |
252 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | |
253 | } else { | |
254 | /* | |
255 | * Before we de-assert reset, we need to deal with | |
256 | * possible glitch on the Parity-error line. | |
257 | * Suppress it around the reset, both in chip-level | |
258 | * hwerrmask and in IB uC control reg. uC will allow | |
259 | * it again during startup. | |
260 | */ | |
261 | u64 val; | |
262 | rst_val &= ~(1ULL); | |
263 | qib_write_kreg(dd, kr_hwerrmask, | |
264 | dd->cspec->hwerrmask & | |
265 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); | |
266 | ||
267 | ret = qib_resync_ibepb(dd); | |
268 | if (ret < 0) | |
269 | qib_dev_err(dd, "unable to re-sync IB EPB\n"); | |
270 | ||
271 | /* set uC control regs to suppress parity errs */ | |
272 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); | |
273 | if (ret < 0) | |
274 | goto bail; | |
275 | /* IB uC code past Version 1.32.17 allow suppression of wdog */ | |
276 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, | |
277 | 0x80); | |
278 | if (ret < 0) { | |
279 | qib_dev_err(dd, "Failed to set WDOG disable\n"); | |
280 | goto bail; | |
281 | } | |
282 | qib_write_kreg(dd, kr_ibserdesctrl, rst_val); | |
283 | /* flush write, delay for startup */ | |
284 | qib_read_kreg32(dd, kr_scratch); | |
285 | udelay(1); | |
286 | /* clear, then re-enable parity errs */ | |
287 | qib_sd7220_clr_ibpar(dd); | |
288 | val = qib_read_kreg64(dd, kr_hwerrstatus); | |
289 | if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) { | |
290 | qib_dev_err(dd, "IBUC Parity still set after RST\n"); | |
291 | dd->cspec->hwerrmask &= | |
292 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR; | |
293 | } | |
294 | qib_write_kreg(dd, kr_hwerrmask, | |
295 | dd->cspec->hwerrmask); | |
296 | } | |
297 | ||
298 | bail: | |
299 | return ret; | |
300 | } | |
301 | ||
302 | static void qib_sd_trimdone_monitor(struct qib_devdata *dd, | |
865b64be | 303 | const char *where) |
f931551b RC |
304 | { |
305 | int ret, chn, baduns; | |
306 | u64 val; | |
307 | ||
308 | if (!where) | |
309 | where = "?"; | |
310 | ||
311 | /* give time for reset to settle out in EPB */ | |
312 | udelay(2); | |
313 | ||
314 | ret = qib_resync_ibepb(dd); | |
315 | if (ret < 0) | |
316 | qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); | |
317 | ||
318 | /* Do "sacrificial read" to get EPB in sane state after reset */ | |
319 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); | |
320 | if (ret < 0) | |
321 | qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); | |
322 | ||
323 | /* Check/show "summary" Trim-done bit in IBCStatus */ | |
324 | val = qib_read_kreg64(dd, kr_ibcstatus); | |
325 | if (!(val & (1ULL << 11))) | |
326 | qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); | |
327 | /* | |
328 | * Do "dummy read/mod/wr" to get EPB in sane state after reset | |
329 | * The default value for MPREG6 is 0. | |
330 | */ | |
331 | udelay(2); | |
332 | ||
333 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); | |
334 | if (ret < 0) | |
335 | qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); | |
336 | udelay(10); | |
337 | ||
338 | baduns = 0; | |
339 | ||
340 | for (chn = 3; chn >= 0; --chn) { | |
341 | /* Read CTRL reg for each channel to check TRIMDONE */ | |
342 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | |
343 | IB_CTRL2(chn), 0, 0); | |
344 | if (ret < 0) | |
345 | qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" | |
346 | " (%s)\n", chn, where); | |
347 | ||
348 | if (!(ret & 0x10)) { | |
349 | int probe; | |
350 | ||
351 | baduns |= (1 << chn); | |
352 | qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." | |
353 | " (%s)\n", chn, ret, where); | |
354 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | |
355 | IB_PGUDP(0), 0, 0); | |
356 | qib_dev_err(dd, "probe is %d (%02X)\n", | |
357 | probe, probe); | |
358 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | |
359 | IB_CTRL2(chn), 0, 0); | |
360 | qib_dev_err(dd, "re-read: %d (%02X)\n", | |
361 | probe, probe); | |
362 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | |
363 | IB_CTRL2(chn), 0x10, 0x10); | |
364 | if (ret < 0) | |
365 | qib_dev_err(dd, | |
366 | "Err on TRIMDONE rewrite1\n"); | |
367 | } | |
368 | } | |
369 | for (chn = 3; chn >= 0; --chn) { | |
370 | /* Read CTRL reg for each channel to check TRIMDONE */ | |
371 | if (baduns & (1 << chn)) { | |
372 | qib_dev_err(dd, | |
373 | "Reseting TRIMDONE on chn %d (%s)\n", | |
374 | chn, where); | |
375 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | |
376 | IB_CTRL2(chn), 0x10, 0x10); | |
377 | if (ret < 0) | |
378 | qib_dev_err(dd, "Failed re-setting " | |
379 | "TRIMDONE, chn %d (%s)\n", | |
380 | chn, where); | |
381 | } | |
382 | } | |
383 | } | |
384 | ||
385 | /* | |
386 | * Below is portion of IBA7220-specific bringup_serdes() that actually | |
387 | * deals with registers and memory within the SerDes itself. | |
388 | * Post IB uC code version 1.32.17, was_reset being 1 is not really | |
389 | * informative, so we double-check. | |
390 | */ | |
391 | int qib_sd7220_init(struct qib_devdata *dd) | |
392 | { | |
ecd4b48a | 393 | const struct firmware *fw; |
f931551b RC |
394 | int ret = 1; /* default to failure */ |
395 | int first_reset, was_reset; | |
396 | ||
397 | /* SERDES MPU reset recorded in D0 */ | |
398 | was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1); | |
399 | if (!was_reset) { | |
400 | /* entered with reset not asserted, we need to do it */ | |
401 | qib_ibsd_reset(dd, 1); | |
402 | qib_sd_trimdone_monitor(dd, "Driver-reload"); | |
403 | } | |
ecd4b48a BH |
404 | |
405 | ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev); | |
406 | if (ret) { | |
407 | qib_dev_err(dd, "Failed to load IB SERDES image\n"); | |
408 | goto done; | |
409 | } | |
410 | ||
f931551b | 411 | /* Substitute our deduced value for was_reset */ |
ecd4b48a | 412 | ret = qib_ibsd_ucode_loaded(dd->pport, fw); |
f931551b RC |
413 | if (ret < 0) |
414 | goto bail; | |
415 | ||
416 | first_reset = !ret; /* First reset if IBSD uCode not yet loaded */ | |
417 | /* | |
418 | * Alter some regs per vendor latest doc, reset-defaults | |
419 | * are not right for IB. | |
420 | */ | |
421 | ret = qib_sd_early(dd); | |
422 | if (ret < 0) { | |
423 | qib_dev_err(dd, "Failed to set IB SERDES early defaults\n"); | |
424 | goto bail; | |
425 | } | |
426 | /* | |
427 | * Set DAC manual trim IB. | |
428 | * We only do this once after chip has been reset (usually | |
429 | * same as once per system boot). | |
430 | */ | |
431 | if (first_reset) { | |
432 | ret = qib_sd_dactrim(dd); | |
433 | if (ret < 0) { | |
434 | qib_dev_err(dd, "Failed IB SERDES DAC trim\n"); | |
435 | goto bail; | |
436 | } | |
437 | } | |
438 | /* | |
439 | * Set various registers (DDS and RXEQ) that will be | |
440 | * controlled by IBC (in 1.2 mode) to reasonable preset values | |
441 | * Calling the "internal" version avoids the "check for needed" | |
442 | * and "trimdone monitor" that might be counter-productive. | |
443 | */ | |
444 | ret = qib_internal_presets(dd); | |
445 | if (ret < 0) { | |
446 | qib_dev_err(dd, "Failed to set IB SERDES presets\n"); | |
447 | goto bail; | |
448 | } | |
449 | ret = qib_sd_trimself(dd, 0x80); | |
450 | if (ret < 0) { | |
451 | qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); | |
452 | goto bail; | |
453 | } | |
454 | ||
455 | /* Load image, then try to verify */ | |
456 | ret = 0; /* Assume success */ | |
457 | if (first_reset) { | |
458 | int vfy; | |
459 | int trim_done; | |
460 | ||
ecd4b48a | 461 | ret = qib_sd7220_ib_load(dd, fw); |
f931551b RC |
462 | if (ret < 0) { |
463 | qib_dev_err(dd, "Failed to load IB SERDES image\n"); | |
464 | goto bail; | |
465 | } else { | |
466 | /* Loaded image, try to verify */ | |
ecd4b48a | 467 | vfy = qib_sd7220_ib_vfy(dd, fw); |
f931551b RC |
468 | if (vfy != ret) { |
469 | qib_dev_err(dd, "SERDES PRAM VFY failed\n"); | |
470 | goto bail; | |
471 | } /* end if verified */ | |
472 | } /* end if loaded */ | |
473 | ||
474 | /* | |
475 | * Loaded and verified. Almost good... | |
476 | * hold "success" in ret | |
477 | */ | |
478 | ret = 0; | |
479 | /* | |
480 | * Prev steps all worked, continue bringup | |
481 | * De-assert RESET to uC, only in first reset, to allow | |
482 | * trimming. | |
483 | * | |
484 | * Since our default setup sets START_EQ1 to | |
485 | * PRESET, we need to clear that for this very first run. | |
486 | */ | |
487 | ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38); | |
488 | if (ret < 0) { | |
489 | qib_dev_err(dd, "Failed clearing START_EQ1\n"); | |
490 | goto bail; | |
491 | } | |
492 | ||
493 | qib_ibsd_reset(dd, 0); | |
494 | /* | |
495 | * If this is not the first reset, trimdone should be set | |
496 | * already. We may need to check about this. | |
497 | */ | |
498 | trim_done = qib_sd_trimdone_poll(dd); | |
499 | /* | |
500 | * Whether or not trimdone succeeded, we need to put the | |
501 | * uC back into reset to avoid a possible fight with the | |
502 | * IBC state-machine. | |
503 | */ | |
504 | qib_ibsd_reset(dd, 1); | |
505 | ||
506 | if (!trim_done) { | |
507 | qib_dev_err(dd, "No TRIMDONE seen\n"); | |
508 | goto bail; | |
509 | } | |
510 | /* | |
511 | * DEBUG: check each time we reset if trimdone bits have | |
512 | * gotten cleared, and re-set them. | |
513 | */ | |
514 | qib_sd_trimdone_monitor(dd, "First-reset"); | |
515 | /* Remember so we do not re-do the load, dactrim, etc. */ | |
516 | dd->cspec->serdes_first_init_done = 1; | |
517 | } | |
518 | /* | |
519 | * setup for channel training and load values for | |
520 | * RxEq and DDS in tables used by IBC in IB1.2 mode | |
521 | */ | |
522 | ret = 0; | |
523 | if (qib_sd_setvals(dd) >= 0) | |
524 | goto done; | |
525 | bail: | |
526 | ret = 1; | |
527 | done: | |
528 | /* start relock timer regardless, but start at 1 second */ | |
529 | set_7220_relock_poll(dd, -1); | |
ecd4b48a BH |
530 | |
531 | release_firmware(fw); | |
f931551b RC |
532 | return ret; |
533 | } | |
534 | ||
535 | #define EPB_ACC_REQ 1 | |
536 | #define EPB_ACC_GNT 0x100 | |
537 | #define EPB_DATA_MASK 0xFF | |
538 | #define EPB_RD (1ULL << 24) | |
539 | #define EPB_TRANS_RDY (1ULL << 31) | |
540 | #define EPB_TRANS_ERR (1ULL << 30) | |
541 | #define EPB_TRANS_TRIES 5 | |
542 | ||
543 | /* | |
544 | * query, claim, release ownership of the EPB (External Parallel Bus) | |
545 | * for a specified SERDES. | |
546 | * the "claim" parameter is >0 to claim, <0 to release, 0 to query. | |
547 | * Returns <0 for errors, >0 if we had ownership, else 0. | |
548 | */ | |
549 | static int epb_access(struct qib_devdata *dd, int sdnum, int claim) | |
550 | { | |
551 | u16 acc; | |
552 | u64 accval; | |
553 | int owned = 0; | |
554 | u64 oct_sel = 0; | |
555 | ||
556 | switch (sdnum) { | |
557 | case IB_7220_SERDES: | |
558 | /* | |
559 | * The IB SERDES "ownership" is fairly simple. A single each | |
560 | * request/grant. | |
561 | */ | |
562 | acc = kr_ibsd_epb_access_ctrl; | |
563 | break; | |
564 | ||
565 | case PCIE_SERDES0: | |
566 | case PCIE_SERDES1: | |
567 | /* PCIe SERDES has two "octants", need to select which */ | |
568 | acc = kr_pciesd_epb_access_ctrl; | |
569 | oct_sel = (2 << (sdnum - PCIE_SERDES0)); | |
570 | break; | |
571 | ||
572 | default: | |
573 | return 0; | |
574 | } | |
575 | ||
576 | /* Make sure any outstanding transaction was seen */ | |
577 | qib_read_kreg32(dd, kr_scratch); | |
578 | udelay(15); | |
579 | ||
580 | accval = qib_read_kreg32(dd, acc); | |
581 | ||
582 | owned = !!(accval & EPB_ACC_GNT); | |
583 | if (claim < 0) { | |
584 | /* Need to release */ | |
585 | u64 pollval; | |
586 | /* | |
587 | * The only writeable bits are the request and CS. | |
588 | * Both should be clear | |
589 | */ | |
590 | u64 newval = 0; | |
591 | qib_write_kreg(dd, acc, newval); | |
592 | /* First read after write is not trustworthy */ | |
593 | pollval = qib_read_kreg32(dd, acc); | |
594 | udelay(5); | |
595 | pollval = qib_read_kreg32(dd, acc); | |
596 | if (pollval & EPB_ACC_GNT) | |
597 | owned = -1; | |
598 | } else if (claim > 0) { | |
599 | /* Need to claim */ | |
600 | u64 pollval; | |
601 | u64 newval = EPB_ACC_REQ | oct_sel; | |
602 | qib_write_kreg(dd, acc, newval); | |
603 | /* First read after write is not trustworthy */ | |
604 | pollval = qib_read_kreg32(dd, acc); | |
605 | udelay(5); | |
606 | pollval = qib_read_kreg32(dd, acc); | |
607 | if (!(pollval & EPB_ACC_GNT)) | |
608 | owned = -1; | |
609 | } | |
610 | return owned; | |
611 | } | |
612 | ||
613 | /* | |
614 | * Lemma to deal with race condition of write..read to epb regs | |
615 | */ | |
616 | static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) | |
617 | { | |
618 | int tries; | |
619 | u64 transval; | |
620 | ||
621 | qib_write_kreg(dd, reg, i_val); | |
622 | /* Throw away first read, as RDY bit may be stale */ | |
623 | transval = qib_read_kreg64(dd, reg); | |
624 | ||
625 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | |
626 | transval = qib_read_kreg32(dd, reg); | |
627 | if (transval & EPB_TRANS_RDY) | |
628 | break; | |
629 | udelay(5); | |
630 | } | |
631 | if (transval & EPB_TRANS_ERR) | |
632 | return -1; | |
633 | if (tries > 0 && o_vp) | |
634 | *o_vp = transval; | |
635 | return tries; | |
636 | } | |
637 | ||
638 | /** | |
639 | * qib_sd7220_reg_mod - modify SERDES register | |
640 | * @dd: the qlogic_ib device | |
641 | * @sdnum: which SERDES to access | |
642 | * @loc: location - channel, element, register, as packed by EPB_LOC() macro. | |
643 | * @wd: Write Data - value to set in register | |
644 | * @mask: ones where data should be spliced into reg. | |
645 | * | |
646 | * Basic register read/modify/write, with un-needed acesses elided. That is, | |
647 | * a mask of zero will prevent write, while a mask of 0xFF will prevent read. | |
648 | * returns current (presumed, if a write was done) contents of selected | |
649 | * register, or <0 if errors. | |
650 | */ | |
651 | static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, | |
652 | u32 wd, u32 mask) | |
653 | { | |
654 | u16 trans; | |
655 | u64 transval; | |
656 | int owned; | |
657 | int tries, ret; | |
658 | unsigned long flags; | |
659 | ||
660 | switch (sdnum) { | |
661 | case IB_7220_SERDES: | |
662 | trans = kr_ibsd_epb_transaction_reg; | |
663 | break; | |
664 | ||
665 | case PCIE_SERDES0: | |
666 | case PCIE_SERDES1: | |
667 | trans = kr_pciesd_epb_transaction_reg; | |
668 | break; | |
669 | ||
670 | default: | |
671 | return -1; | |
672 | } | |
673 | ||
674 | /* | |
675 | * All access is locked in software (vs other host threads) and | |
676 | * hardware (vs uC access). | |
677 | */ | |
678 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); | |
679 | ||
680 | owned = epb_access(dd, sdnum, 1); | |
681 | if (owned < 0) { | |
682 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | |
683 | return -1; | |
684 | } | |
685 | ret = 0; | |
686 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | |
687 | transval = qib_read_kreg32(dd, trans); | |
688 | if (transval & EPB_TRANS_RDY) | |
689 | break; | |
690 | udelay(5); | |
691 | } | |
692 | ||
693 | if (tries > 0) { | |
694 | tries = 1; /* to make read-skip work */ | |
695 | if (mask != 0xFF) { | |
696 | /* | |
697 | * Not a pure write, so need to read. | |
698 | * loc encodes chip-select as well as address | |
699 | */ | |
700 | transval = loc | EPB_RD; | |
701 | tries = epb_trans(dd, trans, transval, &transval); | |
702 | } | |
703 | if (tries > 0 && mask != 0) { | |
704 | /* | |
705 | * Not a pure read, so need to write. | |
706 | */ | |
707 | wd = (wd & mask) | (transval & ~mask); | |
708 | transval = loc | (wd & EPB_DATA_MASK); | |
709 | tries = epb_trans(dd, trans, transval, &transval); | |
710 | } | |
711 | } | |
712 | /* else, failed to see ready, what error-handling? */ | |
713 | ||
714 | /* | |
715 | * Release bus. Failure is an error. | |
716 | */ | |
717 | if (epb_access(dd, sdnum, -1) < 0) | |
718 | ret = -1; | |
719 | else | |
720 | ret = transval & EPB_DATA_MASK; | |
721 | ||
722 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | |
723 | if (tries <= 0) | |
724 | ret = -1; | |
725 | return ret; | |
726 | } | |
727 | ||
728 | #define EPB_ROM_R (2) | |
729 | #define EPB_ROM_W (1) | |
730 | /* | |
731 | * Below, all uC-related, use appropriate UC_CS, depending | |
732 | * on which SerDes is used. | |
733 | */ | |
734 | #define EPB_UC_CTL EPB_LOC(6, 0, 0) | |
735 | #define EPB_MADDRL EPB_LOC(6, 0, 2) | |
736 | #define EPB_MADDRH EPB_LOC(6, 0, 3) | |
737 | #define EPB_ROMDATA EPB_LOC(6, 0, 4) | |
738 | #define EPB_RAMDATA EPB_LOC(6, 0, 5) | |
739 | ||
740 | /* Transfer date to/from uC Program RAM of IB or PCIe SerDes */ | |
741 | static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc, | |
742 | u8 *buf, int cnt, int rd_notwr) | |
743 | { | |
744 | u16 trans; | |
745 | u64 transval; | |
746 | u64 csbit; | |
747 | int owned; | |
748 | int tries; | |
749 | int sofar; | |
750 | int addr; | |
751 | int ret; | |
752 | unsigned long flags; | |
753 | const char *op; | |
754 | ||
755 | /* Pick appropriate transaction reg and "Chip select" for this serdes */ | |
756 | switch (sdnum) { | |
757 | case IB_7220_SERDES: | |
758 | csbit = 1ULL << EPB_IB_UC_CS_SHF; | |
759 | trans = kr_ibsd_epb_transaction_reg; | |
760 | break; | |
761 | ||
762 | case PCIE_SERDES0: | |
763 | case PCIE_SERDES1: | |
764 | /* PCIe SERDES has uC "chip select" in different bit, too */ | |
765 | csbit = 1ULL << EPB_PCIE_UC_CS_SHF; | |
766 | trans = kr_pciesd_epb_transaction_reg; | |
767 | break; | |
768 | ||
769 | default: | |
770 | return -1; | |
771 | } | |
772 | ||
773 | op = rd_notwr ? "Rd" : "Wr"; | |
774 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); | |
775 | ||
776 | owned = epb_access(dd, sdnum, 1); | |
777 | if (owned < 0) { | |
778 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | |
779 | return -1; | |
780 | } | |
781 | ||
782 | /* | |
783 | * In future code, we may need to distinguish several address ranges, | |
784 | * and select various memories based on this. For now, just trim | |
785 | * "loc" (location including address and memory select) to | |
786 | * "addr" (address within memory). we will only support PRAM | |
787 | * The memory is 8KB. | |
788 | */ | |
789 | addr = loc & 0x1FFF; | |
790 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | |
791 | transval = qib_read_kreg32(dd, trans); | |
792 | if (transval & EPB_TRANS_RDY) | |
793 | break; | |
794 | udelay(5); | |
795 | } | |
796 | ||
797 | sofar = 0; | |
798 | if (tries > 0) { | |
799 | /* | |
800 | * Every "memory" access is doubly-indirect. | |
801 | * We set two bytes of address, then read/write | |
802 | * one or mores bytes of data. | |
803 | */ | |
804 | ||
805 | /* First, we set control to "Read" or "Write" */ | |
806 | transval = csbit | EPB_UC_CTL | | |
807 | (rd_notwr ? EPB_ROM_R : EPB_ROM_W); | |
808 | tries = epb_trans(dd, trans, transval, &transval); | |
809 | while (tries > 0 && sofar < cnt) { | |
810 | if (!sofar) { | |
811 | /* Only set address at start of chunk */ | |
812 | int addrbyte = (addr + sofar) >> 8; | |
813 | transval = csbit | EPB_MADDRH | addrbyte; | |
814 | tries = epb_trans(dd, trans, transval, | |
815 | &transval); | |
816 | if (tries <= 0) | |
817 | break; | |
818 | addrbyte = (addr + sofar) & 0xFF; | |
819 | transval = csbit | EPB_MADDRL | addrbyte; | |
820 | tries = epb_trans(dd, trans, transval, | |
821 | &transval); | |
822 | if (tries <= 0) | |
823 | break; | |
824 | } | |
825 | ||
826 | if (rd_notwr) | |
827 | transval = csbit | EPB_ROMDATA | EPB_RD; | |
828 | else | |
829 | transval = csbit | EPB_ROMDATA | buf[sofar]; | |
830 | tries = epb_trans(dd, trans, transval, &transval); | |
831 | if (tries <= 0) | |
832 | break; | |
833 | if (rd_notwr) | |
834 | buf[sofar] = transval & EPB_DATA_MASK; | |
835 | ++sofar; | |
836 | } | |
837 | /* Finally, clear control-bit for Read or Write */ | |
838 | transval = csbit | EPB_UC_CTL; | |
839 | tries = epb_trans(dd, trans, transval, &transval); | |
840 | } | |
841 | ||
842 | ret = sofar; | |
843 | /* Release bus. Failure is an error */ | |
844 | if (epb_access(dd, sdnum, -1) < 0) | |
845 | ret = -1; | |
846 | ||
847 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | |
848 | if (tries <= 0) | |
849 | ret = -1; | |
850 | return ret; | |
851 | } | |
852 | ||
853 | #define PROG_CHUNK 64 | |
854 | ||
ecd4b48a BH |
855 | static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, |
856 | const u8 *img, int len, int offset) | |
f931551b RC |
857 | { |
858 | int cnt, sofar, req; | |
859 | ||
860 | sofar = 0; | |
861 | while (sofar < len) { | |
862 | req = len - sofar; | |
863 | if (req > PROG_CHUNK) | |
864 | req = PROG_CHUNK; | |
865 | cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar, | |
ecd4b48a | 866 | (u8 *)img + sofar, req, 0); |
f931551b RC |
867 | if (cnt < req) { |
868 | sofar = -1; | |
869 | break; | |
870 | } | |
871 | sofar += req; | |
872 | } | |
873 | return sofar; | |
874 | } | |
875 | ||
876 | #define VFY_CHUNK 64 | |
877 | #define SD_PRAM_ERROR_LIMIT 42 | |
878 | ||
ecd4b48a BH |
879 | static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, |
880 | const u8 *img, int len, int offset) | |
f931551b RC |
881 | { |
882 | int cnt, sofar, req, idx, errors; | |
883 | unsigned char readback[VFY_CHUNK]; | |
884 | ||
885 | errors = 0; | |
886 | sofar = 0; | |
887 | while (sofar < len) { | |
888 | req = len - sofar; | |
889 | if (req > VFY_CHUNK) | |
890 | req = VFY_CHUNK; | |
891 | cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset, | |
892 | readback, req, 1); | |
893 | if (cnt < req) { | |
894 | /* failed in read itself */ | |
895 | sofar = -1; | |
896 | break; | |
897 | } | |
898 | for (idx = 0; idx < cnt; ++idx) { | |
899 | if (readback[idx] != img[idx+sofar]) | |
900 | ++errors; | |
901 | } | |
902 | sofar += cnt; | |
903 | } | |
904 | return errors ? -errors : sofar; | |
905 | } | |
906 | ||
ecd4b48a BH |
907 | static int |
908 | qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw) | |
909 | { | |
910 | return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0); | |
911 | } | |
912 | ||
913 | static int | |
914 | qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw) | |
915 | { | |
916 | return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0); | |
917 | } | |
918 | ||
f931551b RC |
919 | /* |
920 | * IRQ not set up at this point in init, so we poll. | |
921 | */ | |
922 | #define IB_SERDES_TRIM_DONE (1ULL << 11) | |
923 | #define TRIM_TMO (30) | |
924 | ||
925 | static int qib_sd_trimdone_poll(struct qib_devdata *dd) | |
926 | { | |
927 | int trim_tmo, ret; | |
928 | uint64_t val; | |
929 | ||
930 | /* | |
931 | * Default to failure, so IBC will not start | |
932 | * without IB_SERDES_TRIM_DONE. | |
933 | */ | |
934 | ret = 0; | |
935 | for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) { | |
936 | val = qib_read_kreg64(dd, kr_ibcstatus); | |
937 | if (val & IB_SERDES_TRIM_DONE) { | |
938 | ret = 1; | |
939 | break; | |
940 | } | |
941 | msleep(10); | |
942 | } | |
943 | if (trim_tmo >= TRIM_TMO) { | |
944 | qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); | |
945 | ret = 0; | |
946 | } | |
947 | return ret; | |
948 | } | |
949 | ||
950 | #define TX_FAST_ELT (9) | |
951 | ||
952 | /* | |
953 | * Set the "negotiation" values for SERDES. These are used by the IB1.2 | |
954 | * link negotiation. Macros below are attempt to keep the values a | |
955 | * little more human-editable. | |
956 | * First, values related to Drive De-emphasis Settings. | |
957 | */ | |
958 | ||
959 | #define NUM_DDS_REGS 6 | |
960 | #define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */ | |
961 | ||
962 | #define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \ | |
963 | { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \ | |
964 | (main_d << 3) | 4 | (ipre_d >> 2), \ | |
965 | (main_s << 3) | 4 | (ipre_s >> 2), \ | |
966 | ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \ | |
967 | ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } } | |
968 | ||
/*
 * Vendor-supplied Tx drive/de-emphasis table. Each row yields one byte
 * per register named in DDS_REG_MAP (NUM_DDS_REGS of them), packed by
 * the DDS_VAL() macro above. Rows DDS_3M onward are tuned for a 3m
 * cable; rows from DDS_1M onward for a 1m cable.
 */
static struct dds_init {
	uint8_t reg_vals[NUM_DDS_REGS];	/* one byte per DDS_REG_MAP entry */
} dds_init_vals[] = {
	/*       DDR(FDR)       SDR(HDR)   */
	/* Vendor recommends below for 3m cable */
#define DDS_3M 0
	DDS_VAL(31, 19, 12, 0, 29, 22,  9, 0),
	DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
	DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
	DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
	DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
	DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
	DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
	DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
	DDS_VAL(31, 20, 11, 0, 28, 23,  8, 0),
	DDS_VAL(31, 21, 10, 0, 27, 24,  7, 0),
	DDS_VAL(31, 22,  9, 0, 26, 25,  6, 0),
	DDS_VAL(30, 23,  8, 0, 25, 26,  5, 0),
	DDS_VAL(29, 24,  7, 0, 23, 27,  4, 0),
	/* Vendor recommends below for 1m cable */
#define DDS_1M 13
	DDS_VAL(28, 25,  6, 0, 21, 28,  3, 0),
	DDS_VAL(27, 26,  5, 0, 19, 29,  2, 0),
	DDS_VAL(25, 27,  4, 0, 17, 30,  1, 0)
};
994 | ||
995 | /* | |
996 | * Now the RXEQ section of the table. | |
997 | */ | |
998 | /* Hardware packs an element number and register address thus: */ | |
999 | #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4)) | |
1000 | #define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \ | |
1001 | {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} } | |
1002 | ||
1003 | #define RXEQ_VAL_ALL(elt, adr, val) \ | |
1004 | {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} } | |
1005 | ||
1006 | #define RXEQ_SDR_DFELTH 0 | |
1007 | #define RXEQ_SDR_TLTH 0 | |
1008 | #define RXEQ_SDR_G1CNT_Z1CNT 0x11 | |
1009 | #define RXEQ_SDR_ZCNT 23 | |
1010 | ||
/*
 * Rx equalization table. Each entry names one SerDes register (element
 * plus address, packed by RXEQ_INIT_RDESC into rdesc) and carries four
 * candidate data values; which of the four is used is selected elsewhere
 * (see set_rxeq_vals() and the rxeq_default_set module parameter).
 */
static struct rxeq_init {
	u16 rdesc;	/* in form used in SerDesDDSRXEQ */
	u8  rdata[4];	/* four selectable settings for this register */
} rxeq_init_vals[] = {
	/* Set Rcv Eq. to Preset node */
	RXEQ_VAL_ALL(7, 0x27, 0x10),
	/* Set DFELTHFDR/HDR thresholds */
	RXEQ_VAL(7, 8,    0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
	RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
	/* Set TLTHFDR/HDR theshold */
	RXEQ_VAL(7, 9,    2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
	RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was  0, 1, 2, 3 */
	/* Set Preamp setting 2 (ZFR/ZCNT) */
	RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
	RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
	/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
	RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
	RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
	/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
	RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
	RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
};
1033 | ||
1034 | /* There are 17 values from vendor, but IBC only accesses the first 16 */ | |
1035 | #define DDS_ROWS (16) | |
1036 | #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals) | |
1037 | ||
/*
 * Store the DDS and RXEQ negotiation tables into the chip's SerDes
 * map table, and configure the row counts in IBSerdesCtrl so the IBC
 * can use them during IB1.2 link negotiation.
 * Always returns 0 (writes are not individually checked).
 */
static int qib_sd_setvals(struct qib_devdata *dd)
{
	int idx, midx;
	int min_idx;	 /* Minimum index for this portion of table */
	uint32_t dds_reg_map;
	u64 __iomem *taddr, *iaddr;
	uint64_t data;
	uint64_t sdctl;

	/* taddr: table data area; iaddr: register-descriptor area */
	taddr = dd->kregbase + kr_serdes_maptable;
	iaddr = dd->kregbase + kr_serdes_ddsrxeq0;

	/*
	 * Init the DDS section of the table.
	 * Each "row" of the table provokes NUM_DDS_REG writes, to the
	 * registers indicated in DDS_REG_MAP.
	 */
	sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
	/* 5-bit fields at bits 8 and 13 hold the DDS and RXEQ row counts */
	sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
	sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
	qib_write_kreg(dd, kr_ibserdesctrl, sdctl);

	/*
	 * Iterate down table within loop for each register to store.
	 */
	dds_reg_map = DDS_REG_MAP;
	for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
		/* Descriptor: low nibble of map is reg addr, elt is fixed */
		data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
		writeq(data, iaddr + idx);
		mmiowb();
		/* scratch read-back, presumably to flush/order the MMIO
		 * write before the next one -- TODO confirm */
		qib_read_kreg32(dd, kr_scratch);
		dds_reg_map >>= 4;
		for (midx = 0; midx < DDS_ROWS; ++midx) {
			/* table is laid out 16 bytes per row */
			u64 __iomem *daddr = taddr + ((midx << 4) + idx);
			data = dds_init_vals[midx].reg_vals[idx];
			writeq(data, daddr);
			mmiowb();
			qib_read_kreg32(dd, kr_scratch);
		} /* End inner for (vals for this reg, each row) */
	} /* end outer for (regs to be stored) */

	/*
	 * Init the RXEQ section of the table.
	 * This runs in a different order, as the pattern of
	 * register references is more complex, but there are only
	 * four "data" values per register.
	 */
	min_idx = idx; /* RXEQ indices pick up where DDS left off */
	taddr += 0x100; /* RXEQ data is in second half of table */
	/* Iterate through RXEQ register addresses */
	for (idx = 0; idx < RXEQ_ROWS; ++idx) {
		int didx; /* "destination" */
		int vidx;

		/* didx is offset by min_idx to address RXEQ range of regs */
		didx = idx + min_idx;
		/* Store the next RXEQ register address */
		writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
		mmiowb();
		qib_read_kreg32(dd, kr_scratch);
		/* Iterate through RXEQ values */
		for (vidx = 0; vidx < 4; vidx++) {
			data = rxeq_init_vals[idx].rdata[vidx];
			writeq(data, taddr + (vidx << 6) + idx);
			mmiowb();
			qib_read_kreg32(dd, kr_scratch);
		}
	} /* end outer for (Reg-writes for RXEQ) */
	return 0;
}
1108 | ||
1109 | #define CMUCTRL5 EPB_LOC(7, 0, 0x15) | |
1110 | #define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0) | |
1111 | #define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5) | |
1112 | #define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6) | |
1113 | #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8) | |
1114 | #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28) | |
1115 | ||
1116 | /* | |
1117 | * Repeat a "store" across all channels of the IB SerDes. | |
1118 | * Although nominally it inherits the "read value" of the last | |
1119 | * channel it modified, the only really useful return is <0 for | |
1120 | * failure, >= 0 for success. The parameter 'loc' is assumed to | |
1121 | * be the location in some channel of the register to be modified | |
1122 | * The caller can specify use of the "gang write" option of EPB, | |
1123 | * in which case we use the specified channel data for any fields | |
1124 | * not explicitely written. | |
1125 | */ | |
static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
			     int mask)
{
	int ret = -1;
	int chnl;

	if (loc & EPB_GLOBAL_WR) {
		/*
		 * Our caller has assured us that we can set all four
		 * channels at once. Trust that. If mask is not 0xFF,
		 * we will read the _specified_ channel for our starting
		 * value.
		 */
		loc |= (1U << EPB_IB_QUAD0_CS_SHF);
		/* channel is encoded in bits (4+EPB_ADDR_SHF)..+2 of loc */
		chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
		if (mask != 0xFF) {
			/* Partial-byte update: pre-read to get starting value */
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 loc & ~EPB_GLOBAL_WR, 0, 0);
			if (ret < 0) {
				int sloc = loc >> EPB_ADDR_SHF;

				qib_dev_err(dd, "pre-read failed: elt %d,"
					    " addr 0x%X, chnl %d\n",
					    (sloc & 0xF),
					    (sloc >> 9) & 0x3f, chnl);
				return ret;
			}
			/* Merge new bits (val & mask) into the read value */
			val = (ret & ~mask) | (val & mask);
		}
		/* Clear channel field: gang write targets all channels */
		loc &= ~(7 << (4+EPB_ADDR_SHF));
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
		if (ret < 0) {
			int sloc = loc >> EPB_ADDR_SHF;

			qib_dev_err(dd, "Global WR failed: elt %d,"
				    " addr 0x%X, val %02X\n",
				    (sloc & 0xF), (sloc >> 9) & 0x3f, val);
		}
		return ret;
	}
	/* Clear "channel" and set CS so we can simply iterate */
	loc &= ~(7 << (4+EPB_ADDR_SHF));
	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
	/* Non-gang path: write each of the four channels individually */
	for (chnl = 0; chnl < 4; ++chnl) {
		int cloc = loc | (chnl << (4+EPB_ADDR_SHF));

		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
		if (ret < 0) {
			int sloc = loc >> EPB_ADDR_SHF;

			qib_dev_err(dd, "Write failed: elt %d,"
				    " addr 0x%X, chnl %d, val 0x%02X,"
				    " mask 0x%02X\n",
				    (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
				    val & 0xFF, mask & 0xFF);
			break;
		}
	}
	/* On success, ret is the read-back of the last channel written */
	return ret;
}
1186 | ||
1187 | /* | |
1188 | * Set the Tx values normally modified by IBC in IB1.2 mode to default | |
1189 | * values, as gotten from first row of init table. | |
1190 | */ | |
1191 | static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi) | |
1192 | { | |
1193 | int ret; | |
1194 | int idx, reg, data; | |
1195 | uint32_t regmap; | |
1196 | ||
1197 | regmap = DDS_REG_MAP; | |
1198 | for (idx = 0; idx < NUM_DDS_REGS; ++idx) { | |
1199 | reg = (regmap & 0xF); | |
1200 | regmap >>= 4; | |
1201 | data = ddi->reg_vals[idx]; | |
1202 | /* Vendor says RMW not needed for these regs, use 0xFF mask */ | |
1203 | ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF); | |
1204 | if (ret < 0) | |
1205 | break; | |
1206 | } | |
1207 | return ret; | |
1208 | } | |
1209 | ||
1210 | /* | |
1211 | * Set the Rx values normally modified by IBC in IB1.2 mode to default | |
1212 | * values, as gotten from selected column of init table. | |
1213 | */ | |
1214 | static int set_rxeq_vals(struct qib_devdata *dd, int vsel) | |
1215 | { | |
1216 | int ret; | |
1217 | int ridx; | |
1218 | int cnt = ARRAY_SIZE(rxeq_init_vals); | |
1219 | ||
1220 | for (ridx = 0; ridx < cnt; ++ridx) { | |
1221 | int elt, reg, val, loc; | |
1222 | ||
1223 | elt = rxeq_init_vals[ridx].rdesc & 0xF; | |
1224 | reg = rxeq_init_vals[ridx].rdesc >> 4; | |
1225 | loc = EPB_LOC(0, elt, reg); | |
1226 | val = rxeq_init_vals[ridx].rdata[vsel]; | |
1227 | /* mask of 0xFF, because hardware does full-byte store. */ | |
1228 | ret = ibsd_mod_allchnls(dd, loc, val, 0xFF); | |
1229 | if (ret < 0) | |
1230 | break; | |
1231 | } | |
1232 | return ret; | |
1233 | } | |
1234 | ||
1235 | /* | |
1236 | * Set the default values (row 0) for DDR Driver Demphasis. | |
1237 | * we do this initially and whenever we turn off IB-1.2 | |
1238 | * | |
1239 | * The "default" values for Rx equalization are also stored to | |
1240 | * SerDes registers. Formerly (and still default), we used set 2. | |
1241 | * For experimenting with cables and link-partners, we allow changing | |
1242 | * that via a module parameter. | |
1243 | */ | |
1244 | static unsigned qib_rxeq_set = 2; | |
1245 | module_param_named(rxeq_default_set, qib_rxeq_set, uint, | |
1246 | S_IWUSR | S_IRUGO); | |
1247 | MODULE_PARM_DESC(rxeq_default_set, | |
1248 | "Which set [0..3] of Rx Equalization values is default"); | |
1249 | ||
1250 | static int qib_internal_presets(struct qib_devdata *dd) | |
1251 | { | |
1252 | int ret = 0; | |
1253 | ||
1254 | ret = set_dds_vals(dd, dds_init_vals + DDS_3M); | |
1255 | ||
1256 | if (ret < 0) | |
1257 | qib_dev_err(dd, "Failed to set default DDS values\n"); | |
1258 | ret = set_rxeq_vals(dd, qib_rxeq_set & 3); | |
1259 | if (ret < 0) | |
1260 | qib_dev_err(dd, "Failed to set default RXEQ values\n"); | |
1261 | return ret; | |
1262 | } | |
1263 | ||
1264 | int qib_sd7220_presets(struct qib_devdata *dd) | |
1265 | { | |
1266 | int ret = 0; | |
1267 | ||
1268 | if (!dd->cspec->presets_needed) | |
1269 | return ret; | |
1270 | dd->cspec->presets_needed = 0; | |
1271 | /* Assert uC reset, so we don't clash with it. */ | |
1272 | qib_ibsd_reset(dd, 1); | |
1273 | udelay(2); | |
1274 | qib_sd_trimdone_monitor(dd, "link-down"); | |
1275 | ||
1276 | ret = qib_internal_presets(dd); | |
1277 | return ret; | |
1278 | } | |
1279 | ||
1280 | static int qib_sd_trimself(struct qib_devdata *dd, int val) | |
1281 | { | |
1282 | int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF); | |
1283 | ||
1284 | return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); | |
1285 | } | |
1286 | ||
1287 | static int qib_sd_early(struct qib_devdata *dd) | |
1288 | { | |
1289 | int ret; | |
1290 | ||
1291 | ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF); | |
1292 | if (ret < 0) | |
1293 | goto bail; | |
1294 | ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF); | |
1295 | if (ret < 0) | |
1296 | goto bail; | |
1297 | ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF); | |
1298 | bail: | |
1299 | return ret; | |
1300 | } | |
1301 | ||
1302 | #define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E) | |
1303 | #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6) | |
1304 | #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF) | |
1305 | ||
1306 | static int qib_sd_dactrim(struct qib_devdata *dd) | |
1307 | { | |
1308 | int ret; | |
1309 | ||
1310 | ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF); | |
1311 | if (ret < 0) | |
1312 | goto bail; | |
1313 | ||
1314 | /* more fine-tuning of what will be default */ | |
1315 | ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF); | |
1316 | if (ret < 0) | |
1317 | goto bail; | |
1318 | ||
1319 | ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF); | |
1320 | if (ret < 0) | |
1321 | goto bail; | |
1322 | ||
1323 | ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF); | |
1324 | if (ret < 0) | |
1325 | goto bail; | |
1326 | ||
1327 | ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF); | |
1328 | if (ret < 0) | |
1329 | goto bail; | |
1330 | ||
1331 | /* | |
1332 | * Delay for max possible number of steps, with slop. | |
1333 | * Each step is about 4usec. | |
1334 | */ | |
1335 | udelay(415); | |
1336 | ||
1337 | ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF); | |
1338 | ||
1339 | bail: | |
1340 | return ret; | |
1341 | } | |
1342 | ||
1343 | #define RELOCK_FIRST_MS 3 | |
1344 | #define RXLSPPM(chan) EPB_LOC(chan, 0, 2) | |
1345 | void toggle_7220_rclkrls(struct qib_devdata *dd) | |
1346 | { | |
1347 | int loc = RXLSPPM(0) | EPB_GLOBAL_WR; | |
1348 | int ret; | |
1349 | ||
1350 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); | |
1351 | if (ret < 0) | |
1352 | qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); | |
1353 | else { | |
1354 | udelay(1); | |
1355 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); | |
1356 | } | |
1357 | /* And again for good measure */ | |
1358 | udelay(1); | |
1359 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); | |
1360 | if (ret < 0) | |
1361 | qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); | |
1362 | else { | |
1363 | udelay(1); | |
1364 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); | |
1365 | } | |
1366 | /* Now reset xgxs and IBC to complete the recovery */ | |
1367 | dd->f_xgxs_reset(dd->pport); | |
1368 | } | |
1369 | ||
1370 | /* | |
1371 | * Shut down the timer that polls for relock occasions, if needed | |
1372 | * this is "hooked" from qib_7220_quiet_serdes(), which is called | |
1373 | * just before qib_shutdown_device() in qib_driver.c shuts down all | |
1374 | * the other timers | |
1375 | */ | |
1376 | void shutdown_7220_relock_poll(struct qib_devdata *dd) | |
1377 | { | |
1378 | if (dd->cspec->relock_timer_active) | |
1379 | del_timer_sync(&dd->cspec->relock_timer); | |
1380 | } | |
1381 | ||
1382 | static unsigned qib_relock_by_timer = 1; | |
1383 | module_param_named(relock_by_timer, qib_relock_by_timer, uint, | |
1384 | S_IWUSR | S_IRUGO); | |
1385 | MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up"); | |
1386 | ||
1387 | static void qib_run_relock(unsigned long opaque) | |
1388 | { | |
1389 | struct qib_devdata *dd = (struct qib_devdata *)opaque; | |
1390 | struct qib_pportdata *ppd = dd->pport; | |
1391 | struct qib_chip_specific *cs = dd->cspec; | |
1392 | int timeoff; | |
1393 | ||
1394 | /* | |
1395 | * Check link-training state for "stuck" state, when down. | |
1396 | * if found, try relock and schedule another try at | |
1397 | * exponentially growing delay, maxed at one second. | |
1398 | * if not stuck, our work is done. | |
1399 | */ | |
1400 | if ((dd->flags & QIB_INITTED) && !(ppd->lflags & | |
1401 | (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED | | |
1402 | QIBL_LINKACTIVE))) { | |
1403 | if (qib_relock_by_timer) { | |
1404 | if (!(ppd->lflags & QIBL_IB_LINK_DISABLED)) | |
1405 | toggle_7220_rclkrls(dd); | |
1406 | } | |
1407 | /* re-set timer for next check */ | |
1408 | timeoff = cs->relock_interval << 1; | |
1409 | if (timeoff > HZ) | |
1410 | timeoff = HZ; | |
1411 | cs->relock_interval = timeoff; | |
1412 | } else | |
1413 | timeoff = HZ; | |
1414 | mod_timer(&cs->relock_timer, jiffies + timeoff); | |
1415 | } | |
1416 | ||
1417 | void set_7220_relock_poll(struct qib_devdata *dd, int ibup) | |
1418 | { | |
1419 | struct qib_chip_specific *cs = dd->cspec; | |
1420 | ||
1421 | if (ibup) { | |
1422 | /* We are now up, relax timer to 1 second interval */ | |
1423 | if (cs->relock_timer_active) { | |
1424 | cs->relock_interval = HZ; | |
1425 | mod_timer(&cs->relock_timer, jiffies + HZ); | |
1426 | } | |
1427 | } else { | |
1428 | /* Transition to down, (re-)set timer to short interval. */ | |
1429 | unsigned int timeout; | |
1430 | ||
1431 | timeout = msecs_to_jiffies(RELOCK_FIRST_MS); | |
1432 | if (timeout == 0) | |
1433 | timeout = 1; | |
1434 | /* If timer has not yet been started, do so. */ | |
1435 | if (!cs->relock_timer_active) { | |
1436 | cs->relock_timer_active = 1; | |
1437 | init_timer(&cs->relock_timer); | |
1438 | cs->relock_timer.function = qib_run_relock; | |
1439 | cs->relock_timer.data = (unsigned long) dd; | |
1440 | cs->relock_interval = timeout; | |
1441 | cs->relock_timer.expires = jiffies + timeout; | |
1442 | add_timer(&cs->relock_timer); | |
1443 | } else { | |
1444 | cs->relock_interval = timeout; | |
1445 | mod_timer(&cs->relock_timer, jiffies + timeout); | |
1446 | } | |
1447 | } | |
1448 | } |