/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
				  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
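/*
 * For reference: BMASK(7, 4) expands to (((1 << 4) - 1) << 4) == 0xf0,
 * i.e. a mask with bits 7..4 set.
 */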

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
		 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
		  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QME7342)

#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
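/*
 * KREG_IDX() and KREG_IBPORT_IDX() convert the byte offsets generated in
 * qib_7322_regs.h into u64 array indices, since kernel registers are
 * accessed through the u64 __iomem *kregbase pointer (see qib_read_kreg64()
 * and friends below).
 */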

#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK << \
	QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
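/*
 * Example: IS_QMH() above uses SYM_FIELD((dd)->revision, Revision, BoardID)
 * to shift the Revision value right by QIB_7322_Revision_BoardID_LSB and
 * mask it with the field's RMASK, leaving just the BoardID field.
 */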

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */

#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
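/*
 * Note: this assumes the QIB_IB_SDR/DDR/QDR speed flags (from qib.h) occupy
 * the low three bits, so shifting by PORT_SPD_CAP_SHIFT (3) places port 2's
 * speed capabilities alongside port 1's in the feature mask.
 */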

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS ( \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))

/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
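/*
 * Counter indices here are offsets (in u64 units) from LBIntCnt, the first
 * counter in the group; they are used with the counter register base
 * pointers (see read_7322_creg() and read_7322_creg_port() below).
 */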

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
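/*
 * The delay values above scale inversely with the static IB rate: 2.5 Gb/s
 * (the slowest) gets 16, 40 Gb/s (the fastest) gets 1, so delay * rate is
 * roughly constant across entries (30 Gb/s simply rounds up to 2).
 */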

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] =
		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
	int rcv;
	void *arg;
	struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask;      /* clear bits which have dedicated handlers */
	u64 int_enable_mask;  /* for per port interrupts in single port mode */
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];
	int sdma_cpu[2];
	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx Emphasis entries, in "human readable" form. */
struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};

struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors.  They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * entry zero is unused, to simplify indexing
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
	char sdmamsgbuf[192]; /* for per-port sdma error messages */
};

static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};
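/*
 * Entry 0 (the empty name) is the catch-all handler (qib_7322intr); its
 * lsb of -1 indicates it is not tied to a single IntStatus bit.  The dca
 * flag presumably marks vectors that participate in DCA when
 * CONFIG_INFINIBAND_QIB_DCA is enabled.
 */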

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
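/*
 * MASK_ACROSS(0, 17) covers 18 bits: one RcvUrg/RcvAvail interrupt bit per
 * receive context (cf. the rcvavail_timeout[18] array in
 * struct qib_chip_specific above).
 */
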
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	QIB_I_SPIOSENT | \
	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors.  Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery; either way, we
 * want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

/*
 * These are errors that can occur when the link
 * changes state while a packet is being sent or received.  This doesn't
 * cover things like EBP or VCRC that can be the result of a send
 * having the link change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
1122 | #define QIB_E_P_LINK_PKTERRS (\ | |
1123 | QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\ | |
1124 | QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\ | |
1125 | QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\ | |
1126 | QIB_E_P_RUNEXPCHAR) | |
1127 | ||
1128 | /* | |
1129 | * This sets some bits more than once, but makes it more obvious which | |
1130 | * bits are not handled under other categories (such as QIB_E_SPKTERRS), | |
1131 | * and the repeat definition is not a problem. | |
1132 | */ | |
1133 | #define QIB_E_C_BITSEXTANT (\ | |
1134 | QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\ | |
1135 | QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\ | |
1136 | QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE) | |
1137 | ||
1138 | /* Likewise Neuter E_SPKT_ERRS_IGNORE */ | |
1139 | #define E_SPKT_ERRS_IGNORE 0 | |
1140 | ||
1141 | #define QIB_EXTS_MEMBIST_DISABLED \ | |
1142 | SYM_MASK(EXTStatus, MemBISTDisabled) | |
1143 | #define QIB_EXTS_MEMBIST_ENDTEST \ | |
1144 | SYM_MASK(EXTStatus, MemBISTEndTest) | |
1145 | ||
1146 | #define QIB_E_SPIOARMLAUNCH \ | |
1147 | ERR_MASK(SendArmLaunchErr) | |
1148 | ||
1149 | #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd) | |
1150 | #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd) | |
1151 | ||
1152 | /* | |
1153 | * IBTA_1_2 is set when multiple speeds are enabled (normal), | |
1154 | * and also if forced QDR (only QDR enabled). It's enabled for the | |
1155 | * forced QDR case so that scrambling will be enabled by the TS3 | |
1156 | * exchange, when supported by both sides of the link. | |
1157 | */ | |
1158 | #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE) | |
1159 | #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED) | |
1160 | #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR) | |
1161 | #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | |
1162 | #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | |
1163 | #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \ | |
1164 | SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)) | |
1165 | #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR) | |
1166 | ||
1167 | #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod) | |
1168 | #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod) | |
1169 | ||
1170 | #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS) | |
1171 | #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) | |
1172 | #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) | |
1173 | ||
1174 | #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP) | |
1175 | #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP) | |
1176 | #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \ | |
1177 | SYM_MASK(IBCCtrlB_0, HRTBT_ENB)) | |
1178 | #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \ | |
1179 | SYM_LSB(IBCCtrlB_0, HRTBT_ENB)) | |
1180 | #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB) | |
1181 | ||
1182 | #define IBA7322_REDIRECT_VEC_PER_REG 12 | |
1183 | ||
1184 | #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En) | |
1185 | #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En) | |
1186 | #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En) | |
1187 | #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En) | |
1188 | #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En) | |
1189 | ||
1190 | #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */ | |
1191 | ||
1192 | #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \ | |
e67306a3 | 1193 | .msg = #fldname , .sz = sizeof(#fldname) } |
f931551b | 1194 | #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \ |
e67306a3 | 1195 | fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) } |
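/*
 * Illustration (not generated code): HWE_AUTO_P(SDmaMemReadErr, 1) expands
 * to roughly
 *	{ .mask = SYM_MASK(HwErrMask, SDmaMemReadErrMask_1),
 *	  .msg = "SDmaMemReadErr", .sz = sizeof("SDmaMemReadErr") }
 * so each table entry carries the bit mask, the printable name, and the
 * string size (including the trailing NUL) used by err_decode() below.
 */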
f931551b RC |
1196 | static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = { |
1197 | HWE_AUTO_P(IBSerdesPClkNotDetect, 1), | |
1198 | HWE_AUTO_P(IBSerdesPClkNotDetect, 0), | |
1199 | HWE_AUTO(PCIESerdesPClkNotDetect), | |
1200 | HWE_AUTO(PowerOnBISTFailed), | |
1201 | HWE_AUTO(TempsenseTholdReached), | |
1202 | HWE_AUTO(MemoryErr), | |
1203 | HWE_AUTO(PCIeBusParityErr), | |
1204 | HWE_AUTO(PcieCplTimeout), | |
1205 | HWE_AUTO(PciePoisonedTLP), | |
1206 | HWE_AUTO_P(SDmaMemReadErr, 1), | |
1207 | HWE_AUTO_P(SDmaMemReadErr, 0), | |
1208 | HWE_AUTO_P(IBCBusFromSPCParityErr, 1), | |
b9e03e04 | 1209 | HWE_AUTO_P(IBCBusToSPCParityErr, 1), |
f931551b | 1210 | HWE_AUTO_P(IBCBusFromSPCParityErr, 0), |
b9e03e04 | 1211 | HWE_AUTO(statusValidNoEop), |
f931551b | 1212 | HWE_AUTO(LATriggered), |
e67306a3 | 1213 | { .mask = 0, .sz = 0 } |
f931551b RC |
1214 | }; |
1215 | ||
1216 | #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \ | |
e67306a3 | 1217 | .msg = #fldname, .sz = sizeof(#fldname) } |
f931551b | 1218 | #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \ |
e67306a3 | 1219 | .msg = #fldname, .sz = sizeof(#fldname) } |
f931551b | 1220 | static const struct qib_hwerror_msgs qib_7322error_msgs[] = { |
e67306a3 MM |
1221 | E_AUTO(RcvEgrFullErr), |
1222 | E_AUTO(RcvHdrFullErr), | |
f931551b RC |
1223 | E_AUTO(ResetNegated), |
1224 | E_AUTO(HardwareErr), | |
1225 | E_AUTO(InvalidAddrErr), | |
1226 | E_AUTO(SDmaVL15Err), | |
1227 | E_AUTO(SBufVL15MisUseErr), | |
1228 | E_AUTO(InvalidEEPCmd), | |
1229 | E_AUTO(RcvContextShareErr), | |
1230 | E_AUTO(SendVLMismatchErr), | |
1231 | E_AUTO(SendArmLaunchErr), | |
1232 | E_AUTO(SendSpecialTriggerErr), | |
1233 | E_AUTO(SDmaWrongPortErr), | |
1234 | E_AUTO(SDmaBufMaskDuplicateErr), | |
e67306a3 | 1235 | { .mask = 0, .sz = 0 } |
f931551b RC |
1236 | }; |
1237 | ||
1238 | static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = { | |
1239 | E_P_AUTO(IBStatusChanged), | |
1240 | E_P_AUTO(SHeadersErr), | |
1241 | E_P_AUTO(VL15BufMisuseErr), | |
1242 | /* | |
1243 | * SDmaHaltErr is not really an error, so make that clearer in the message. | |
1244 | */ | |
e67306a3 MM |
1245 | {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted", |
1246 | .sz = 11}, | |
f931551b RC |
1247 | E_P_AUTO(SDmaDescAddrMisalignErr), |
1248 | E_P_AUTO(SDmaUnexpDataErr), | |
1249 | E_P_AUTO(SDmaMissingDwErr), | |
1250 | E_P_AUTO(SDmaDwEnErr), | |
1251 | E_P_AUTO(SDmaRpyTagErr), | |
1252 | E_P_AUTO(SDma1stDescErr), | |
1253 | E_P_AUTO(SDmaBaseErr), | |
1254 | E_P_AUTO(SDmaTailOutOfBoundErr), | |
1255 | E_P_AUTO(SDmaOutOfBoundErr), | |
1256 | E_P_AUTO(SDmaGenMismatchErr), | |
1257 | E_P_AUTO(SendBufMisuseErr), | |
1258 | E_P_AUTO(SendUnsupportedVLErr), | |
1259 | E_P_AUTO(SendUnexpectedPktNumErr), | |
1260 | E_P_AUTO(SendDroppedDataPktErr), | |
1261 | E_P_AUTO(SendDroppedSmpPktErr), | |
1262 | E_P_AUTO(SendPktLenErr), | |
1263 | E_P_AUTO(SendUnderRunErr), | |
1264 | E_P_AUTO(SendMaxPktLenErr), | |
1265 | E_P_AUTO(SendMinPktLenErr), | |
1266 | E_P_AUTO(RcvIBLostLinkErr), | |
1267 | E_P_AUTO(RcvHdrErr), | |
1268 | E_P_AUTO(RcvHdrLenErr), | |
1269 | E_P_AUTO(RcvBadTidErr), | |
1270 | E_P_AUTO(RcvBadVersionErr), | |
1271 | E_P_AUTO(RcvIBFlowErr), | |
1272 | E_P_AUTO(RcvEBPErr), | |
1273 | E_P_AUTO(RcvUnsupportedVLErr), | |
1274 | E_P_AUTO(RcvUnexpectedCharErr), | |
1275 | E_P_AUTO(RcvShortPktLenErr), | |
1276 | E_P_AUTO(RcvLongPktLenErr), | |
1277 | E_P_AUTO(RcvMaxPktLenErr), | |
1278 | E_P_AUTO(RcvMinPktLenErr), | |
1279 | E_P_AUTO(RcvICRCErr), | |
1280 | E_P_AUTO(RcvVCRCErr), | |
1281 | E_P_AUTO(RcvFormatErr), | |
e67306a3 | 1282 | { .mask = 0, .sz = 0 } |
f931551b RC |
1283 | }; |
1284 | ||
1285 | /* | |
1286 | * Below generates "auto-message" for interrupts not specific to any port or | |
1287 | * context | |
1288 | */ | |
1289 | #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \ | |
e67306a3 | 1290 | .msg = #fldname, .sz = sizeof(#fldname) } |
f931551b RC |
1291 | /* Below generates "auto-message" for interrupts specific to a port */ |
1292 | #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\ | |
1293 | SYM_LSB(IntMask, fldname##Mask##_0), \ | |
1294 | SYM_LSB(IntMask, fldname##Mask##_1)), \ | |
e67306a3 | 1295 | .msg = #fldname "_P", .sz = sizeof(#fldname "_P") } |
f931551b RC |
1296 | /* For some reason, the SerDesTrimDone bits are reversed */ |
1297 | #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\ | |
1298 | SYM_LSB(IntMask, fldname##Mask##_1), \ | |
1299 | SYM_LSB(IntMask, fldname##Mask##_0)), \ | |
e67306a3 | 1300 | .msg = #fldname "_P", .sz = sizeof(#fldname "_P") } |
f931551b RC |
1301 | /* |
1302 | * Below generates "auto-message" for interrupts specific to a context, | |
1303 | * with ctxt-number appended | |
1304 | */ | |
1305 | #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\ | |
1306 | SYM_LSB(IntMask, fldname##0IntMask), \ | |
1307 | SYM_LSB(IntMask, fldname##17IntMask)), \ | |
e67306a3 | 1308 | .msg = #fldname "_C", .sz = sizeof(#fldname "_C") } |
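/*
 * Note (illustrative): INTR_AUTO_C(RcvUrg) produces one entry whose mask
 * spans the RcvUrg0IntMask..RcvUrg17IntMask bits, i.e. all 18 receive
 * contexts; err_decode() below appends the context number to the
 * "RcvUrg_C" message for whichever bit is actually set.
 */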
f931551b RC |
1309 | |
1310 | static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = { | |
1311 | INTR_AUTO_P(SDmaInt), | |
1312 | INTR_AUTO_P(SDmaProgressInt), | |
1313 | INTR_AUTO_P(SDmaIdleInt), | |
1314 | INTR_AUTO_P(SDmaCleanupDone), | |
1315 | INTR_AUTO_C(RcvUrg), | |
1316 | INTR_AUTO_P(ErrInt), | |
1317 | INTR_AUTO(ErrInt), /* non-port-specific errs */ | |
1318 | INTR_AUTO(AssertGPIOInt), | |
1319 | INTR_AUTO_P(SendDoneInt), | |
1320 | INTR_AUTO(SendBufAvailInt), | |
1321 | INTR_AUTO_C(RcvAvail), | |
e67306a3 | 1322 | { .mask = 0, .sz = 0 } |
f931551b RC |
1323 | }; |
1324 | ||
1325 | #define TXSYMPTOM_AUTO_P(fldname) \ | |
e67306a3 MM |
1326 | { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \ |
1327 | .msg = #fldname, .sz = sizeof(#fldname) } | |
f931551b RC |
1328 | static const struct qib_hwerror_msgs hdrchk_msgs[] = { |
1329 | TXSYMPTOM_AUTO_P(NonKeyPacket), | |
1330 | TXSYMPTOM_AUTO_P(GRHFail), | |
1331 | TXSYMPTOM_AUTO_P(PkeyFail), | |
1332 | TXSYMPTOM_AUTO_P(QPFail), | |
1333 | TXSYMPTOM_AUTO_P(SLIDFail), | |
1334 | TXSYMPTOM_AUTO_P(RawIPV6), | |
1335 | TXSYMPTOM_AUTO_P(PacketTooSmall), | |
e67306a3 | 1336 | { .mask = 0, .sz = 0 } |
f931551b RC |
1337 | }; |
1338 | ||
1339 | #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ | |
1340 | ||
1341 | /* | |
1342 | * Called when we might have an error that is specific to a particular | |
1343 | * PIO buffer, and may need to cancel that buffer so it can be re-used, | |
1344 | * without forcing an update of pioavail. | |
1345 | */ | |
1346 | static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd) | |
1347 | { | |
1348 | struct qib_devdata *dd = ppd->dd; | |
1349 | u32 i; | |
1350 | int any; | |
1351 | u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | |
1352 | u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG; | |
1353 | unsigned long sbuf[4]; | |
1354 | ||
1355 | /* | |
1356 | * It's possible that sendbuffererror could have bits set; might | |
1357 | * have already done this as a result of hardware error handling. | |
1358 | */ | |
1359 | any = 0; | |
1360 | for (i = 0; i < regcnt; ++i) { | |
1361 | sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i); | |
1362 | if (sbuf[i]) { | |
1363 | any = 1; | |
1364 | qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]); | |
1365 | } | |
1366 | } | |
1367 | ||
1368 | if (any) | |
1369 | qib_disarm_piobufs_set(dd, sbuf, piobcnt); | |
1370 | } | |
1371 | ||
1372 | /* No txe_recover yet, if ever */ | |
1373 | ||
1374 | /* No decode__errors yet */ | |
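/*
 * Decode the bits in "errs" against the given message table into msg/len.
 * Single-bit masks copy the name as-is; for multi-bit masks (the _P and
 * _C entries built with MASK_ACROSS above) the offset of the set bit
 * within the mask is appended, so e.g. the RcvUrg bit for context 3 would
 * decode as "RcvUrg_C_3".  Any bits not covered by the table are reported
 * at the end as "MORE:<hex>".
 */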
1375 | static void err_decode(char *msg, size_t len, u64 errs, | |
1376 | const struct qib_hwerror_msgs *msp) | |
1377 | { | |
1378 | u64 these, lmask; | |
1379 | int took, multi, n = 0; | |
1380 | ||
e67306a3 | 1381 | while (errs && msp && msp->mask) { |
f931551b RC |
1382 | multi = (msp->mask & (msp->mask - 1)); |
1383 | while (errs & msp->mask) { | |
1384 | these = (errs & msp->mask); | |
1385 | lmask = (these & (these - 1)) ^ these; | |
1386 | if (len) { | |
1387 | if (n++) { | |
1388 | /* separate the strings */ | |
1389 | *msg++ = ','; | |
1390 | len--; | |
1391 | } | |
e67306a3 MM |
1392 | BUG_ON(!msp->sz); |
1393 | /* msp->sz counts the nul */ | |
1394 | took = min_t(size_t, msp->sz - (size_t)1, len); | |
1395 | memcpy(msg, msp->msg, took); | |
f931551b RC |
1396 | len -= took; |
1397 | msg += took; | |
e67306a3 MM |
1398 | if (len) |
1399 | *msg = '\0'; | |
f931551b RC |
1400 | } |
1401 | errs &= ~lmask; | |
1402 | if (len && multi) { | |
1403 | /* More than one bit this mask */ | |
1404 | int idx = -1; | |
1405 | ||
1406 | while (lmask & msp->mask) { | |
1407 | ++idx; | |
1408 | lmask >>= 1; | |
1409 | } | |
1410 | took = scnprintf(msg, len, "_%d", idx); | |
1411 | len -= took; | |
1412 | msg += took; | |
1413 | } | |
1414 | } | |
1415 | ++msp; | |
1416 | } | |
1417 | /* If some bits are left, show in hex. */ | |
1418 | if (len && errs) | |
1419 | snprintf(msg, len, "%sMORE:%llX", n ? "," : "", | |
1420 | (unsigned long long) errs); | |
1421 | } | |
1422 | ||
1423 | /* only called if r1 set */ | |
1424 | static void flush_fifo(struct qib_pportdata *ppd) | |
1425 | { | |
1426 | struct qib_devdata *dd = ppd->dd; | |
1427 | u32 __iomem *piobuf; | |
1428 | u32 bufn; | |
1429 | u32 *hdr; | |
1430 | u64 pbc; | |
1431 | const unsigned hdrwords = 7; | |
1432 | static struct qib_ib_header ibhdr = { | |
1433 | .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH), | |
1434 | .lrh[1] = IB_LID_PERMISSIVE, | |
1435 | .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC), | |
1436 | .lrh[3] = IB_LID_PERMISSIVE, | |
1437 | .u.oth.bth[0] = cpu_to_be32( | |
1438 | (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY), | |
1439 | .u.oth.bth[1] = cpu_to_be32(0), | |
1440 | .u.oth.bth[2] = cpu_to_be32(0), | |
1441 | .u.oth.u.ud.deth[0] = cpu_to_be32(0), | |
1442 | .u.oth.u.ud.deth[1] = cpu_to_be32(0), | |
1443 | }; | |
1444 | ||
1445 | /* | |
1446 | * Send a dummy VL15 packet to flush the launch FIFO. | |
1447 | * This will not actually be sent since the TxeBypassIbc bit is set. | |
1448 | */ | |
1449 | pbc = PBC_7322_VL15_SEND | | |
1450 | (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) | | |
1451 | (hdrwords + SIZE_OF_CRC); | |
1452 | piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn); | |
1453 | if (!piobuf) | |
1454 | return; | |
1455 | writeq(pbc, piobuf); | |
1456 | hdr = (u32 *) &ibhdr; | |
1457 | if (dd->flags & QIB_PIO_FLUSH_WC) { | |
1458 | qib_flush_wc(); | |
1459 | qib_pio_copy(piobuf + 2, hdr, hdrwords - 1); | |
1460 | qib_flush_wc(); | |
1461 | __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1); | |
1462 | qib_flush_wc(); | |
1463 | } else | |
1464 | qib_pio_copy(piobuf + 2, hdr, hdrwords); | |
1465 | qib_sendbuf_done(dd, bufn); | |
1466 | } | |
1467 | ||
1468 | /* | |
1469 | * This is called with interrupts disabled and sdma_lock held. | |
1470 | */ | |
1471 | static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) | |
1472 | { | |
1473 | struct qib_devdata *dd = ppd->dd; | |
1474 | u64 set_sendctrl = 0; | |
1475 | u64 clr_sendctrl = 0; | |
1476 | ||
1477 | if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) | |
1478 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); | |
1479 | else | |
1480 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); | |
1481 | ||
1482 | if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) | |
1483 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); | |
1484 | else | |
1485 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); | |
1486 | ||
1487 | if (op & QIB_SDMA_SENDCTRL_OP_HALT) | |
1488 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); | |
1489 | else | |
1490 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); | |
1491 | ||
1492 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) | |
1493 | set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | | |
1494 | SYM_MASK(SendCtrl_0, TxeAbortIbc) | | |
1495 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo); | |
1496 | else | |
1497 | clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | | |
1498 | SYM_MASK(SendCtrl_0, TxeAbortIbc) | | |
1499 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo); | |
1500 | ||
1501 | spin_lock(&dd->sendctrl_lock); | |
1502 | ||
1503 | /* If we are draining everything, block sends first */ | |
1504 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { | |
1505 | ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); | |
1506 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
1507 | qib_write_kreg(dd, kr_scratch, 0); | |
1508 | } | |
1509 | ||
1510 | ppd->p_sendctrl |= set_sendctrl; | |
1511 | ppd->p_sendctrl &= ~clr_sendctrl; | |
1512 | ||
1513 | if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP) | |
1514 | qib_write_kreg_port(ppd, krp_sendctrl, | |
1515 | ppd->p_sendctrl | | |
1516 | SYM_MASK(SendCtrl_0, SDmaCleanup)); | |
1517 | else | |
1518 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
1519 | qib_write_kreg(dd, kr_scratch, 0); | |
1520 | ||
1521 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { | |
1522 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); | |
1523 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
1524 | qib_write_kreg(dd, kr_scratch, 0); | |
1525 | } | |
1526 | ||
1527 | spin_unlock(&dd->sendctrl_lock); | |
1528 | ||
1529 | if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1) | |
1530 | flush_fifo(ppd); | |
1531 | } | |
1532 | ||
1533 | static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd) | |
1534 | { | |
1535 | __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned); | |
1536 | } | |
1537 | ||
1538 | static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd) | |
1539 | { | |
1540 | /* | |
1541 | * Set SendDmaLenGen and clear and set | |
1542 | * the MSB of the generation count to enable generation checking | |
1543 | * and load the internal generation counter. | |
1544 | */ | |
1545 | qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt); | |
1546 | qib_write_kreg_port(ppd, krp_senddmalengen, | |
1547 | ppd->sdma_descq_cnt | | |
1548 | (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB)); | |
1549 | } | |
1550 | ||
1551 | /* | |
1552 | * Must be called with sdma_lock held, or before init finished. | |
1553 | */ | |
1554 | static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail) | |
1555 | { | |
1556 | /* Commit writes to memory and advance the tail on the chip */ | |
1557 | wmb(); | |
1558 | ppd->sdma_descq_tail = tail; | |
1559 | qib_write_kreg_port(ppd, krp_senddmatail, tail); | |
1560 | } | |
1561 | ||
1562 | /* | |
1563 | * This is called with interrupts disabled and sdma_lock held. | |
1564 | */ | |
1565 | static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd) | |
1566 | { | |
1567 | /* | |
1568 | * Drain all FIFOs. | |
1569 | * The hardware doesn't require this but we do it so that verbs | |
1570 | * and user applications don't wait for link active to send stale | |
1571 | * data. | |
1572 | */ | |
1573 | sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH); | |
1574 | ||
1575 | qib_sdma_7322_setlengen(ppd); | |
1576 | qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ | |
1577 | ppd->sdma_head_dma[0] = 0; | |
1578 | qib_7322_sdma_sendctrl(ppd, | |
1579 | ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP); | |
1580 | } | |
1581 | ||
1582 | #define DISABLES_SDMA ( \ | |
1583 | QIB_E_P_SDMAHALT | \ | |
1584 | QIB_E_P_SDMADESCADDRMISALIGN | \ | |
1585 | QIB_E_P_SDMAMISSINGDW | \ | |
1586 | QIB_E_P_SDMADWEN | \ | |
1587 | QIB_E_P_SDMARPYTAG | \ | |
1588 | QIB_E_P_SDMA1STDESC | \ | |
1589 | QIB_E_P_SDMABASE | \ | |
1590 | QIB_E_P_SDMATAILOUTOFBOUND | \ | |
1591 | QIB_E_P_SDMAOUTOFBOUND | \ | |
1592 | QIB_E_P_SDMAGENMISMATCH) | |
1593 | ||
1594 | static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs) | |
1595 | { | |
1596 | unsigned long flags; | |
1597 | struct qib_devdata *dd = ppd->dd; | |
1598 | ||
1599 | errs &= QIB_E_P_SDMAERRS; | |
b268e4db MM |
1600 | err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf), |
1601 | errs, qib_7322p_error_msgs); | |
f931551b RC |
1602 | |
1603 | if (errs & QIB_E_P_SDMAUNEXPDATA) | |
1604 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, | |
1605 | ppd->port); | |
1606 | ||
1607 | spin_lock_irqsave(&ppd->sdma_lock, flags); | |
1608 | ||
0b3ddf38 DL |
1609 | if (errs != QIB_E_P_SDMAHALT) { |
1610 | /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */ | |
1611 | qib_dev_porterr(dd, ppd->port, | |
1612 | "SDMA %s 0x%016llx %s\n", | |
1613 | qib_sdma_state_names[ppd->sdma_state.current_state], | |
1614 | errs, ppd->cpspec->sdmamsgbuf); | |
1615 | dump_sdma_7322_state(ppd); | |
1616 | } | |
1617 | ||
f931551b RC |
1618 | switch (ppd->sdma_state.current_state) { |
1619 | case qib_sdma_state_s00_hw_down: | |
1620 | break; | |
1621 | ||
1622 | case qib_sdma_state_s10_hw_start_up_wait: | |
1623 | if (errs & QIB_E_P_SDMAHALT) | |
1624 | __qib_sdma_process_event(ppd, | |
1625 | qib_sdma_event_e20_hw_started); | |
1626 | break; | |
1627 | ||
1628 | case qib_sdma_state_s20_idle: | |
1629 | break; | |
1630 | ||
1631 | case qib_sdma_state_s30_sw_clean_up_wait: | |
1632 | break; | |
1633 | ||
1634 | case qib_sdma_state_s40_hw_clean_up_wait: | |
1635 | if (errs & QIB_E_P_SDMAHALT) | |
1636 | __qib_sdma_process_event(ppd, | |
1637 | qib_sdma_event_e50_hw_cleaned); | |
1638 | break; | |
1639 | ||
1640 | case qib_sdma_state_s50_hw_halt_wait: | |
1641 | if (errs & QIB_E_P_SDMAHALT) | |
1642 | __qib_sdma_process_event(ppd, | |
1643 | qib_sdma_event_e60_hw_halted); | |
1644 | break; | |
1645 | ||
1646 | case qib_sdma_state_s99_running: | |
1647 | __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted); | |
1648 | __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); | |
1649 | break; | |
1650 | } | |
1651 | ||
1652 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | |
1653 | } | |
1654 | ||
1655 | /* | |
1656 | * handle per-device errors (not per-port errors) | |
1657 | */ | |
1658 | static noinline void handle_7322_errors(struct qib_devdata *dd) | |
1659 | { | |
1660 | char *msg; | |
1661 | u64 iserr = 0; | |
1662 | u64 errs; | |
1663 | u64 mask; | |
1664 | int log_idx; | |
1665 | ||
1666 | qib_stats.sps_errints++; | |
1667 | errs = qib_read_kreg64(dd, kr_errstatus); | |
1668 | if (!errs) { | |
7fac3301 MM |
1669 | qib_devinfo(dd->pcidev, |
1670 | "device error interrupt, but no error bits set!\n"); | |
f931551b RC |
1671 | goto done; |
1672 | } | |
1673 | ||
1674 | /* don't report errors that are masked */ | |
1675 | errs &= dd->cspec->errormask; | |
1676 | msg = dd->cspec->emsgbuf; | |
1677 | ||
1678 | /* do these first, they are most important */ | |
1679 | if (errs & QIB_E_HARDWARE) { | |
1680 | *msg = '\0'; | |
1681 | qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); | |
1682 | } else | |
1683 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | |
1684 | if (errs & dd->eep_st_masks[log_idx].errs_to_log) | |
1685 | qib_inc_eeprom_err(dd, log_idx, 1); | |
1686 | ||
1687 | if (errs & QIB_E_SPKTERRS) { | |
1688 | qib_disarm_7322_senderrbufs(dd->pport); | |
1689 | qib_stats.sps_txerrs++; | |
1690 | } else if (errs & QIB_E_INVALIDADDR) | |
1691 | qib_stats.sps_txerrs++; | |
1692 | else if (errs & QIB_E_ARMLAUNCH) { | |
1693 | qib_stats.sps_txerrs++; | |
1694 | qib_disarm_7322_senderrbufs(dd->pport); | |
1695 | } | |
1696 | qib_write_kreg(dd, kr_errclear, errs); | |
1697 | ||
1698 | /* | |
1699 | * The ones we mask off are handled specially below | |
1700 | * or above. Also mask SDMADISABLED by default as it | |
1701 | * is too chatty. | |
1702 | */ | |
1703 | mask = QIB_E_HARDWARE; | |
1704 | *msg = '\0'; | |
1705 | ||
1706 | err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, | |
1707 | qib_7322error_msgs); | |
1708 | ||
1709 | /* | |
1710 | * Getting reset is a tragedy for all ports. Mark the device | |
1711 | * _and_ the ports as "offline" in a way meaningful to each. | |
1712 | */ | |
1713 | if (errs & QIB_E_RESET) { | |
1714 | int pidx; | |
1715 | ||
7fac3301 MM |
1716 | qib_dev_err(dd, |
1717 | "Got reset, requires re-init (unload and reload driver)\n"); | |
f931551b RC |
1718 | dd->flags &= ~QIB_INITTED; /* needs re-init */ |
1719 | /* mark as having had error */ | |
1720 | *dd->devstatusp |= QIB_STATUS_HWERROR; | |
1721 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
1722 | if (dd->pport[pidx].link_speed_supported) | |
1723 | *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; | |
1724 | } | |
1725 | ||
1726 | if (*msg && iserr) | |
1727 | qib_dev_err(dd, "%s error\n", msg); | |
1728 | ||
1729 | /* | |
1730 | * If there were hdrq or egrfull errors, wake up any processes | |
1731 | * waiting in poll. We used to try to check which contexts had | |
1732 | * the overflow, but given the cost of that and the chip reads | |
1733 | * to support it, it's better to just wake everybody up if we | |
1734 | * get an overflow; waiters can poll again if it's not them. | |
1735 | */ | |
1736 | if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { | |
1737 | qib_handle_urcv(dd, ~0U); | |
1738 | if (errs & ERR_MASK(RcvEgrFullErr)) | |
1739 | qib_stats.sps_buffull++; | |
1740 | else | |
1741 | qib_stats.sps_hdrfull++; | |
1742 | } | |
1743 | ||
1744 | done: | |
1745 | return; | |
1746 | } | |
1747 | ||
e67306a3 MM |
1748 | static void qib_error_tasklet(unsigned long data) |
1749 | { | |
1750 | struct qib_devdata *dd = (struct qib_devdata *)data; | |
1751 | ||
1752 | handle_7322_errors(dd); | |
1753 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | |
1754 | } | |
1755 | ||
f931551b RC |
1756 | static void reenable_chase(unsigned long opaque) |
1757 | { | |
1758 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | |
1759 | ||
1760 | ppd->cpspec->chase_timer.expires = 0; | |
1761 | qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | |
1762 | QLOGIC_IB_IBCC_LINKINITCMD_POLL); | |
1763 | } | |
1764 | ||
8482d5d1 MM |
1765 | static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow, |
1766 | u8 ibclt) | |
f931551b RC |
1767 | { |
1768 | ppd->cpspec->chase_end = 0; | |
1769 | ||
1770 | if (!qib_chase) | |
1771 | return; | |
1772 | ||
1773 | qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | |
1774 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
1775 | ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME; | |
1776 | add_timer(&ppd->cpspec->chase_timer); | |
1777 | } | |
1778 | ||
1779 | static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) | |
1780 | { | |
1781 | u8 ibclt; | |
8482d5d1 | 1782 | unsigned long tnow; |
f931551b RC |
1783 | |
1784 | ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState); | |
1785 | ||
1786 | /* | |
1787 | * Detect and handle the state chase issue, where we can | |
1788 | * get stuck if we are unlucky on timing on both sides of | |
1789 | * the link. If we are, we disable, set a timer, and | |
1790 | * then re-enable. | |
1791 | */ | |
1792 | switch (ibclt) { | |
1793 | case IB_7322_LT_STATE_CFGRCVFCFG: | |
1794 | case IB_7322_LT_STATE_CFGWAITRMT: | |
1795 | case IB_7322_LT_STATE_TXREVLANES: | |
1796 | case IB_7322_LT_STATE_CFGENH: | |
8482d5d1 | 1797 | tnow = jiffies; |
f931551b | 1798 | if (ppd->cpspec->chase_end && |
8482d5d1 | 1799 | time_after(tnow, ppd->cpspec->chase_end)) |
f931551b RC |
1800 | disable_chase(ppd, tnow, ibclt); |
1801 | else if (!ppd->cpspec->chase_end) | |
1802 | ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; | |
1803 | break; | |
1804 | default: | |
1805 | ppd->cpspec->chase_end = 0; | |
1806 | break; | |
1807 | } | |
1808 | ||
31264484 MH |
1809 | if (((ibclt >= IB_7322_LT_STATE_CFGTEST && |
1810 | ibclt <= IB_7322_LT_STATE_CFGWAITENH) || | |
1811 | ibclt == IB_7322_LT_STATE_LINKUP) && | |
f931551b RC |
1812 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { |
1813 | force_h1(ppd); | |
1814 | ppd->cpspec->qdr_reforce = 1; | |
a0a234d4 MM |
1815 | if (!ppd->dd->cspec->r1) |
1816 | serdes_7322_los_enable(ppd, 0); | |
f931551b RC |
1817 | } else if (ppd->cpspec->qdr_reforce && |
1818 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && | |
1819 | (ibclt == IB_7322_LT_STATE_CFGENH || | |
1820 | ibclt == IB_7322_LT_STATE_CFGIDLE || | |
1821 | ibclt == IB_7322_LT_STATE_LINKUP)) | |
1822 | force_h1(ppd); | |
1823 | ||
1824 | if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) && | |
1825 | ppd->link_speed_enabled == QIB_IB_QDR && | |
1826 | (ibclt == IB_7322_LT_STATE_CFGTEST || | |
1827 | ibclt == IB_7322_LT_STATE_CFGENH || | |
1828 | (ibclt >= IB_7322_LT_STATE_POLLACTIVE && | |
1829 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) | |
1830 | adj_tx_serdes(ppd); | |
1831 | ||
a0a234d4 MM |
1832 | if (ibclt != IB_7322_LT_STATE_LINKUP) { |
1833 | u8 ltstate = qib_7322_phys_portstate(ibcst); | |
1834 | u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, | |
1835 | LinkTrainingState); | |
1836 | if (!ppd->dd->cspec->r1 && | |
1837 | pibclt == IB_7322_LT_STATE_LINKUP && | |
1838 | ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && | |
1839 | ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && | |
1840 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && | |
1841 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | |
1842 | /* If the link went down (but not into recovery), | |
1843 | * turn LOS back on */ | |
1844 | serdes_7322_los_enable(ppd, 1); | |
1845 | if (!ppd->cpspec->qdr_dfe_on && | |
1846 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { | |
1847 | ppd->cpspec->qdr_dfe_on = 1; | |
1848 | ppd->cpspec->qdr_dfe_time = 0; | |
1849 | /* On link down, reenable QDR adaptation */ | |
1850 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
1851 | ppd->dd->cspec->r1 ? | |
1852 | QDR_STATIC_ADAPT_DOWN_R1 : | |
1853 | QDR_STATIC_ADAPT_DOWN); | |
7fac3301 MM |
1854 | pr_info( |
1855 | "IB%u:%u re-enabled QDR adaptation ibclt %x\n", | |
1856 | ppd->dd->unit, ppd->port, ibclt); | |
a0a234d4 | 1857 | } |
f931551b RC |
1858 | } |
1859 | } | |
1860 | ||
f2d255a0 MM |
1861 | static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32); |
1862 | ||
f931551b RC |
1863 | /* |
1864 | * This is per-pport error handling. | |
1865 | * will likely get it's own MSIx interrupt (one for each port, | |
1866 | * although just a single handler). | |
1867 | */ | |
1868 | static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) | |
1869 | { | |
1870 | char *msg; | |
1871 | u64 ignore_this_time = 0, iserr = 0, errs, fmask; | |
1872 | struct qib_devdata *dd = ppd->dd; | |
1873 | ||
1874 | /* do this as soon as possible */ | |
1875 | fmask = qib_read_kreg64(dd, kr_act_fmask); | |
1876 | if (!fmask) | |
1877 | check_7322_rxe_status(ppd); | |
1878 | ||
1879 | errs = qib_read_kreg_port(ppd, krp_errstatus); | |
1880 | if (!errs) | |
1881 | qib_devinfo(dd->pcidev, | |
1882 | "Port%d error interrupt, but no error bits set!\n", | |
1883 | ppd->port); | |
1884 | if (!fmask) | |
1885 | errs &= ~QIB_E_P_IBSTATUSCHANGED; | |
1886 | if (!errs) | |
1887 | goto done; | |
1888 | ||
1889 | msg = ppd->cpspec->epmsgbuf; | |
1890 | *msg = '\0'; | |
1891 | ||
1892 | if (errs & ~QIB_E_P_BITSEXTANT) { | |
1893 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, | |
1894 | errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); | |
1895 | if (!*msg) | |
1896 | snprintf(msg, sizeof ppd->cpspec->epmsgbuf, | |
1897 | "no others"); | |
7fac3301 MM |
1898 | qib_dev_porterr(dd, ppd->port, |
1899 | "error interrupt with unknown errors 0x%016Lx set (and %s)\n", | |
1900 | (errs & ~QIB_E_P_BITSEXTANT), msg); | |
f931551b RC |
1901 | *msg = '\0'; |
1902 | } | |
1903 | ||
1904 | if (errs & QIB_E_P_SHDR) { | |
1905 | u64 symptom; | |
1906 | ||
1907 | /* determine cause, then write to clear */ | |
1908 | symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); | |
1909 | qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); | |
1910 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, | |
1911 | hdrchk_msgs); | |
1912 | *msg = '\0'; | |
1913 | /* senderrbuf cleared in SPKTERRS below */ | |
1914 | } | |
1915 | ||
1916 | if (errs & QIB_E_P_SPKTERRS) { | |
1917 | if ((errs & QIB_E_P_LINK_PKTERRS) && | |
1918 | !(ppd->lflags & QIBL_LINKACTIVE)) { | |
1919 | /* | |
1920 | * This can happen when trying to bring the link | |
1921 | * up, but the IB link changes state at the "wrong" | |
1922 | * time. The IB logic then complains that the packet | |
1923 | * isn't valid. We don't want to confuse people, so | |
1924 | * we just don't print them, except at debug | |
1925 | */ | |
1926 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, | |
1927 | (errs & QIB_E_P_LINK_PKTERRS), | |
1928 | qib_7322p_error_msgs); | |
1929 | *msg = '\0'; | |
1930 | ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; | |
1931 | } | |
1932 | qib_disarm_7322_senderrbufs(ppd); | |
1933 | } else if ((errs & QIB_E_P_LINK_PKTERRS) && | |
1934 | !(ppd->lflags & QIBL_LINKACTIVE)) { | |
1935 | /* | |
1936 | * This can happen when SMA is trying to bring the link | |
1937 | * up, but the IB link changes state at the "wrong" time. | |
1938 | * The IB logic then complains that the packet isn't | |
1939 | * valid. We don't want to confuse people, so we just | |
1940 | * don't print them, except at debug | |
1941 | */ | |
1942 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, | |
1943 | qib_7322p_error_msgs); | |
1944 | ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; | |
1945 | *msg = '\0'; | |
1946 | } | |
1947 | ||
1948 | qib_write_kreg_port(ppd, krp_errclear, errs); | |
1949 | ||
1950 | errs &= ~ignore_this_time; | |
1951 | if (!errs) | |
1952 | goto done; | |
1953 | ||
1954 | if (errs & QIB_E_P_RPKTERRS) | |
1955 | qib_stats.sps_rcverrs++; | |
1956 | if (errs & QIB_E_P_SPKTERRS) | |
1957 | qib_stats.sps_txerrs++; | |
1958 | ||
1959 | iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS); | |
1960 | ||
1961 | if (errs & QIB_E_P_SDMAERRS) | |
1962 | sdma_7322_p_errors(ppd, errs); | |
1963 | ||
1964 | if (errs & QIB_E_P_IBSTATUSCHANGED) { | |
1965 | u64 ibcs; | |
1966 | u8 ltstate; | |
1967 | ||
1968 | ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a); | |
1969 | ltstate = qib_7322_phys_portstate(ibcs); | |
1970 | ||
1971 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | |
1972 | handle_serdes_issues(ppd, ibcs); | |
1973 | if (!(ppd->cpspec->ibcctrl_a & | |
1974 | SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) { | |
1975 | /* | |
1976 | * We got our interrupt, so init code should be | |
1977 | * happy and not try alternatives. Now squelch | |
1978 | * other "chatter" from link-negotiation (pre Init) | |
1979 | */ | |
1980 | ppd->cpspec->ibcctrl_a |= | |
1981 | SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | |
1982 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
1983 | ppd->cpspec->ibcctrl_a); | |
1984 | } | |
1985 | ||
1986 | /* Update our picture of width and speed from chip */ | |
1987 | ppd->link_width_active = | |
1988 | (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ? | |
1989 | IB_WIDTH_4X : IB_WIDTH_1X; | |
1990 | ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0, | |
1991 | LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs & | |
1992 | SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ? | |
1993 | QIB_IB_DDR : QIB_IB_SDR; | |
1994 | ||
1995 | if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate != | |
1996 | IB_PHYSPORTSTATE_DISABLED) | |
1997 | qib_set_ib_7322_lstate(ppd, 0, | |
1998 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
d70585f7 | 1999 | else |
f931551b RC |
2000 | /* |
2001 | * Since going into a recovery state causes the link | |
2002 | * state to go down and since recovery is transitory, | |
2003 | * it is better if we "miss" ever seeing the link | |
2004 | * training state go into recovery (i.e., ignore this | |
2005 | * transition for link state special handling purposes) | |
2006 | * without updating lastibcstat. | |
2007 | */ | |
2008 | if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && | |
2009 | ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && | |
2010 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && | |
2011 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | |
2012 | qib_handle_e_ibstatuschanged(ppd, ibcs); | |
2013 | } | |
2014 | if (*msg && iserr) | |
2015 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | |
2016 | ||
2017 | if (ppd->state_wanted & ppd->lflags) | |
2018 | wake_up_interruptible(&ppd->state_wait); | |
2019 | done: | |
2020 | return; | |
2021 | } | |
2022 | ||
2023 | /* enable/disable chip from delivering interrupts */ | |
2024 | static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) | |
2025 | { | |
2026 | if (enable) { | |
2027 | if (dd->flags & QIB_BADINTR) | |
2028 | return; | |
2029 | qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask); | |
2030 | /* cause any pending enabled interrupts to be re-delivered */ | |
2031 | qib_write_kreg(dd, kr_intclear, 0ULL); | |
2032 | if (dd->cspec->num_msix_entries) { | |
2033 | /* and same for MSIx */ | |
2034 | u64 val = qib_read_kreg64(dd, kr_intgranted); | |
2035 | if (val) | |
2036 | qib_write_kreg(dd, kr_intgranted, val); | |
2037 | } | |
2038 | } else | |
2039 | qib_write_kreg(dd, kr_intmask, 0ULL); | |
2040 | } | |
2041 | ||
2042 | /* | |
2043 | * Try to cleanup as much as possible for anything that might have gone | |
2044 | * wrong while in freeze mode, such as pio buffers being written by user | |
2045 | * processes (causing armlaunch), send errors due to going into freeze mode, | |
2046 | * etc., and try to avoid causing extra interrupts while doing so. | |
2047 | * Forcibly update the in-memory pioavail register copies after cleanup | |
2048 | * because the chip won't do it while in freeze mode (the register values | |
2049 | * themselves are kept correct). | |
2050 | * Make sure that we don't lose any important interrupts by using the chip | |
2051 | * feature that says that writing 0 to a bit in *clear that is set in | |
2052 | * *status will cause an interrupt to be generated again (if allowed by | |
2053 | * the *mask value). | |
2054 | * This is in chip-specific code because of all of the register accesses, | |
2055 | * even though the details are similar on most chips. | |
2056 | */ | |
2057 | static void qib_7322_clear_freeze(struct qib_devdata *dd) | |
2058 | { | |
2059 | int pidx; | |
2060 | ||
2061 | /* disable error interrupts, to avoid confusion */ | |
2062 | qib_write_kreg(dd, kr_errmask, 0ULL); | |
2063 | ||
2064 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
2065 | if (dd->pport[pidx].link_speed_supported) | |
2066 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, | |
2067 | 0ULL); | |
2068 | ||
2069 | /* also disable interrupts; errormask is sometimes overwritten */ | |
2070 | qib_7322_set_intr_state(dd, 0); | |
2071 | ||
2072 | /* clear the freeze, and be sure chip saw it */ | |
2073 | qib_write_kreg(dd, kr_control, dd->control); | |
2074 | qib_read_kreg32(dd, kr_scratch); | |
2075 | ||
2076 | /* | |
2077 | * Force new interrupt if any hwerr, error or interrupt bits are | |
2078 | * still set, and clear "safe" send packet errors related to freeze | |
2079 | * and cancelling sends. Re-enable error interrupts before possible | |
2080 | * force of re-interrupt on pending interrupts. | |
2081 | */ | |
2082 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | |
2083 | qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); | |
2084 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | |
2085 | /* We need to purge per-port errs and reset mask, too */ | |
2086 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
2087 | if (!dd->pport[pidx].link_speed_supported) | |
2088 | continue; | |
2089 | qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); | |
2090 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); | |
2091 | } | |
2092 | qib_7322_set_intr_state(dd, 1); | |
2093 | } | |
2094 | ||
2095 | /* no error handling to speak of */ | |
2096 | /** | |
2097 | * qib_7322_handle_hwerrors - display hardware errors. | |
2098 | * @dd: the qlogic_ib device | |
2099 | * @msg: the output buffer | |
2100 | * @msgl: the size of the output buffer | |
2101 | * | |
2102 | * Use the same msg buffer as regular errors (qib_handle_errors()) | |
2103 | * to avoid excessive stack use. | |
2104 | * Most hardware errors are catastrophic, but for right now, we'll | |
2105 | * print them and continue. | |
2106 | */ | |
2107 | static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | |
2108 | size_t msgl) | |
2109 | { | |
2110 | u64 hwerrs; | |
2111 | u32 ctrl; | |
2112 | int isfatal = 0; | |
2113 | ||
2114 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | |
2115 | if (!hwerrs) | |
2116 | goto bail; | |
2117 | if (hwerrs == ~0ULL) { | |
7fac3301 MM |
2118 | qib_dev_err(dd, |
2119 | "Read of hardware error status failed (all bits set); ignoring\n"); | |
f931551b RC |
2120 | goto bail; |
2121 | } | |
2122 | qib_stats.sps_hwerrs++; | |
2123 | ||
2124 | /* Always clear the error status register, except BIST fail */ | |
2125 | qib_write_kreg(dd, kr_hwerrclear, hwerrs & | |
2126 | ~HWE_MASK(PowerOnBISTFailed)); | |
2127 | ||
2128 | hwerrs &= dd->cspec->hwerrmask; | |
2129 | ||
2130 | /* no EEPROM logging, yet */ | |
2131 | ||
2132 | if (hwerrs) | |
7fac3301 MM |
2133 | qib_devinfo(dd->pcidev, |
2134 | "Hardware error: hwerr=0x%llx (cleared)\n", | |
2135 | (unsigned long long) hwerrs); | |
f931551b RC |
2136 | |
2137 | ctrl = qib_read_kreg32(dd, kr_control); | |
2138 | if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { | |
2139 | /* | |
2140 | * No recovery yet... | |
2141 | */ | |
2142 | if ((hwerrs & ~HWE_MASK(LATriggered)) || | |
2143 | dd->cspec->stay_in_freeze) { | |
2144 | /* | |
2145 | * If any bits we aren't ignoring are set, only make the | |
2146 | * complaint once, in case it's stuck or recurring and we | |
2147 | * get here multiple times. | |
2148 | * Force the link down, so the switch knows, and the | |
2149 | * LEDs are turned off. | |
2150 | */ | |
2151 | if (dd->flags & QIB_INITTED) | |
2152 | isfatal = 1; | |
2153 | } else | |
2154 | qib_7322_clear_freeze(dd); | |
2155 | } | |
2156 | ||
2157 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | |
2158 | isfatal = 1; | |
7fac3301 MM |
2159 | strlcpy(msg, |
2160 | "[Memory BIST test failed, InfiniPath hardware unusable]", | |
2161 | msgl); | |
f931551b RC |
2162 | /* ignore from now on, so disable until driver reloaded */ |
2163 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | |
2164 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | |
2165 | } | |
2166 | ||
2167 | err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs); | |
2168 | ||
2169 | /* Ignore esoteric PLL failures et al. */ | |
2170 | ||
2171 | qib_dev_err(dd, "%s hardware error\n", msg); | |
2172 | ||
0b3ddf38 DL |
2173 | if (hwerrs & |
2174 | (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) | | |
2175 | SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) { | |
2176 | int pidx = 0; | |
2177 | int err; | |
2178 | unsigned long flags; | |
2179 | struct qib_pportdata *ppd = dd->pport; | |
2180 | for (; pidx < dd->num_pports; ++pidx, ppd++) { | |
2181 | err = 0; | |
2182 | if (pidx == 0 && (hwerrs & | |
2183 | SYM_MASK(HwErrMask, SDmaMemReadErrMask_0))) | |
2184 | err++; | |
2185 | if (pidx == 1 && (hwerrs & | |
2186 | SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) | |
2187 | err++; | |
2188 | if (err) { | |
2189 | spin_lock_irqsave(&ppd->sdma_lock, flags); | |
2190 | dump_sdma_7322_state(ppd); | |
2191 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | |
2192 | } | |
2193 | } | |
2194 | } | |
2195 | ||
f931551b | 2196 | if (isfatal && !dd->diag_client) { |
7fac3301 MM |
2197 | qib_dev_err(dd, |
2198 | "Fatal Hardware Error, no longer usable, SN %.16s\n", | |
2199 | dd->serial); | |
f931551b RC |
2200 | /* |
2201 | * for /sys status file and user programs to print; if no | |
2202 | * trailing brace is copied, we'll know it was truncated. | |
2203 | */ | |
2204 | if (dd->freezemsg) | |
2205 | snprintf(dd->freezemsg, dd->freezelen, | |
2206 | "{%s}", msg); | |
2207 | qib_disable_after_error(dd); | |
2208 | } | |
2209 | bail:; | |
2210 | } | |
2211 | ||
2212 | /** | |
2213 | * qib_7322_init_hwerrors - enable hardware errors | |
2214 | * @dd: the qlogic_ib device | |
2215 | * | |
2216 | * now that we have finished initializing everything that might reasonably | |
2217 | * cause a hardware error, and cleared those error bits as they occur, | |
2218 | * we can enable hardware errors in the mask (potentially enabling | |
2219 | * freeze mode), and enable hardware errors as errors (along with | |
2220 | * everything else) in errormask | |
2221 | */ | |
2222 | static void qib_7322_init_hwerrors(struct qib_devdata *dd) | |
2223 | { | |
2224 | int pidx; | |
2225 | u64 extsval; | |
2226 | ||
2227 | extsval = qib_read_kreg64(dd, kr_extstatus); | |
2228 | if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED | | |
2229 | QIB_EXTS_MEMBIST_ENDTEST))) | |
2230 | qib_dev_err(dd, "MemBIST did not complete!\n"); | |
2231 | ||
2232 | /* never clear BIST failure, so reported on each driver load */ | |
2233 | qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); | |
2234 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | |
2235 | ||
2236 | /* clear all */ | |
2237 | qib_write_kreg(dd, kr_errclear, ~0ULL); | |
2238 | /* enable errors that are masked, at least this first time. */ | |
2239 | qib_write_kreg(dd, kr_errmask, ~0ULL); | |
2240 | dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); | |
2241 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
2242 | if (dd->pport[pidx].link_speed_supported) | |
2243 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, | |
2244 | ~0ULL); | |
2245 | } | |
2246 | ||
2247 | /* | |
2248 | * Disable and enable the armlaunch error. Used for PIO bandwidth testing | |
2249 | * on chips that are count-based, rather than trigger-based. There is no | |
2250 | * reference counting, but that's also fine, given the intended use. | |
2251 | * Only chip-specific because it's all register accesses | |
2252 | */ | |
2253 | static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable) | |
2254 | { | |
2255 | if (enable) { | |
2256 | qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH); | |
2257 | dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH; | |
2258 | } else | |
2259 | dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH; | |
2260 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | |
2261 | } | |
2262 | ||
2263 | /* | |
2264 | * Formerly took parameter <which> in pre-shifted, | |
2265 | * pre-merged form with LinkCmd and LinkInitCmd | |
2266 | * together, and assuming the zero was NOP. | |
2267 | */ | |
2268 | static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, | |
2269 | u16 linitcmd) | |
2270 | { | |
2271 | u64 mod_wd; | |
2272 | struct qib_devdata *dd = ppd->dd; | |
2273 | unsigned long flags; | |
2274 | ||
2275 | if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { | |
2276 | /* | |
2277 | * If we are told to disable, note that so link-recovery | |
2278 | * code does not attempt to bring us back up. | |
2279 | * Also reset everything that we can, so we start | |
2280 | * completely clean when re-enabled (before we | |
2281 | * actually issue the disable to the IBC) | |
2282 | */ | |
2283 | qib_7322_mini_pcs_reset(ppd); | |
2284 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
2285 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | |
2286 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
2287 | } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { | |
2288 | /* | |
2289 | * Any other linkinitcmd will lead to LINKDOWN and then | |
2290 | * to INIT (if all is well), so clear flag to let | |
2291 | * link-recovery code attempt to bring us back up. | |
2292 | */ | |
2293 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
2294 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | |
2295 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
2296 | /* | |
2297 | * Clear status change interrupt reduction so the | |
2298 | * new state is seen. | |
2299 | */ | |
2300 | ppd->cpspec->ibcctrl_a &= | |
2301 | ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | |
2302 | } | |
2303 | ||
2304 | mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) | | |
2305 | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | |
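	/*
	 * For example, reenable_chase() above passes
	 * (QLOGIC_IB_IBCC_LINKCMD_DOWN, QLOGIC_IB_IBCC_LINKINITCMD_POLL),
	 * composing a single ibcctrl_a write that takes the link down and
	 * immediately restarts polling.
	 */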
2306 | ||
2307 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a | | |
2308 | mod_wd); | |
2309 | /* write to chip to prevent back-to-back writes of ibc reg */ | |
2310 | qib_write_kreg(dd, kr_scratch, 0); | |
2311 | ||
2312 | } | |
2313 | ||
2314 | /* | |
2315 | * The total RCV buffer memory is 64KB, used for both ports, and is | |
2316 | * in units of 64 bytes (same as IB flow control credit unit). | |
2317 | * The consumedVL fields in the same registers are in 32 byte units! | |
2318 | * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks, | |
2319 | * and we can therefore allocate just 9 IB credits for 2 VL15 packets | |
2320 | * in krp_rxcreditvl15, rather than 10. | |
2321 | */ | |
2322 | #define RCV_BUF_UNITSZ 64 | |
2323 | #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports)) | |
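/*
 * Illustrative numbers only: with both ports in use this is
 * 64K / (64 * 2) = 512 units per port, and the VL15 allocation below,
 * (2 * 288 + 63) / 64 = 9 units, matches the "9 IB credits for 2 VL15
 * packets" noted above.
 */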
2324 | ||
2325 | static void set_vls(struct qib_pportdata *ppd) | |
2326 | { | |
2327 | int i, numvls, totcred, cred_vl, vl0extra; | |
2328 | struct qib_devdata *dd = ppd->dd; | |
2329 | u64 val; | |
2330 | ||
2331 | numvls = qib_num_vls(ppd->vls_operational); | |
2332 | ||
2333 | /* | |
2334 | * Set up per-VL credits. Below is a kluge based on these assumptions: | |
2335 | * 1) port is disabled at the time early_init is called. | |
2336 | * 2) give VL15 enough credits for two max-plausible packets (9 units). | |
2337 | * 3) Give VL0-N the rest, with any rounding excess used for VL0 | |
2338 | */ | |
2339 | /* 2 VL15 packets @ 288 bytes each (including IB headers) */ | |
2340 | totcred = NUM_RCV_BUF_UNITS(dd); | |
2341 | cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ; | |
2342 | totcred -= cred_vl; | |
2343 | qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl); | |
2344 | cred_vl = totcred / numvls; | |
2345 | vl0extra = totcred - cred_vl * numvls; | |
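	/*
	 * Illustrative numbers only: with 512 units per port, VL15 takes 9,
	 * leaving totcred = 503; with numvls = 4 that gives cred_vl = 125
	 * and vl0extra = 3, so VL0 gets 128 credits and VL1-VL3 get 125 each.
	 */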
2346 | qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra); | |
2347 | for (i = 1; i < numvls; i++) | |
2348 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl); | |
2349 | for (; i < 8; i++) /* no buffer space for other VLs */ | |
2350 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); | |
2351 | ||
2352 | /* Notify IBC that credits need to be recalculated */ | |
2353 | val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); | |
2354 | val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); | |
2355 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | |
2356 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2357 | val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); | |
2358 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | |
2359 | ||
2360 | for (i = 0; i < numvls; i++) | |
2361 | val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i); | |
2362 | val = qib_read_kreg_port(ppd, krp_rxcreditvl15); | |
2363 | ||
2364 | /* Change the number of operational VLs */ | |
2365 | ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a & | |
2366 | ~SYM_MASK(IBCCtrlA_0, NumVLane)) | | |
2367 | ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane)); | |
2368 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
2369 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2370 | } | |
2371 | ||
2372 | /* | |
2373 | * The code that deals with actual SerDes is in serdes_7322_init(). | |
2374 | * Compared to the code for iba7220, it is minimal. | |
2375 | */ | |
2376 | static int serdes_7322_init(struct qib_pportdata *ppd); | |
2377 | ||
2378 | /** | |
2379 | * qib_7322_bringup_serdes - bring up the serdes | |
2380 | * @ppd: physical port on the qlogic_ib device | |
2381 | */ | |
2382 | static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |
2383 | { | |
2384 | struct qib_devdata *dd = ppd->dd; | |
2385 | u64 val, guid, ibc; | |
2386 | unsigned long flags; | |
2387 | int ret = 0; | |
2388 | ||
2389 | /* | |
2390 | * SerDes model not in Pd, but still need to | |
2391 | * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere | |
2392 | * eventually. | |
2393 | */ | |
2394 | /* Put IBC in reset, sends disabled (should be in reset already) */ | |
2395 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn); | |
2396 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
2397 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2398 | ||
2f75e12c MM |
2399 | /* ensure previous Tx parameters are not still forced */ |
2400 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
2401 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
2402 | reset_tx_deemphasis_override)); | |
2403 | ||
f931551b RC |
2404 | if (qib_compat_ddr_negotiate) { |
2405 | ppd->cpspec->ibdeltainprog = 1; | |
2406 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | |
2407 | crp_ibsymbolerr); | |
2408 | ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, | |
2409 | crp_iblinkerrrecov); | |
2410 | } | |
2411 | ||
2412 | /* flowcontrolwatermark is in units of KBytes */ | |
2413 | ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark); | |
2414 | /* | |
2415 | * Flow control is sent this often, even if no changes in | |
2416 | * buffer space occur. Units are 128ns for this chip. | |
2417 | * Set to 3usec. | |
2418 | */ | |
2419 | ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod); | |
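	/* i.e. 24 * 128 ns = 3.072 usec, the ~3 usec noted above */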
2420 | /* max error tolerance */ | |
2421 | ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold); | |
2422 | /* IB credit flow control. */ | |
2423 | ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold); | |
2424 | /* | |
2425 | * set initial max size pkt IBC will send, including ICRC; it's the | |
2426 | * PIO buffer size in dwords, less 1; also see qib_set_mtu() | |
2427 | */ | |
2428 | ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << | |
2429 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | |
2430 | ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ | |
2431 | ||
f931551b RC |
2432 | /* |
2433 | * Reset the PCS interface to the serdes (and also ibc, which is still | |
2434 | * in reset from above). Writes new value of ibcctrl_a as last step. | |
2435 | */ | |
2436 | qib_7322_mini_pcs_reset(ppd); | |
f931551b RC |
2437 | |
2438 | if (!ppd->cpspec->ibcctrl_b) { | |
2439 | unsigned lse = ppd->link_speed_enabled; | |
2440 | ||
2441 | /* | |
2442 | * Not on re-init after reset, establish shadow | |
2443 | * and force initial config. | |
2444 | */ | |
2445 | ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd, | |
2446 | krp_ibcctrl_b); | |
2447 | ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR | | |
2448 | IBA7322_IBC_SPEED_DDR | | |
2449 | IBA7322_IBC_SPEED_SDR | | |
2450 | IBA7322_IBC_WIDTH_AUTONEG | | |
2451 | SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED)); | |
2452 | if (lse & (lse - 1)) /* Multiple speeds enabled */ | |
2453 | ppd->cpspec->ibcctrl_b |= | |
2454 | (lse << IBA7322_IBC_SPEED_LSB) | | |
2455 | IBA7322_IBC_IBTA_1_2_MASK | | |
2456 | IBA7322_IBC_MAX_SPEED_MASK; | |
2457 | else | |
2458 | ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ? | |
2459 | IBA7322_IBC_SPEED_QDR | | |
2460 | IBA7322_IBC_IBTA_1_2_MASK : | |
2461 | (lse == QIB_IB_DDR) ? | |
2462 | IBA7322_IBC_SPEED_DDR : | |
2463 | IBA7322_IBC_SPEED_SDR; | |
2464 | if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == | |
2465 | (IB_WIDTH_1X | IB_WIDTH_4X)) | |
2466 | ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG; | |
2467 | else | |
2468 | ppd->cpspec->ibcctrl_b |= | |
2469 | ppd->link_width_enabled == IB_WIDTH_4X ? | |
2470 | IBA7322_IBC_WIDTH_4X_ONLY : | |
2471 | IBA7322_IBC_WIDTH_1X_ONLY; | |
2472 | ||
2473 | /* always enable these on driver reload, not sticky */ | |
2474 | ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK | | |
2475 | IBA7322_IBC_HRTBT_MASK); | |
2476 | } | |
2477 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | |
2478 | ||
2479 | /* setup so we have more time at CFGTEST to change H1 */ | |
2480 | val = qib_read_kreg_port(ppd, krp_ibcctrl_c); | |
2481 | val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH); | |
2482 | val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH); | |
2483 | qib_write_kreg_port(ppd, krp_ibcctrl_c, val); | |
2484 | ||
2485 | serdes_7322_init(ppd); | |
2486 | ||
2487 | guid = be64_to_cpu(ppd->guid); | |
2488 | if (!guid) { | |
2489 | if (dd->base_guid) | |
2490 | guid = be64_to_cpu(dd->base_guid) + ppd->port - 1; | |
2491 | ppd->guid = cpu_to_be64(guid); | |
2492 | } | |
2493 | ||
2494 | qib_write_kreg_port(ppd, krp_hrtbt_guid, guid); | |
2495 | /* write to chip to prevent back-to-back writes of ibc reg */ | |
2496 | qib_write_kreg(dd, kr_scratch, 0); | |
2497 | ||
2498 | /* Enable port */ | |
2499 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); | |
2500 | set_vls(ppd); | |
2501 | ||
8ee887d7 MM |
2502 | /* initially come up DISABLED, without sending anything. */ |
2503 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | |
2504 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | |
2505 | qib_write_kreg_port(ppd, krp_ibcctrl_a, val); | |
2506 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2507 | /* clear the linkinit cmds */ | |
2508 | ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); | |
2509 | ||
f931551b RC |
2510 | /* be paranoid against later code motion, etc. */ |
2511 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
2512 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); | |
2513 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | |
2514 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
2515 | ||
2516 | /* Also enable IBSTATUSCHG interrupt. */ | |
2517 | val = qib_read_kreg_port(ppd, krp_errmask); | |
2518 | qib_write_kreg_port(ppd, krp_errmask, | |
2519 | val | ERR_MASK_N(IBStatusChanged)); | |
2520 | ||
2521 | /* Always zero until we start messing with SerDes for real */ | |
2522 | return ret; | |
2523 | } | |
2524 | ||
2525 | /** | |
2526 | * qib_7322_mini_quiet_serdes - set serdes to txidle | |
2527 | * @ppd: physical port on the qlogic_ib device | |
2528 | * Called when driver is being unloaded | |
2529 | */ | |
2530 | static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd) | |
2531 | { | |
2532 | u64 val; | |
2533 | unsigned long flags; | |
2534 | ||
2535 | qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
2536 | ||
2537 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
2538 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | |
2539 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
2540 | wake_up(&ppd->cpspec->autoneg_wait); | |
f0626710 | 2541 | cancel_delayed_work_sync(&ppd->cpspec->autoneg_work); |
f931551b | 2542 | if (ppd->dd->cspec->r1) |
f0626710 | 2543 | cancel_delayed_work_sync(&ppd->cpspec->ipg_work); |
f931551b RC |
2544 | |
2545 | ppd->cpspec->chase_end = 0; | |
2546 | if (ppd->cpspec->chase_timer.data) /* if initted */ | |
2547 | del_timer_sync(&ppd->cpspec->chase_timer); | |
2548 | ||
2549 | /* | |
2550 | * Despite the name, actually disables IBC as well. Do it when | |
2551 | * we are as sure as possible that no more packets can be | |
2552 | * received, following the down and the PCS reset. | |
2553 | * The actual disabling happens in qib_7322_mini_pcs_reset(), | |
2554 | * along with the PCS being reset. | |
2555 | */ | |
2556 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn); | |
2557 | qib_7322_mini_pcs_reset(ppd); | |
2558 | ||
2559 | /* | |
2560 | * Update the adjusted counters so the adjustment persists | |
2561 | * across driver reload. | |
2562 | */ | |
2563 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || | |
2564 | ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) { | |
2565 | struct qib_devdata *dd = ppd->dd; | |
2566 | u64 diagc; | |
2567 | ||
2568 | /* enable counter writes */ | |
2569 | diagc = qib_read_kreg64(dd, kr_hwdiagctrl); | |
2570 | qib_write_kreg(dd, kr_hwdiagctrl, | |
2571 | diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); | |
2572 | ||
2573 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { | |
2574 | val = read_7322_creg32_port(ppd, crp_ibsymbolerr); | |
2575 | if (ppd->cpspec->ibdeltainprog) | |
2576 | val -= val - ppd->cpspec->ibsymsnap; | |
2577 | val -= ppd->cpspec->ibsymdelta; | |
2578 | write_7322_creg_port(ppd, crp_ibsymbolerr, val); | |
2579 | } | |
2580 | if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { | |
2581 | val = read_7322_creg32_port(ppd, crp_iblinkerrrecov); | |
2582 | if (ppd->cpspec->ibdeltainprog) | |
2583 | val -= val - ppd->cpspec->iblnkerrsnap; | |
2584 | val -= ppd->cpspec->iblnkerrdelta; | |
2585 | write_7322_creg_port(ppd, crp_iblinkerrrecov, val); | |
2586 | } | |
2587 | if (ppd->cpspec->iblnkdowndelta) { | |
2588 | val = read_7322_creg32_port(ppd, crp_iblinkdown); | |
2589 | val += ppd->cpspec->iblnkdowndelta; | |
2590 | write_7322_creg_port(ppd, crp_iblinkdown, val); | |
2591 | } | |
2592 | /* | |
2593 | * No need to save ibmalfdelta since IB perfcounters | |
2594 | * are cleared on driver reload. | |
2595 | */ | |
2596 | ||
2597 | /* and disable counter writes */ | |
2598 | qib_write_kreg(dd, kr_hwdiagctrl, diagc); | |
2599 | } | |
2600 | } | |
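/*
 * Editor's sketch (illustration only, not driver code): the arithmetic
 * used just above to make the per-port IB counters persist across a
 * driver reload.  "snap" and "delta" stand in for the cpspec
 * snapshot/delta fields; the helper name is hypothetical.
 */
static unsigned long long persisted_counter(unsigned long long hw_val,
					    unsigned long long snap,
					    unsigned long long delta,
					    int adjust_in_progress)
{
	if (adjust_in_progress)
		hw_val -= hw_val - snap;	/* i.e. roll back to the snapshot */
	return hw_val - delta;			/* value written back to the chip */
}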
2601 | ||
2602 | /** | |
2603 | * qib_setup_7322_setextled - set the state of the two external LEDs | |
2604 | * @ppd: physical port on the qlogic_ib device | |
2605 | * @on: whether the link is up or not | |
2606 | * | |
2607 | * The exact combination of LEDs lit when @on is true is determined | |
2608 | * by looking at the ibcstatus. | |
2609 | * | |
2610 | * These LEDs indicate the physical and logical state of IB link. | |
2611 | * For this chip (at least with recommended board pinouts), LED1 | |
2612 | * is Yellow (logical state) and LED2 is Green (physical state), | |
2613 | * | |
2614 | * Note: We try to match the Mellanox HCA LED behavior as best | |
2615 | * we can. Green indicates physical link state is OK (something is | |
2616 | * plugged in, and we can train). | |
2617 | * Amber indicates the link is logically up (ACTIVE). | |
2618 | * Mellanox further blinks the amber LED to indicate data packet | |
2619 | * activity, but we have no hardware support for that, so it would | |
2620 | * require waking up every 10-20 msecs and checking the counters | |
2621 | * on the chip, and then turning the LED off if appropriate. That's | |
2622 | * visible overhead, so not something we will do. | |
2623 | */ | |
2624 | static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) | |
2625 | { | |
2626 | struct qib_devdata *dd = ppd->dd; | |
2627 | u64 extctl, ledblink = 0, val; | |
2628 | unsigned long flags; | |
2629 | int yel, grn; | |
2630 | ||
2631 | /* | |
2632 | * The diags use the LED to indicate diag info, so we leave | |
2633 | * the external LED alone when the diags are running. | |
2634 | */ | |
2635 | if (dd->diag_client) | |
2636 | return; | |
2637 | ||
2638 | /* Allow override of LED display, e.g. for locating the system in a rack */ | |
2639 | if (ppd->led_override) { | |
2640 | grn = (ppd->led_override & QIB_LED_PHYS); | |
2641 | yel = (ppd->led_override & QIB_LED_LOG); | |
2642 | } else if (on) { | |
2643 | val = qib_read_kreg_port(ppd, krp_ibcstatus_a); | |
2644 | grn = qib_7322_phys_portstate(val) == | |
2645 | IB_PHYSPORTSTATE_LINKUP; | |
2646 | yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE; | |
2647 | } else { | |
2648 | grn = 0; | |
2649 | yel = 0; | |
2650 | } | |
2651 | ||
2652 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
2653 | extctl = dd->cspec->extctrl & (ppd->port == 1 ? | |
2654 | ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK); | |
2655 | if (grn) { | |
2656 | extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN; | |
2657 | /* | |
2658 | * Counts are in chip clock (4ns) periods. | |
2659 | * This is 66.6 ms (roughly 1/15 sec) on, | |
2660 | * 187.5 ms (3/16 sec) off, with packets rcvd. | |
2661 | */ | |
2662 | ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) | | |
2663 | ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT); | |
2664 | } | |
2665 | if (yel) | |
2666 | extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL; | |
2667 | dd->cspec->extctrl = extctl; | |
2668 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | |
2669 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
2670 | ||
2671 | if (ledblink) /* blink the LED on packet receive */ | |
2672 | qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); | |
2673 | } | |
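/*
 * Editor's sketch (illustration only, not driver code): how the
 * rcvpktledcnt value above is built.  Counts are 4 ns chip-clock
 * periods, so count = nanoseconds / 4.  The shift values below are
 * hypothetical stand-ins for IBA7322_LEDBLINK_ON/OFF_SHIFT.
 */
#define EX_LEDBLINK_ON_SHIFT	32	/* assumed field placement */
#define EX_LEDBLINK_OFF_SHIFT	0

static unsigned long long ex_ledblink(unsigned long on_us, unsigned long off_us)
{
	unsigned long long on_cnt  = (unsigned long long)on_us * 1000 / 4;
	unsigned long long off_cnt = (unsigned long long)off_us * 1000 / 4;

	return (on_cnt << EX_LEDBLINK_ON_SHIFT) |
	       (off_cnt << EX_LEDBLINK_OFF_SHIFT);
}
/* ex_ledblink(66600, 187500) reproduces the 66.6 ms on / 187.5 ms off value */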
2674 | ||
8469ba39 MM |
2675 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
2676 | ||
2677 | static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) | |
2678 | { | |
2679 | switch (event) { | |
2680 | case DCA_PROVIDER_ADD: | |
2681 | if (dd->flags & QIB_DCA_ENABLED) | |
2682 | break; | |
2683 | if (!dca_add_requester(&dd->pcidev->dev)) { | |
2684 | qib_devinfo(dd->pcidev, "DCA enabled\n"); | |
2685 | dd->flags |= QIB_DCA_ENABLED; | |
2686 | qib_setup_dca(dd); | |
2687 | } | |
2688 | break; | |
2689 | case DCA_PROVIDER_REMOVE: | |
2690 | if (dd->flags & QIB_DCA_ENABLED) { | |
2691 | dca_remove_requester(&dd->pcidev->dev); | |
2692 | dd->flags &= ~QIB_DCA_ENABLED; | |
2693 | dd->cspec->dca_ctrl = 0; | |
2694 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), | |
2695 | dd->cspec->dca_ctrl); | |
2696 | } | |
2697 | break; | |
2698 | } | |
2699 | return 0; | |
2700 | } | |
2701 | ||
2702 | static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu) | |
2703 | { | |
2704 | struct qib_devdata *dd = rcd->dd; | |
2705 | struct qib_chip_specific *cspec = dd->cspec; | |
2706 | ||
2707 | if (!(dd->flags & QIB_DCA_ENABLED)) | |
2708 | return; | |
2709 | if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { | |
2710 | const struct dca_reg_map *rmp; | |
2711 | ||
2712 | cspec->rhdr_cpu[rcd->ctxt] = cpu; | |
2713 | rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; | |
2714 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; | |
2715 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= | |
2716 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; | |
2717 | qib_devinfo(dd->pcidev, | |
2718 | "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu, | |
2719 | (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | |
2720 | qib_write_kreg(dd, rmp->regno, | |
2721 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | |
2722 | cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); | |
2723 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | |
2724 | } | |
2725 | } | |
2726 | ||
2727 | static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu) | |
2728 | { | |
2729 | struct qib_devdata *dd = ppd->dd; | |
2730 | struct qib_chip_specific *cspec = dd->cspec; | |
2731 | unsigned pidx = ppd->port - 1; | |
2732 | ||
2733 | if (!(dd->flags & QIB_DCA_ENABLED)) | |
2734 | return; | |
2735 | if (cspec->sdma_cpu[pidx] != cpu) { | |
2736 | cspec->sdma_cpu[pidx] = cpu; | |
2737 | cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? | |
2738 | SYM_MASK(DCACtrlF, SendDma1DCAOPH) : | |
2739 | SYM_MASK(DCACtrlF, SendDma0DCAOPH)); | |
2740 | cspec->dca_rcvhdr_ctrl[4] |= | |
2741 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << | |
2742 | (ppd->hw_pidx ? | |
2743 | SYM_LSB(DCACtrlF, SendDma1DCAOPH) : | |
2744 | SYM_LSB(DCACtrlF, SendDma0DCAOPH)); | |
2745 | qib_devinfo(dd->pcidev, | |
2746 | "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu, | |
2747 | (long long) cspec->dca_rcvhdr_ctrl[4]); | |
2748 | qib_write_kreg(dd, KREG_IDX(DCACtrlF), | |
2749 | cspec->dca_rcvhdr_ctrl[4]); | |
2750 | cspec->dca_ctrl |= ppd->hw_pidx ? | |
2751 | SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : | |
2752 | SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); | |
2753 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | |
2754 | } | |
2755 | } | |
2756 | ||
2757 | static void qib_setup_dca(struct qib_devdata *dd) | |
2758 | { | |
2759 | struct qib_chip_specific *cspec = dd->cspec; | |
2760 | int i; | |
2761 | ||
2762 | for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) | |
2763 | cspec->rhdr_cpu[i] = -1; | |
2764 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | |
2765 | cspec->sdma_cpu[i] = -1; | |
2766 | cspec->dca_rcvhdr_ctrl[0] = | |
2767 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | | |
2768 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | | |
2769 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | | |
2770 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); | |
2771 | cspec->dca_rcvhdr_ctrl[1] = | |
2772 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | | |
2773 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | | |
2774 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | | |
2775 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); | |
2776 | cspec->dca_rcvhdr_ctrl[2] = | |
2777 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | | |
2778 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | | |
2779 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | | |
2780 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); | |
2781 | cspec->dca_rcvhdr_ctrl[3] = | |
2782 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | | |
2783 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | | |
2784 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | | |
2785 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); | |
2786 | cspec->dca_rcvhdr_ctrl[4] = | |
2787 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | | |
2788 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); | |
2789 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | |
2790 | qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, | |
2791 | cspec->dca_rcvhdr_ctrl[i]); | |
2792 | for (i = 0; i < cspec->num_msix_entries; i++) | |
2793 | setup_dca_notifier(dd, &cspec->msix_entries[i]); | |
2794 | } | |
2795 | ||
2796 | static void qib_irq_notifier_notify(struct irq_affinity_notify *notify, | |
2797 | const cpumask_t *mask) | |
2798 | { | |
2799 | struct qib_irq_notify *n = | |
2800 | container_of(notify, struct qib_irq_notify, notify); | |
2801 | int cpu = cpumask_first(mask); | |
2802 | ||
2803 | if (n->rcv) { | |
2804 | struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; | |
2805 | qib_update_rhdrq_dca(rcd, cpu); | |
2806 | } else { | |
2807 | struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; | |
2808 | qib_update_sdma_dca(ppd, cpu); | |
2809 | } | |
2810 | } | |
2811 | ||
2812 | static void qib_irq_notifier_release(struct kref *ref) | |
2813 | { | |
2814 | struct qib_irq_notify *n = | |
2815 | container_of(ref, struct qib_irq_notify, notify.kref); | |
2816 | struct qib_devdata *dd; | |
2817 | ||
2818 | if (n->rcv) { | |
2819 | struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; | |
2820 | dd = rcd->dd; | |
2821 | } else { | |
2822 | struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; | |
2823 | dd = ppd->dd; | |
2824 | } | |
2825 | qib_devinfo(dd->pcidev, | |
2826 | "release on HCA notify 0x%p n 0x%p\n", ref, n); | |
2827 | kfree(n); | |
2828 | } | |
2829 | #endif | |
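/*
 * Editor's sketch (illustration only, not driver code): the shadow
 * read-modify-write pattern shared by qib_update_rhdrq_dca() and
 * qib_update_sdma_dca() above: clear the field in the cached copy of
 * the DCA control register, insert the new CPU's tag at the field's
 * LSB, then write the cached copy back.  The names here are generic
 * stand-ins for the SYM_MASK()/SYM_LSB() values.
 */
static unsigned long long ex_set_dca_field(unsigned long long shadow,
					   unsigned long long field_mask,
					   unsigned int field_lsb,
					   unsigned int dca_tag)
{
	shadow &= ~field_mask;					/* drop the old tag */
	shadow |= (unsigned long long)dca_tag << field_lsb;	/* insert the new one */
	return shadow;						/* caller writes this to the kreg */
}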
2830 | ||
f931551b RC |
2831 | /* |
2832 | * Disable MSIx interrupt if enabled, call generic MSIx code | |
2833 | * to cleanup, and clear pending MSIx interrupts. | |
2834 | * Used for fallback to INTx, after reset, and when MSIx setup fails. | |
2835 | */ | |
2836 | static void qib_7322_nomsix(struct qib_devdata *dd) | |
2837 | { | |
2838 | u64 intgranted; | |
2839 | int n; | |
2840 | ||
2841 | dd->cspec->main_int_mask = ~0ULL; | |
2842 | n = dd->cspec->num_msix_entries; | |
2843 | if (n) { | |
2844 | int i; | |
2845 | ||
2846 | dd->cspec->num_msix_entries = 0; | |
a778f3fd | 2847 | for (i = 0; i < n; i++) { |
8469ba39 MM |
2848 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
2849 | reset_dca_notifier(dd, &dd->cspec->msix_entries[i]); | |
2850 | #endif | |
a778f3fd MM |
2851 | irq_set_affinity_hint( |
2852 | dd->cspec->msix_entries[i].msix.vector, NULL); | |
2853 | free_cpumask_var(dd->cspec->msix_entries[i].mask); | |
2854 | free_irq(dd->cspec->msix_entries[i].msix.vector, | |
2855 | dd->cspec->msix_entries[i].arg); | |
2856 | } | |
f931551b RC |
2857 | qib_nomsix(dd); |
2858 | } | |
2859 | /* make sure no MSIx interrupts are left pending */ | |
2860 | intgranted = qib_read_kreg64(dd, kr_intgranted); | |
2861 | if (intgranted) | |
2862 | qib_write_kreg(dd, kr_intgranted, intgranted); | |
2863 | } | |
2864 | ||
2865 | static void qib_7322_free_irq(struct qib_devdata *dd) | |
2866 | { | |
2867 | if (dd->cspec->irq) { | |
2868 | free_irq(dd->cspec->irq, dd); | |
2869 | dd->cspec->irq = 0; | |
2870 | } | |
2871 | qib_7322_nomsix(dd); | |
2872 | } | |
2873 | ||
2874 | static void qib_setup_7322_cleanup(struct qib_devdata *dd) | |
2875 | { | |
2876 | int i; | |
2877 | ||
8469ba39 MM |
2878 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
2879 | if (dd->flags & QIB_DCA_ENABLED) { | |
2880 | dca_remove_requester(&dd->pcidev->dev); | |
2881 | dd->flags &= ~QIB_DCA_ENABLED; | |
2882 | dd->cspec->dca_ctrl = 0; | |
2883 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); | |
2884 | } | |
2885 | #endif | |
2886 | ||
f931551b RC |
2887 | qib_7322_free_irq(dd); |
2888 | kfree(dd->cspec->cntrs); | |
2889 | kfree(dd->cspec->sendchkenable); | |
2890 | kfree(dd->cspec->sendgrhchk); | |
2891 | kfree(dd->cspec->sendibchk); | |
2892 | kfree(dd->cspec->msix_entries); | |
f931551b RC |
2893 | for (i = 0; i < dd->num_pports; i++) { |
2894 | unsigned long flags; | |
2895 | u32 mask = QSFP_GPIO_MOD_PRS_N | | |
2896 | (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT); | |
2897 | ||
2898 | kfree(dd->pport[i].cpspec->portcntrs); | |
2899 | if (dd->flags & QIB_HAS_QSFP) { | |
2900 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
2901 | dd->cspec->gpio_mask &= ~mask; | |
2902 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | |
2903 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
2904 | qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data); | |
2905 | } | |
2906 | if (dd->pport[i].ibport_data.smi_ah) | |
2907 | ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah); | |
2908 | } | |
2909 | } | |
2910 | ||
2911 | /* handle SDMA interrupts */ | |
2912 | static void sdma_7322_intr(struct qib_devdata *dd, u64 istat) | |
2913 | { | |
2914 | struct qib_pportdata *ppd0 = &dd->pport[0]; | |
2915 | struct qib_pportdata *ppd1 = &dd->pport[1]; | |
2916 | u64 intr0 = istat & (INT_MASK_P(SDma, 0) | | |
2917 | INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0)); | |
2918 | u64 intr1 = istat & (INT_MASK_P(SDma, 1) | | |
2919 | INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1)); | |
2920 | ||
2921 | if (intr0) | |
2922 | qib_sdma_intr(ppd0); | |
2923 | if (intr1) | |
2924 | qib_sdma_intr(ppd1); | |
2925 | ||
2926 | if (istat & INT_MASK_PM(SDmaCleanupDone, 0)) | |
2927 | qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started); | |
2928 | if (istat & INT_MASK_PM(SDmaCleanupDone, 1)) | |
2929 | qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started); | |
2930 | } | |
2931 | ||
2932 | /* | |
2933 | * Set or clear the Send buffer available interrupt enable bit. | |
2934 | */ | |
2935 | static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) | |
2936 | { | |
2937 | unsigned long flags; | |
2938 | ||
2939 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
2940 | if (needint) | |
2941 | dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); | |
2942 | else | |
2943 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); | |
2944 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | |
2945 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2946 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
2947 | } | |
2948 | ||
2949 | /* | |
2950 | * Somehow got an interrupt with reserved bits set in interrupt status. | |
2951 | * Print a message so we know it happened, then clear them. | |
2952 | * Kept as a separate function to keep the mainline handler cache-friendly. | |
2953 | */ | |
2954 | static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) | |
2955 | { | |
2956 | u64 kills; | |
2958 | ||
2959 | kills = istat & ~QIB_I_BITSEXTANT; | |
7fac3301 MM |
2960 | qib_dev_err(dd, |
2961 | "Clearing reserved interrupt(s) 0x%016llx: %s\n", | |
2962 | (unsigned long long) kills, msg); | |
f931551b RC |
2963 | qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); |
2964 | } | |
2965 | ||
2966 | /* keep mainline interrupt handler cache-friendly */ | |
2967 | static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) | |
2968 | { | |
2969 | u32 gpiostatus; | |
2970 | int handled = 0; | |
2971 | int pidx; | |
2972 | ||
2973 | /* | |
2974 | * Boards for this chip currently don't use GPIO interrupts, | |
2975 | * so clear by writing GPIOstatus to GPIOclear, and complain | |
2976 | * to developer. To avoid endless repeats, clear | |
2977 | * the bits in the mask, since there is some kind of | |
2978 | * programming error or chip problem. | |
2979 | */ | |
2980 | gpiostatus = qib_read_kreg32(dd, kr_gpio_status); | |
2981 | /* | |
2982 | * In theory, writing GPIOstatus to GPIOclear could | |
2983 | * have a bad side-effect on some diagnostic that wanted | |
2984 | * to poll for a status-change, but the various shadows | |
2985 | * make that problematic at best. Diags will just suppress | |
2986 | * all GPIO interrupts during such tests. | |
2987 | */ | |
2988 | qib_write_kreg(dd, kr_gpio_clear, gpiostatus); | |
2989 | /* | |
2990 | * Check for QSFP MOD_PRS changes | |
2991 | * only works for single port if IB1 != pidx1 | |
2992 | */ | |
2993 | for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP); | |
2994 | ++pidx) { | |
2995 | struct qib_pportdata *ppd; | |
2996 | struct qib_qsfp_data *qd; | |
2997 | u32 mask; | |
2998 | if (!dd->pport[pidx].link_speed_supported) | |
2999 | continue; | |
3000 | mask = QSFP_GPIO_MOD_PRS_N; | |
3001 | ppd = dd->pport + pidx; | |
3002 | mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); | |
3003 | if (gpiostatus & dd->cspec->gpio_mask & mask) { | |
3004 | u64 pins; | |
3005 | qd = &ppd->cpspec->qsfp_data; | |
3006 | gpiostatus &= ~mask; | |
3007 | pins = qib_read_kreg64(dd, kr_extstatus); | |
3008 | pins >>= SYM_LSB(EXTStatus, GPIOIn); | |
3009 | if (!(pins & mask)) { | |
3010 | ++handled; | |
8482d5d1 | 3011 | qd->t_insert = jiffies; |
f0626710 | 3012 | queue_work(ib_wq, &qd->work); |
f931551b RC |
3013 | } |
3014 | } | |
3015 | } | |
3016 | ||
3017 | if (gpiostatus && !handled) { | |
3018 | const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); | |
3019 | u32 gpio_irq = mask & gpiostatus; | |
3020 | ||
3021 | /* | |
3022 | * Clear any troublemakers, and update chip from shadow | |
3023 | */ | |
3024 | dd->cspec->gpio_mask &= ~gpio_irq; | |
3025 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | |
3026 | } | |
3027 | } | |
3028 | ||
3029 | /* | |
3030 | * Handle errors and unusual events first, separate function | |
3031 | * to improve cache hits for fast path interrupt handling. | |
3032 | */ | |
3033 | static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat) | |
3034 | { | |
3035 | if (istat & ~QIB_I_BITSEXTANT) | |
3036 | unknown_7322_ibits(dd, istat); | |
3037 | if (istat & QIB_I_GPIO) | |
3038 | unknown_7322_gpio_intr(dd); | |
e67306a3 MM |
3039 | if (istat & QIB_I_C_ERROR) { |
3040 | qib_write_kreg(dd, kr_errmask, 0ULL); | |
3041 | tasklet_schedule(&dd->error_tasklet); | |
3042 | } | |
f931551b RC |
3043 | if (istat & INT_MASK_P(Err, 0) && dd->rcd[0]) |
3044 | handle_7322_p_errors(dd->rcd[0]->ppd); | |
3045 | if (istat & INT_MASK_P(Err, 1) && dd->rcd[1]) | |
3046 | handle_7322_p_errors(dd->rcd[1]->ppd); | |
3047 | } | |
3048 | ||
3049 | /* | |
3050 | * Dynamically adjust the rcv int timeout for a context based on incoming | |
3051 | * packet rate. | |
3052 | */ | |
3053 | static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts) | |
3054 | { | |
3055 | struct qib_devdata *dd = rcd->dd; | |
3056 | u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt]; | |
3057 | ||
3058 | /* | |
3059 | * Dynamically adjust idle timeout on chip | |
3060 | * based on number of packets processed. | |
3061 | */ | |
3062 | if (npkts < rcv_int_count && timeout > 2) | |
3063 | timeout >>= 1; | |
3064 | else if (npkts >= rcv_int_count && timeout < rcv_int_timeout) | |
3065 | timeout = min(timeout << 1, rcv_int_timeout); | |
3066 | else | |
3067 | return; | |
3068 | ||
3069 | dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout; | |
3070 | qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout); | |
3071 | } | |
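/*
 * Editor's sketch (illustration only, not driver code): the adaptation
 * rule used by adjust_rcv_timeout() above, with hypothetical values for
 * the rcv_int_count and rcv_int_timeout module parameters.
 */
static unsigned int ex_adjust_timeout(unsigned int timeout, int npkts)
{
	const unsigned int count_thresh = 16;	/* stands in for rcv_int_count */
	const unsigned int timeout_max = 375;	/* stands in for rcv_int_timeout */

	if (npkts < count_thresh && timeout > 2)
		timeout >>= 1;			/* light traffic: interrupt sooner */
	else if (npkts >= count_thresh && timeout < timeout_max)
		timeout = timeout * 2 > timeout_max ?
			timeout_max : timeout * 2;	/* busy: batch more per interrupt */
	return timeout;
}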
3072 | ||
3073 | /* | |
3074 | * This is the main interrupt handler. | |
3075 | * It will normally only be used for low frequency interrupts but may | |
3076 | * have to handle all interrupts if INTx is enabled or fewer than normal | |
3077 | * MSIx interrupts were allocated. | |
3078 | * This routine should ignore the interrupt bits for any of the | |
3079 | * dedicated MSIx handlers. | |
3080 | */ | |
3081 | static irqreturn_t qib_7322intr(int irq, void *data) | |
3082 | { | |
3083 | struct qib_devdata *dd = data; | |
3084 | irqreturn_t ret; | |
3085 | u64 istat; | |
3086 | u64 ctxtrbits; | |
3087 | u64 rmask; | |
3088 | unsigned i; | |
3089 | u32 npkts; | |
3090 | ||
3091 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { | |
3092 | /* | |
3093 | * This return value is not great, but we do not want the | |
3094 | * interrupt core code to remove our interrupt handler | |
3095 | * because we don't appear to be handling an interrupt | |
3096 | * during a chip reset. | |
3097 | */ | |
3098 | ret = IRQ_HANDLED; | |
3099 | goto bail; | |
3100 | } | |
3101 | ||
3102 | istat = qib_read_kreg64(dd, kr_intstatus); | |
3103 | ||
3104 | if (unlikely(istat == ~0ULL)) { | |
3105 | qib_bad_intrstatus(dd); | |
3106 | qib_dev_err(dd, "Interrupt status all f's, skipping\n"); | |
3107 | /* don't know if it was our interrupt or not */ | |
3108 | ret = IRQ_NONE; | |
3109 | goto bail; | |
3110 | } | |
3111 | ||
3112 | istat &= dd->cspec->main_int_mask; | |
3113 | if (unlikely(!istat)) { | |
3114 | /* already handled, or shared and not us */ | |
3115 | ret = IRQ_NONE; | |
3116 | goto bail; | |
3117 | } | |
3118 | ||
1ed88dd7 | 3119 | this_cpu_inc(*dd->int_counter); |
f931551b RC |
3120 | |
3121 | /* handle "errors" of various kinds first, device ahead of port */ | |
3122 | if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO | | |
3123 | QIB_I_C_ERROR | INT_MASK_P(Err, 0) | | |
3124 | INT_MASK_P(Err, 1)))) | |
3125 | unlikely_7322_intr(dd, istat); | |
3126 | ||
3127 | /* | |
3128 | * Clear the interrupt bits we found set, relatively early, so we | |
3129 | * "know" know the chip will have seen this by the time we process | |
3130 | * the queue, and will re-interrupt if necessary. The processor | |
3131 | * itself won't take the interrupt again until we return. | |
3132 | */ | |
3133 | qib_write_kreg(dd, kr_intclear, istat); | |
3134 | ||
3135 | /* | |
3136 | * Handle kernel receive queues before checking for pio buffers | |
3137 | * available since receives can overflow; piobuf waiters can afford | |
3138 | * a few extra cycles, since they were waiting anyway. | |
3139 | */ | |
3140 | ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK); | |
3141 | if (ctxtrbits) { | |
3142 | rmask = (1ULL << QIB_I_RCVAVAIL_LSB) | | |
3143 | (1ULL << QIB_I_RCVURG_LSB); | |
3144 | for (i = 0; i < dd->first_user_ctxt; i++) { | |
3145 | if (ctxtrbits & rmask) { | |
3146 | ctxtrbits &= ~rmask; | |
44d75d3d | 3147 | if (dd->rcd[i]) |
f931551b | 3148 | qib_kreceive(dd->rcd[i], NULL, &npkts); |
f931551b RC |
3149 | } |
3150 | rmask <<= 1; | |
3151 | } | |
3152 | if (ctxtrbits) { | |
3153 | ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) | | |
3154 | (ctxtrbits >> QIB_I_RCVURG_LSB); | |
3155 | qib_handle_urcv(dd, ctxtrbits); | |
3156 | } | |
3157 | } | |
3158 | ||
3159 | if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1))) | |
3160 | sdma_7322_intr(dd, istat); | |
3161 | ||
3162 | if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) | |
3163 | qib_ib_piobufavail(dd); | |
3164 | ||
3165 | ret = IRQ_HANDLED; | |
3166 | bail: | |
3167 | return ret; | |
3168 | } | |
3169 | ||
3170 | /* | |
3171 | * Dedicated receive packet available interrupt handler. | |
3172 | */ | |
3173 | static irqreturn_t qib_7322pintr(int irq, void *data) | |
3174 | { | |
3175 | struct qib_ctxtdata *rcd = data; | |
3176 | struct qib_devdata *dd = rcd->dd; | |
3177 | u32 npkts; | |
3178 | ||
3179 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3180 | /* | |
3181 | * This return value is not great, but we do not want the | |
3182 | * interrupt core code to remove our interrupt handler | |
3183 | * because we don't appear to be handling an interrupt | |
3184 | * during a chip reset. | |
3185 | */ | |
3186 | return IRQ_HANDLED; | |
3187 | ||
1ed88dd7 | 3188 | this_cpu_inc(*dd->int_counter); |
f931551b | 3189 | |
f931551b RC |
3190 | /* Clear the interrupt bit we expect to be set. */ |
3191 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | | |
3192 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); | |
3193 | ||
3194 | qib_kreceive(rcd, NULL, &npkts); | |
f931551b RC |
3195 | |
3196 | return IRQ_HANDLED; | |
3197 | } | |
3198 | ||
3199 | /* | |
3200 | * Dedicated Send buffer available interrupt handler. | |
3201 | */ | |
3202 | static irqreturn_t qib_7322bufavail(int irq, void *data) | |
3203 | { | |
3204 | struct qib_devdata *dd = data; | |
3205 | ||
3206 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3207 | /* | |
3208 | * This return value is not great, but we do not want the | |
3209 | * interrupt core code to remove our interrupt handler | |
3210 | * because we don't appear to be handling an interrupt | |
3211 | * during a chip reset. | |
3212 | */ | |
3213 | return IRQ_HANDLED; | |
3214 | ||
1ed88dd7 | 3215 | this_cpu_inc(*dd->int_counter); |
f931551b RC |
3216 | |
3217 | /* Clear the interrupt bit we expect to be set. */ | |
3218 | qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); | |
3219 | ||
3220 | /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */ | |
3221 | if (dd->flags & QIB_INITTED) | |
3222 | qib_ib_piobufavail(dd); | |
3223 | else | |
3224 | qib_wantpiobuf_7322_intr(dd, 0); | |
3225 | ||
3226 | return IRQ_HANDLED; | |
3227 | } | |
3228 | ||
3229 | /* | |
3230 | * Dedicated Send DMA interrupt handler. | |
3231 | */ | |
3232 | static irqreturn_t sdma_intr(int irq, void *data) | |
3233 | { | |
3234 | struct qib_pportdata *ppd = data; | |
3235 | struct qib_devdata *dd = ppd->dd; | |
3236 | ||
3237 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3238 | /* | |
3239 | * This return value is not great, but we do not want the | |
3240 | * interrupt core code to remove our interrupt handler | |
3241 | * because we don't appear to be handling an interrupt | |
3242 | * during a chip reset. | |
3243 | */ | |
3244 | return IRQ_HANDLED; | |
3245 | ||
1ed88dd7 | 3246 | this_cpu_inc(*dd->int_counter); |
f931551b | 3247 | |
f931551b RC |
3248 | /* Clear the interrupt bit we expect to be set. */ |
3249 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3250 | INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); | |
3251 | qib_sdma_intr(ppd); | |
3252 | ||
3253 | return IRQ_HANDLED; | |
3254 | } | |
3255 | ||
3256 | /* | |
3257 | * Dedicated Send DMA idle interrupt handler. | |
3258 | */ | |
3259 | static irqreturn_t sdma_idle_intr(int irq, void *data) | |
3260 | { | |
3261 | struct qib_pportdata *ppd = data; | |
3262 | struct qib_devdata *dd = ppd->dd; | |
3263 | ||
3264 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3265 | /* | |
3266 | * This return value is not great, but we do not want the | |
3267 | * interrupt core code to remove our interrupt handler | |
3268 | * because we don't appear to be handling an interrupt | |
3269 | * during a chip reset. | |
3270 | */ | |
3271 | return IRQ_HANDLED; | |
3272 | ||
1ed88dd7 | 3273 | this_cpu_inc(*dd->int_counter); |
f931551b | 3274 | |
f931551b RC |
3275 | /* Clear the interrupt bit we expect to be set. */ |
3276 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3277 | INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); | |
3278 | qib_sdma_intr(ppd); | |
3279 | ||
3280 | return IRQ_HANDLED; | |
3281 | } | |
3282 | ||
3283 | /* | |
3284 | * Dedicated Send DMA progress interrupt handler. | |
3285 | */ | |
3286 | static irqreturn_t sdma_progress_intr(int irq, void *data) | |
3287 | { | |
3288 | struct qib_pportdata *ppd = data; | |
3289 | struct qib_devdata *dd = ppd->dd; | |
3290 | ||
3291 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3292 | /* | |
3293 | * This return value is not great, but we do not want the | |
3294 | * interrupt core code to remove our interrupt handler | |
3295 | * because we don't appear to be handling an interrupt | |
3296 | * during a chip reset. | |
3297 | */ | |
3298 | return IRQ_HANDLED; | |
3299 | ||
1ed88dd7 | 3300 | this_cpu_inc(*dd->int_counter); |
f931551b | 3301 | |
f931551b RC |
3302 | /* Clear the interrupt bit we expect to be set. */ |
3303 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3304 | INT_MASK_P(SDmaProgress, 1) : | |
3305 | INT_MASK_P(SDmaProgress, 0)); | |
3306 | qib_sdma_intr(ppd); | |
3307 | ||
3308 | return IRQ_HANDLED; | |
3309 | } | |
3310 | ||
3311 | /* | |
3312 | * Dedicated Send DMA cleanup interrupt handler. | |
3313 | */ | |
3314 | static irqreturn_t sdma_cleanup_intr(int irq, void *data) | |
3315 | { | |
3316 | struct qib_pportdata *ppd = data; | |
3317 | struct qib_devdata *dd = ppd->dd; | |
3318 | ||
3319 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3320 | /* | |
3321 | * This return value is not great, but we do not want the | |
3322 | * interrupt core code to remove our interrupt handler | |
3323 | * because we don't appear to be handling an interrupt | |
3324 | * during a chip reset. | |
3325 | */ | |
3326 | return IRQ_HANDLED; | |
3327 | ||
1ed88dd7 | 3328 | this_cpu_inc(*dd->int_counter); |
f931551b | 3329 | |
f931551b RC |
3330 | /* Clear the interrupt bit we expect to be set. */ |
3331 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3332 | INT_MASK_PM(SDmaCleanupDone, 1) : | |
3333 | INT_MASK_PM(SDmaCleanupDone, 0)); | |
3334 | qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); | |
3335 | ||
3336 | return IRQ_HANDLED; | |
3337 | } | |
3338 | ||
8469ba39 MM |
3339 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3340 | ||
3341 | static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) | |
3342 | { | |
3343 | if (!m->dca) | |
3344 | return; | |
3345 | qib_devinfo(dd->pcidev, | |
3346 | "Disabling notifier on HCA %d irq %d\n", | |
3347 | dd->unit, | |
3348 | m->msix.vector); | |
3349 | irq_set_affinity_notifier( | |
3350 | m->msix.vector, | |
3351 | NULL); | |
3352 | m->notifier = NULL; | |
3353 | } | |
3354 | ||
3355 | static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) | |
3356 | { | |
3357 | struct qib_irq_notify *n; | |
3358 | ||
3359 | if (!m->dca) | |
3360 | return; | |
3361 | n = kzalloc(sizeof(*n), GFP_KERNEL); | |
3362 | if (n) { | |
3363 | int ret; | |
3364 | ||
3365 | m->notifier = n; | |
3366 | n->notify.irq = m->msix.vector; | |
3367 | n->notify.notify = qib_irq_notifier_notify; | |
3368 | n->notify.release = qib_irq_notifier_release; | |
3369 | n->arg = m->arg; | |
3370 | n->rcv = m->rcv; | |
3371 | qib_devinfo(dd->pcidev, | |
3372 | "set notifier irq %d rcv %d notify %p\n", | |
3373 | n->notify.irq, n->rcv, &n->notify); | |
3374 | ret = irq_set_affinity_notifier( | |
3375 | n->notify.irq, | |
3376 | &n->notify); | |
3377 | if (ret) { | |
3378 | m->notifier = NULL; | |
3379 | kfree(n); | |
3380 | } | |
3381 | } | |
3382 | } | |
3383 | ||
3384 | #endif | |
3385 | ||
f931551b RC |
3386 | /* |
3387 | * Set up our chip-specific interrupt handler. | |
3388 | * The interrupt type has already been setup, so | |
3389 | * we just need to do the registration and error checking. | |
3390 | * If we are using MSIx interrupts, we may fall back to | |
3391 | * INTx later, if the interrupt handler doesn't get called | |
3392 | * within 1/2 second (see verify_interrupt()). | |
3393 | */ | |
3394 | static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) | |
3395 | { | |
3396 | int ret, i, msixnum; | |
3397 | u64 redirect[6]; | |
3398 | u64 mask; | |
a778f3fd MM |
3399 | const struct cpumask *local_mask; |
3400 | int firstcpu, secondcpu = 0, currrcvcpu = 0; | |
f931551b RC |
3401 | |
3402 | if (!dd->num_pports) | |
3403 | return; | |
3404 | ||
3405 | if (clearpend) { | |
3406 | /* | |
3407 | * if not switching interrupt types, be sure interrupts are | |
3408 | * disabled, and then clear anything pending at this point, | |
3409 | * because we are starting clean. | |
3410 | */ | |
3411 | qib_7322_set_intr_state(dd, 0); | |
3412 | ||
3413 | /* clear the reset error, init error/hwerror mask */ | |
3414 | qib_7322_init_hwerrors(dd); | |
3415 | ||
3416 | /* clear any interrupt bits that might be set */ | |
3417 | qib_write_kreg(dd, kr_intclear, ~0ULL); | |
3418 | ||
3419 | /* make sure no pending MSIx intr, and clear diag reg */ | |
3420 | qib_write_kreg(dd, kr_intgranted, ~0ULL); | |
3421 | qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); | |
3422 | } | |
3423 | ||
3424 | if (!dd->cspec->num_msix_entries) { | |
3425 | /* Try to get INTx interrupt */ | |
3426 | try_intx: | |
3427 | if (!dd->pcidev->irq) { | |
7fac3301 MM |
3428 | qib_dev_err(dd, |
3429 | "irq is 0, BIOS error? Interrupts won't work\n"); | |
f931551b RC |
3430 | goto bail; |
3431 | } | |
3432 | ret = request_irq(dd->pcidev->irq, qib_7322intr, | |
3433 | IRQF_SHARED, QIB_DRV_NAME, dd); | |
3434 | if (ret) { | |
7fac3301 MM |
3435 | qib_dev_err(dd, |
3436 | "Couldn't setup INTx interrupt (irq=%d): %d\n", | |
3437 | dd->pcidev->irq, ret); | |
f931551b RC |
3438 | goto bail; |
3439 | } | |
3440 | dd->cspec->irq = dd->pcidev->irq; | |
3441 | dd->cspec->main_int_mask = ~0ULL; | |
3442 | goto bail; | |
3443 | } | |
3444 | ||
3445 | /* Try to get MSIx interrupts */ | |
3446 | memset(redirect, 0, sizeof redirect); | |
3447 | mask = ~0ULL; | |
3448 | msixnum = 0; | |
a778f3fd MM |
3449 | local_mask = cpumask_of_pcibus(dd->pcidev->bus); |
3450 | firstcpu = cpumask_first(local_mask); | |
3451 | if (firstcpu >= nr_cpu_ids || | |
3452 | cpumask_weight(local_mask) == num_online_cpus()) { | |
3453 | local_mask = topology_core_cpumask(0); | |
3454 | firstcpu = cpumask_first(local_mask); | |
3455 | } | |
3456 | if (firstcpu < nr_cpu_ids) { | |
3457 | secondcpu = cpumask_next(firstcpu, local_mask); | |
3458 | if (secondcpu >= nr_cpu_ids) | |
3459 | secondcpu = firstcpu; | |
3460 | currrcvcpu = secondcpu; | |
3461 | } | |
f931551b RC |
3462 | for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { |
3463 | irq_handler_t handler; | |
f931551b RC |
3464 | void *arg; |
3465 | u64 val; | |
3466 | int lsb, reg, sh; | |
8469ba39 MM |
3467 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3468 | int dca = 0; | |
3469 | #endif | |
f931551b | 3470 | |
a778f3fd MM |
3471 | dd->cspec->msix_entries[msixnum]. |
3472 | name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] | |
3473 | = '\0'; | |
f931551b RC |
3474 | if (i < ARRAY_SIZE(irq_table)) { |
3475 | if (irq_table[i].port) { | |
3476 | /* skip if for a non-configured port */ | |
3477 | if (irq_table[i].port > dd->num_pports) | |
3478 | continue; | |
3479 | arg = dd->pport + irq_table[i].port - 1; | |
3480 | } else | |
3481 | arg = dd; | |
8469ba39 MM |
3482 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3483 | dca = irq_table[i].dca; | |
3484 | #endif | |
f931551b RC |
3485 | lsb = irq_table[i].lsb; |
3486 | handler = irq_table[i].handler; | |
a778f3fd MM |
3487 | snprintf(dd->cspec->msix_entries[msixnum].name, |
3488 | sizeof(dd->cspec->msix_entries[msixnum].name) | |
3489 | - 1, | |
3490 | QIB_DRV_NAME "%d%s", dd->unit, | |
3491 | irq_table[i].name); | |
f931551b RC |
3492 | } else { |
3493 | unsigned ctxt; | |
3494 | ||
3495 | ctxt = i - ARRAY_SIZE(irq_table); | |
3496 | /* per krcvq context receive interrupt */ | |
3497 | arg = dd->rcd[ctxt]; | |
3498 | if (!arg) | |
3499 | continue; | |
e67306a3 MM |
3500 | if (qib_krcvq01_no_msi && ctxt < 2) |
3501 | continue; | |
8469ba39 MM |
3502 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3503 | dca = 1; | |
3504 | #endif | |
f931551b RC |
3505 | lsb = QIB_I_RCVAVAIL_LSB + ctxt; |
3506 | handler = qib_7322pintr; | |
a778f3fd MM |
3507 | snprintf(dd->cspec->msix_entries[msixnum].name, |
3508 | sizeof(dd->cspec->msix_entries[msixnum].name) | |
3509 | - 1, | |
3510 | QIB_DRV_NAME "%d (kctx)", dd->unit); | |
f931551b | 3511 | } |
a778f3fd MM |
3512 | ret = request_irq( |
3513 | dd->cspec->msix_entries[msixnum].msix.vector, | |
3514 | handler, 0, dd->cspec->msix_entries[msixnum].name, | |
3515 | arg); | |
f931551b RC |
3516 | if (ret) { |
3517 | /* | |
3518 | * Shouldn't happen since the enable said we could | |
3519 | * have as many as we are trying to setup here. | |
3520 | */ | |
7fac3301 MM |
3521 | qib_dev_err(dd, |
3522 | "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n", | |
3523 | msixnum, | |
a778f3fd MM |
3524 | dd->cspec->msix_entries[msixnum].msix.vector, |
3525 | ret); | |
f931551b RC |
3526 | qib_7322_nomsix(dd); |
3527 | goto try_intx; | |
3528 | } | |
a778f3fd | 3529 | dd->cspec->msix_entries[msixnum].arg = arg; |
8469ba39 MM |
3530 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3531 | dd->cspec->msix_entries[msixnum].dca = dca; | |
3532 | dd->cspec->msix_entries[msixnum].rcv = | |
3533 | handler == qib_7322pintr; | |
3534 | #endif | |
f931551b RC |
3535 | if (lsb >= 0) { |
3536 | reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; | |
3537 | sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * | |
3538 | SYM_LSB(IntRedirect0, vec1); | |
3539 | mask &= ~(1ULL << lsb); | |
3540 | redirect[reg] |= ((u64) msixnum) << sh; | |
3541 | } | |
3542 | val = qib_read_kreg64(dd, 2 * msixnum + 1 + | |
3543 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | |
a778f3fd MM |
3544 | if (firstcpu < nr_cpu_ids && |
3545 | zalloc_cpumask_var( | |
3546 | &dd->cspec->msix_entries[msixnum].mask, | |
3547 | GFP_KERNEL)) { | |
3548 | if (handler == qib_7322pintr) { | |
3549 | cpumask_set_cpu(currrcvcpu, | |
3550 | dd->cspec->msix_entries[msixnum].mask); | |
3551 | currrcvcpu = cpumask_next(currrcvcpu, | |
3552 | local_mask); | |
3553 | if (currrcvcpu >= nr_cpu_ids) | |
3554 | currrcvcpu = secondcpu; | |
3555 | } else { | |
3556 | cpumask_set_cpu(firstcpu, | |
3557 | dd->cspec->msix_entries[msixnum].mask); | |
3558 | } | |
3559 | irq_set_affinity_hint( | |
3560 | dd->cspec->msix_entries[msixnum].msix.vector, | |
3561 | dd->cspec->msix_entries[msixnum].mask); | |
3562 | } | |
f931551b RC |
3563 | msixnum++; |
3564 | } | |
3565 | /* Initialize the vector mapping */ | |
3566 | for (i = 0; i < ARRAY_SIZE(redirect); i++) | |
3567 | qib_write_kreg(dd, kr_intredirect + i, redirect[i]); | |
3568 | dd->cspec->main_int_mask = mask; | |
e67306a3 MM |
3569 | tasklet_init(&dd->error_tasklet, qib_error_tasklet, |
3570 | (unsigned long)dd); | |
f931551b RC |
3571 | bail:; |
3572 | } | |
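/*
 * Editor's sketch (illustration only, not driver code): the vector-to-CPU
 * spreading policy used above.  Non-receive vectors (errors, SDMA, buffer
 * available) all go to the first CPU local to the HCA; the per-context
 * receive vectors rotate over the remaining local CPUs.  cpu_ids[] and
 * the helper name are hypothetical.
 */
static int ex_pick_cpu(const int *cpu_ids, int ncpus, int is_rcv_vector,
		       int *next_rcv)
{
	if (!is_rcv_vector)
		return cpu_ids[0];		/* "firstcpu" in the code above */

	if (*next_rcv >= ncpus)			/* wrap back to "secondcpu" */
		*next_rcv = ncpus > 1 ? 1 : 0;
	return cpu_ids[(*next_rcv)++];
}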
3573 | ||
3574 | /** | |
3575 | * qib_7322_boardname - fill in the board name and note features | |
3576 | * @dd: the qlogic_ib device | |
3577 | * | |
3578 | * info will be based on the board revision register | |
3579 | */ | |
3580 | static unsigned qib_7322_boardname(struct qib_devdata *dd) | |
3581 | { | |
3582 | /* Will need enumeration of board-types here */ | |
3583 | char *n; | |
3584 | u32 boardid, namelen; | |
3585 | unsigned features = DUAL_PORT_CAP; | |
3586 | ||
3587 | boardid = SYM_FIELD(dd->revision, Revision, BoardID); | |
3588 | ||
3589 | switch (boardid) { | |
3590 | case 0: | |
3591 | n = "InfiniPath_QLE7342_Emulation"; | |
3592 | break; | |
3593 | case 1: | |
3594 | n = "InfiniPath_QLE7340"; | |
3595 | dd->flags |= QIB_HAS_QSFP; | |
3596 | features = PORT_SPD_CAP; | |
3597 | break; | |
3598 | case 2: | |
3599 | n = "InfiniPath_QLE7342"; | |
3600 | dd->flags |= QIB_HAS_QSFP; | |
3601 | break; | |
3602 | case 3: | |
3603 | n = "InfiniPath_QMI7342"; | |
3604 | break; | |
3605 | case 4: | |
3606 | n = "InfiniPath_Unsupported7342"; | |
3607 | qib_dev_err(dd, "Unsupported version of QMH7342\n"); | |
3608 | features = 0; | |
3609 | break; | |
3610 | case BOARD_QMH7342: | |
3611 | n = "InfiniPath_QMH7342"; | |
3612 | features = 0x24; | |
3613 | break; | |
3614 | case BOARD_QME7342: | |
3615 | n = "InfiniPath_QME7342"; | |
3616 | break; | |
f509f9c1 MM |
3617 | case 8: |
3618 | n = "InfiniPath_QME7362"; | |
3619 | dd->flags |= QIB_HAS_QSFP; | |
3620 | break; | |
0e6bbba5 VA |
3621 | case BOARD_QMH7360: |
3622 | n = "Intel IB QDR 1P FLR-QSFP Adptr"; | |
3623 | dd->flags |= QIB_HAS_QSFP; | |
3624 | break; | |
f931551b RC |
3625 | case 15: |
3626 | n = "InfiniPath_QLE7342_TEST"; | |
3627 | dd->flags |= QIB_HAS_QSFP; | |
3628 | break; | |
3629 | default: | |
3630 | n = "InfiniPath_QLE73xy_UNKNOWN"; | |
3631 | qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); | |
3632 | break; | |
3633 | } | |
3634 | dd->board_atten = 1; /* index into txdds_Xdr */ | |
3635 | ||
3636 | namelen = strlen(n) + 1; | |
3637 | dd->boardname = kmalloc(namelen, GFP_KERNEL); | |
3638 | if (!dd->boardname) | |
3639 | qib_dev_err(dd, "Failed allocation for board name: %s\n", n); | |
3640 | else | |
3641 | snprintf(dd->boardname, namelen, "%s", n); | |
3642 | ||
3643 | snprintf(dd->boardversion, sizeof(dd->boardversion), | |
3644 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | |
3645 | QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, | |
3646 | (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), | |
3647 | dd->majrev, dd->minrev, | |
3648 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | |
3649 | ||
3650 | if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { | |
7fac3301 MM |
3651 | qib_devinfo(dd->pcidev, |
3652 | "IB%u: Forced to single port mode by module parameter\n", | |
3653 | dd->unit); | |
f931551b RC |
3654 | features &= PORT_SPD_CAP; |
3655 | } | |
3656 | ||
3657 | return features; | |
3658 | } | |
3659 | ||
3660 | /* | |
3661 | * This routine sleeps, so it can only be called from user context, not | |
3662 | * from interrupt context. | |
3663 | */ | |
3664 | static int qib_do_7322_reset(struct qib_devdata *dd) | |
3665 | { | |
3666 | u64 val; | |
3667 | u64 *msix_vecsave; | |
3668 | int i, msix_entries, ret = 1; | |
3669 | u16 cmdval; | |
3670 | u8 int_line, clinesz; | |
3671 | unsigned long flags; | |
3672 | ||
3673 | /* Use dev_err so it shows up in logs, etc. */ | |
3674 | qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); | |
3675 | ||
3676 | qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); | |
3677 | ||
3678 | msix_entries = dd->cspec->num_msix_entries; | |
3679 | ||
3680 | /* no interrupts till re-initted */ | |
3681 | qib_7322_set_intr_state(dd, 0); | |
3682 | ||
3683 | if (msix_entries) { | |
3684 | qib_7322_nomsix(dd); | |
3685 | /* can be up to 512 bytes, too big for stack */ | |
3686 | msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries * | |
3687 | sizeof(u64), GFP_KERNEL); | |
3688 | if (!msix_vecsave) | |
3689 | qib_dev_err(dd, "No mem to save MSIx data\n"); | |
3690 | } else | |
3691 | msix_vecsave = NULL; | |
3692 | ||
3693 | /* | |
3694 | * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector | |
3695 | * info that is set up by the BIOS, so we have to save and restore | |
3696 | * it ourselves. There is some risk something could change it, | |
3697 | * after we save it, but since we have disabled the MSIx, it | |
3698 | * shouldn't be touched... | |
3699 | */ | |
3700 | for (i = 0; i < msix_entries; i++) { | |
3701 | u64 vecaddr, vecdata; | |
3702 | vecaddr = qib_read_kreg64(dd, 2 * i + | |
3703 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | |
3704 | vecdata = qib_read_kreg64(dd, 1 + 2 * i + | |
3705 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | |
3706 | if (msix_vecsave) { | |
3707 | msix_vecsave[2 * i] = vecaddr; | |
3708 | /* save it without the masked bit set */ | |
3709 | msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL; | |
3710 | } | |
3711 | } | |
3712 | ||
3713 | dd->pport->cpspec->ibdeltainprog = 0; | |
3714 | dd->pport->cpspec->ibsymdelta = 0; | |
3715 | dd->pport->cpspec->iblnkerrdelta = 0; | |
3716 | dd->pport->cpspec->ibmalfdelta = 0; | |
1ed88dd7 MM |
3717 | /* so we check interrupts work again */ |
3718 | dd->z_int_counter = qib_int_counter(dd); | |
f931551b RC |
3719 | |
3720 | /* | |
3721 | * Keep chip from being accessed until we are ready. Use | |
3722 | * writeq() directly, to allow the write even though QIB_PRESENT | |
e9c54999 | 3723 | * isn't set. |
f931551b RC |
3724 | */ |
3725 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); | |
3726 | dd->flags |= QIB_DOING_RESET; | |
3727 | val = dd->control | QLOGIC_IB_C_RESET; | |
3728 | writeq(val, &dd->kregbase[kr_control]); | |
3729 | ||
3730 | for (i = 1; i <= 5; i++) { | |
3731 | /* | |
3732 | * Allow MBIST, etc. to complete; longer on each retry. | |
3733 | * We sometimes get machine checks from bus timeout if no | |
3734 | * response, so for now, make it *really* long. | |
3735 | */ | |
3736 | msleep(1000 + (1 + i) * 3000); | |
3737 | ||
3738 | qib_pcie_reenable(dd, cmdval, int_line, clinesz); | |
3739 | ||
3740 | /* | |
3741 | * Use readq directly, so we don't need to mark it as PRESENT | |
3742 | * until we get a successful indication that all is well. | |
3743 | */ | |
3744 | val = readq(&dd->kregbase[kr_revision]); | |
3745 | if (val == dd->revision) | |
3746 | break; | |
3747 | if (i == 5) { | |
7fac3301 MM |
3748 | qib_dev_err(dd, |
3749 | "Failed to initialize after reset, unusable\n"); | |
f931551b RC |
3750 | ret = 0; |
3751 | goto bail; | |
3752 | } | |
3753 | } | |
3754 | ||
3755 | dd->flags |= QIB_PRESENT; /* it's back */ | |
3756 | ||
3757 | if (msix_entries) { | |
3758 | /* restore the MSIx vector address and data if saved above */ | |
3759 | for (i = 0; i < msix_entries; i++) { | |
a778f3fd | 3760 | dd->cspec->msix_entries[i].msix.entry = i; |
f931551b RC |
3761 | if (!msix_vecsave || !msix_vecsave[2 * i]) |
3762 | continue; | |
3763 | qib_write_kreg(dd, 2 * i + | |
3764 | (QIB_7322_MsixTable_OFFS / sizeof(u64)), | |
3765 | msix_vecsave[2 * i]); | |
3766 | qib_write_kreg(dd, 1 + 2 * i + | |
3767 | (QIB_7322_MsixTable_OFFS / sizeof(u64)), | |
3768 | msix_vecsave[1 + 2 * i]); | |
3769 | } | |
3770 | } | |
3771 | ||
3772 | /* initialize the remaining registers. */ | |
3773 | for (i = 0; i < dd->num_pports; ++i) | |
3774 | write_7322_init_portregs(&dd->pport[i]); | |
3775 | write_7322_initregs(dd); | |
3776 | ||
3777 | if (qib_pcie_params(dd, dd->lbus_width, | |
3778 | &dd->cspec->num_msix_entries, | |
3779 | dd->cspec->msix_entries)) | |
7fac3301 MM |
3780 | qib_dev_err(dd, |
3781 | "Reset failed to setup PCIe or interrupts; continuing anyway\n"); | |
f931551b RC |
3782 | |
3783 | qib_setup_7322_interrupt(dd, 1); | |
3784 | ||
3785 | for (i = 0; i < dd->num_pports; ++i) { | |
3786 | struct qib_pportdata *ppd = &dd->pport[i]; | |
3787 | ||
3788 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
3789 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | |
3790 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | |
3791 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
3792 | } | |
3793 | ||
3794 | bail: | |
3795 | dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */ | |
3796 | kfree(msix_vecsave); | |
3797 | return ret; | |
3798 | } | |
3799 | ||
3800 | /** | |
3801 | * qib_7322_put_tid - write a TID to the chip | |
3802 | * @dd: the qlogic_ib device | |
3803 | * @tidptr: pointer to the expected TID (in chip) to update | |
3804 | * @type: RCVHQ_RCV_TYPE_EAGER for eager, RCVHQ_RCV_TYPE_EXPECTED for expected | |
3805 | * @pa: physical address of in memory buffer; tidinvalid if freeing | |
3806 | */ | |
3807 | static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | |
3808 | u32 type, unsigned long pa) | |
3809 | { | |
3810 | if (!(dd->flags & QIB_PRESENT)) | |
3811 | return; | |
3812 | if (pa != dd->tidinvalid) { | |
3813 | u64 chippa = pa >> IBA7322_TID_PA_SHIFT; | |
3814 | ||
3815 | /* paranoia checks */ | |
3816 | if (pa != (chippa << IBA7322_TID_PA_SHIFT)) { | |
3817 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | |
3818 | pa); | |
3819 | return; | |
3820 | } | |
3821 | if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { | |
7fac3301 MM |
3822 | qib_dev_err(dd, |
3823 | "Physical page address 0x%lx larger than supported\n", | |
3824 | pa); | |
f931551b RC |
3825 | return; |
3826 | } | |
3827 | ||
3828 | if (type == RCVHQ_RCV_TYPE_EAGER) | |
3829 | chippa |= dd->tidtemplate; | |
3830 | else /* for now, always full 4KB page */ | |
3831 | chippa |= IBA7322_TID_SZ_4K; | |
3832 | pa = chippa; | |
3833 | } | |
3834 | writeq(pa, tidptr); | |
3835 | mmiowb(); | |
3836 | } | |
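/*
 * Editor's sketch (illustration only, not driver code): the TID encoding
 * performed above.  The shift here is a hypothetical stand-in for
 * IBA7322_TID_PA_SHIFT; the driver additionally range-checks the shifted
 * address against the chip's address field width.
 */
#define EX_TID_PA_SHIFT	11U		/* 2 KB alignment assumed */

static int ex_encode_tid(unsigned long pa, unsigned long long szcode,
			 unsigned long long *out)
{
	unsigned long long chippa = pa >> EX_TID_PA_SHIFT;

	if (pa != (chippa << EX_TID_PA_SHIFT))
		return -1;		/* not 2 KB aligned */
	*out = chippa | szcode;		/* address field plus buffer-size code */
	return 0;
}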
3837 | ||
3838 | /** | |
3839 | * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager | |
3840 | * @dd: the qlogic_ib device | |
3841 | * @rcd: the ctxt | |
3842 | * | |
3843 | * clear all TID entries for a ctxt, expected and eager. | |
3844 | * Used from qib_close(). | |
3845 | */ | |
3846 | static void qib_7322_clear_tids(struct qib_devdata *dd, | |
3847 | struct qib_ctxtdata *rcd) | |
3848 | { | |
3849 | u64 __iomem *tidbase; | |
3850 | unsigned long tidinv; | |
3851 | u32 ctxt; | |
3852 | int i; | |
3853 | ||
3854 | if (!dd->kregbase || !rcd) | |
3855 | return; | |
3856 | ||
3857 | ctxt = rcd->ctxt; | |
3858 | ||
3859 | tidinv = dd->tidinvalid; | |
3860 | tidbase = (u64 __iomem *) | |
3861 | ((char __iomem *) dd->kregbase + | |
3862 | dd->rcvtidbase + | |
3863 | ctxt * dd->rcvtidcnt * sizeof(*tidbase)); | |
3864 | ||
3865 | for (i = 0; i < dd->rcvtidcnt; i++) | |
3866 | qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | |
3867 | tidinv); | |
3868 | ||
3869 | tidbase = (u64 __iomem *) | |
3870 | ((char __iomem *) dd->kregbase + | |
3871 | dd->rcvegrbase + | |
3872 | rcd->rcvegr_tid_base * sizeof(*tidbase)); | |
3873 | ||
3874 | for (i = 0; i < rcd->rcvegrcnt; i++) | |
3875 | qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | |
3876 | tidinv); | |
3877 | } | |
3878 | ||
3879 | /** | |
3880 | * qib_7322_tidtemplate - setup constants for TID updates | |
3881 | * @dd: the qlogic_ib device | |
3882 | * | |
3883 | * We setup stuff that we use a lot, to avoid calculating each time | |
3884 | */ | |
3885 | static void qib_7322_tidtemplate(struct qib_devdata *dd) | |
3886 | { | |
3887 | /* | |
3888 | * For now, we always allocate 4KB buffers (at init) so we can | |
3889 | * receive max size packets. We may want a module parameter to | |
3890 | * specify 2KB or 4KB and/or make it per port instead of per device | |
3891 | * for those who want to reduce memory footprint. Note that the | |
3892 | * rcvhdrentsize size must be large enough to hold the largest | |
3893 | * IB header (currently 96 bytes) that we expect to handle (plus of | |
3894 | * course the 2 dwords of RHF). | |
3895 | */ | |
3896 | if (dd->rcvegrbufsize == 2048) | |
3897 | dd->tidtemplate = IBA7322_TID_SZ_2K; | |
3898 | else if (dd->rcvegrbufsize == 4096) | |
3899 | dd->tidtemplate = IBA7322_TID_SZ_4K; | |
3900 | dd->tidinvalid = 0; | |
3901 | } | |
3902 | ||
3903 | /** | |
3904 | * qib_7322_get_base_info - set chip-specific flags for user code | |
3905 | * @rcd: the qlogic_ib ctxt | |
3906 | * @kinfo: qib_base_info pointer | |
3907 | * | |
3908 | * We set the PCIE flag because the lower bandwidth on PCIe vs | |
3909 | * HyperTransport can affect some user packet algorithms. | |
3910 | */ | |
3911 | ||
3912 | static int qib_7322_get_base_info(struct qib_ctxtdata *rcd, | |
3913 | struct qib_base_info *kinfo) | |
3914 | { | |
3915 | kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP | | |
3916 | QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL | | |
3917 | QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA; | |
3918 | if (rcd->dd->cspec->r1) | |
3919 | kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK; | |
3920 | if (rcd->dd->flags & QIB_USE_SPCL_TRIG) | |
3921 | kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER; | |
3922 | ||
3923 | return 0; | |
3924 | } | |
3925 | ||
3926 | static struct qib_message_header * | |
3927 | qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) | |
3928 | { | |
3929 | u32 offset = qib_hdrget_offset(rhf_addr); | |
3930 | ||
3931 | return (struct qib_message_header *) | |
3932 | (rhf_addr - dd->rhf_offset + offset); | |
3933 | } | |
3934 | ||
3935 | /* | |
3936 | * Configure number of contexts. | |
3937 | */ | |
3938 | static void qib_7322_config_ctxts(struct qib_devdata *dd) | |
3939 | { | |
3940 | unsigned long flags; | |
3941 | u32 nchipctxts; | |
3942 | ||
3943 | nchipctxts = qib_read_kreg32(dd, kr_contextcnt); | |
3944 | dd->cspec->numctxts = nchipctxts; | |
3945 | if (qib_n_krcv_queues > 1 && dd->num_pports) { | |
f931551b RC |
3946 | dd->first_user_ctxt = NUM_IB_PORTS + |
3947 | (qib_n_krcv_queues - 1) * dd->num_pports; | |
3948 | if (dd->first_user_ctxt > nchipctxts) | |
3949 | dd->first_user_ctxt = nchipctxts; | |
3950 | dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports; | |
3951 | } else { | |
3952 | dd->first_user_ctxt = NUM_IB_PORTS; | |
3953 | dd->n_krcv_queues = 1; | |
3954 | } | |
3955 | ||
3956 | if (!qib_cfgctxts) { | |
3957 | int nctxts = dd->first_user_ctxt + num_online_cpus(); | |
3958 | ||
3959 | if (nctxts <= 6) | |
3960 | dd->ctxtcnt = 6; | |
3961 | else if (nctxts <= 10) | |
3962 | dd->ctxtcnt = 10; | |
3963 | else if (nctxts <= nchipctxts) | |
3964 | dd->ctxtcnt = nchipctxts; | |
3965 | } else if (qib_cfgctxts < dd->num_pports) | |
3966 | dd->ctxtcnt = dd->num_pports; | |
3967 | else if (qib_cfgctxts <= nchipctxts) | |
3968 | dd->ctxtcnt = qib_cfgctxts; | |
3969 | if (!dd->ctxtcnt) /* none of the above, set to max */ | |
3970 | dd->ctxtcnt = nchipctxts; | |
3971 | ||
3972 | /* | |
3973 | * Chip can be configured for 6, 10, or 18 ctxts, and choice | |
3974 | * affects number of eager TIDs per ctxt (1K, 2K, 4K). | |
3975 | * Lock to be paranoid about later motion, etc. | |
3976 | */ | |
3977 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
3978 | if (dd->ctxtcnt > 10) | |
3979 | dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg); | |
3980 | else if (dd->ctxtcnt > 6) | |
3981 | dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg); | |
3982 | /* else configure for default 6 receive ctxts */ | |
3983 | ||
3984 | /* The XRC opcode is 5. */ | |
3985 | dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode); | |
3986 | ||
3987 | /* | |
3988 | * RcvCtrl *must* be written here so that the | |
3989 | * chip understands how to change rcvegrcnt below. | |
3990 | */ | |
3991 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | |
3992 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
3993 | ||
3994 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ | |
3995 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | |
0a43e117 MM |
3996 | if (qib_rcvhdrcnt) |
3997 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); | |
3998 | else | |
8d4548f2 | 3999 | dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt, |
0a43e117 | 4000 | dd->num_pports > 1 ? 1024U : 2048U); |
f931551b RC |
4001 | } |
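/*
 * Editor's sketch (illustration only, not driver code): how the context
 * count chosen above is rounded to one of the chip's supported
 * configurations (6, 10 or 18) and mapped to the ContextCfg field value.
 */
static unsigned int ex_round_ctxts(unsigned int wanted, unsigned int chip_max)
{
	if (wanted <= 6)
		return 6;
	if (wanted <= 10)
		return 10;
	return chip_max;		/* 18 on this chip */
}

static unsigned int ex_contextcfg(unsigned int ctxtcnt)
{
	return ctxtcnt > 10 ? 2 : (ctxtcnt > 6 ? 1 : 0);
}
/* e.g. 2 ports, 1 kernel rcv queue per port, 8 online CPUs:
 * 2 + 8 = 10 contexts wanted -> ctxtcnt 10 -> ContextCfg 1
 */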
4002 | ||
4003 | static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) | |
4004 | { | |
4005 | ||
4006 | int lsb, ret = 0; | |
4007 | u64 maskr; /* right-justified mask */ | |
4008 | ||
4009 | switch (which) { | |
4010 | ||
4011 | case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ | |
4012 | ret = ppd->link_width_enabled; | |
4013 | goto done; | |
4014 | ||
4015 | case QIB_IB_CFG_LWID: /* Get currently active Link-width */ | |
4016 | ret = ppd->link_width_active; | |
4017 | goto done; | |
4018 | ||
4019 | case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ | |
4020 | ret = ppd->link_speed_enabled; | |
4021 | goto done; | |
4022 | ||
4023 | case QIB_IB_CFG_SPD: /* Get current Link spd */ | |
4024 | ret = ppd->link_speed_active; | |
4025 | goto done; | |
4026 | ||
4027 | case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ | |
4028 | lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4029 | maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4030 | break; | |
4031 | ||
4032 | case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ | |
4033 | lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4034 | maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4035 | break; | |
4036 | ||
4037 | case QIB_IB_CFG_LINKLATENCY: | |
4038 | ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) & | |
4039 | SYM_MASK(IBCStatusB_0, LinkRoundTripLatency); | |
4040 | goto done; | |
4041 | ||
4042 | case QIB_IB_CFG_OP_VLS: | |
4043 | ret = ppd->vls_operational; | |
4044 | goto done; | |
4045 | ||
4046 | case QIB_IB_CFG_VL_HIGH_CAP: | |
4047 | ret = 16; | |
4048 | goto done; | |
4049 | ||
4050 | case QIB_IB_CFG_VL_LOW_CAP: | |
4051 | ret = 16; | |
4052 | goto done; | |
4053 | ||
4054 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | |
4055 | ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4056 | OverrunThreshold); | |
4057 | goto done; | |
4058 | ||
4059 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | |
4060 | ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4061 | PhyerrThreshold); | |
4062 | goto done; | |
4063 | ||
4064 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | |
4065 | /* will only take effect when the link state changes */ | |
4066 | ret = (ppd->cpspec->ibcctrl_a & | |
4067 | SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ? | |
4068 | IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; | |
4069 | goto done; | |
4070 | ||
4071 | case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | |
4072 | lsb = IBA7322_IBC_HRTBT_LSB; | |
4073 | maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ | |
4074 | break; | |
4075 | ||
4076 | case QIB_IB_CFG_PMA_TICKS: | |
4077 | /* | |
4078 | * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs | |
4079 | * Since the clock is always 250MHz, the value is 3, 1 or 0. | |
4080 | */ | |
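		/* i.e. QDR -> 3, DDR -> 1, SDR (default) -> 0 */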
4081 | if (ppd->link_speed_active == QIB_IB_QDR) | |
4082 | ret = 3; | |
4083 | else if (ppd->link_speed_active == QIB_IB_DDR) | |
4084 | ret = 1; | |
4085 | else | |
4086 | ret = 0; | |
4087 | goto done; | |
4088 | ||
4089 | default: | |
4090 | ret = -EINVAL; | |
4091 | goto done; | |
4092 | } | |
4093 | ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr); | |
4094 | done: | |
4095 | return ret; | |
4096 | } | |
4097 | ||
4098 | /* | |
4099 | * Below again cribbed liberally from older version. Do not lean | |
4100 | * heavily on it. | |
4101 | */ | |
4102 | #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB | |
4103 | #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \ | |
4104 | | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16)) | |
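/*
 * Rough sketch of the packing the LIDLMC case below relies on: the caller
 * passes val with the LID in the low 16 bits and an LMC-derived mask in the
 * high 16 bits, e.g. LID 0x10 with LMC 2 would arrive as
 * (0xfffc << 16) | 0x0010, so "val >> 16" can be used directly as the SLID
 * compare mask.
 */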
4105 | ||
4106 | static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) | |
4107 | { | |
4108 | struct qib_devdata *dd = ppd->dd; | |
4109 | u64 maskr; /* right-justified mask */ | |
4110 | int lsb, ret = 0; | |
4111 | u16 lcmd, licmd; | |
4112 | unsigned long flags; | |
4113 | ||
4114 | switch (which) { | |
4115 | case QIB_IB_CFG_LIDLMC: | |
4116 | /* | |
4117 | * Set LID and LMC. Combined to avoid possible hazard; the | |
4118 | * caller puts the LMC in the 16 MSbits, DLID in the 16 LSbits of val. | |
4119 | */ | |
4120 | lsb = IBA7322_IBC_DLIDLMC_SHIFT; | |
4121 | maskr = IBA7322_IBC_DLIDLMC_MASK; | |
4122 | /* | |
4123 | * For header-checking, the SLID in the packet will | |
4124 | * be masked with SendIBSLMCMask, and compared | |
4125 | * with SendIBSLIDAssignMask. Make sure we do not | |
4126 | * set any bits not covered by the mask, or we get | |
4127 | * false-positives. | |
4128 | */ | |
4129 | qib_write_kreg_port(ppd, krp_sendslid, | |
4130 | val & (val >> 16) & SendIBSLIDAssignMask); | |
4131 | qib_write_kreg_port(ppd, krp_sendslidmask, | |
4132 | (val >> 16) & SendIBSLMCMask); | |
4133 | break; | |
4134 | ||
4135 | case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ | |
4136 | ppd->link_width_enabled = val; | |
4137 | /* convert IB value to chip register value */ | |
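		/* (IB_WIDTH_1X -> 0, IB_WIDTH_4X -> 1, anything else -> 3) */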
4138 | if (val == IB_WIDTH_1X) | |
4139 | val = 0; | |
4140 | else if (val == IB_WIDTH_4X) | |
4141 | val = 1; | |
4142 | else | |
4143 | val = 3; | |
4144 | maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS); | |
4145 | lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS); | |
4146 | break; | |
4147 | ||
4148 | case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ | |
4149 | /* | |
4150 | * As with width, only write the actual register if the | |
4151 | * link is currently down, otherwise takes effect on next | |
25985edc | 4152 | * link change. Since setting is being explicitly requested |
f931551b RC |
4153 | * (via MAD or sysfs), clear autoneg failure status if speed |
4154 | * autoneg is enabled. | |
4155 | */ | |
4156 | ppd->link_speed_enabled = val; | |
4157 | val <<= IBA7322_IBC_SPEED_LSB; | |
4158 | maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK | | |
4159 | IBA7322_IBC_MAX_SPEED_MASK; | |
4160 | if (val & (val - 1)) { | |
4161 | /* Multiple speeds enabled */ | |
4162 | val |= IBA7322_IBC_IBTA_1_2_MASK | | |
4163 | IBA7322_IBC_MAX_SPEED_MASK; | |
4164 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
4165 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | |
4166 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
4167 | } else if (val & IBA7322_IBC_SPEED_QDR) | |
4168 | val |= IBA7322_IBC_IBTA_1_2_MASK; | |
4169 | /* IBTA 1.2 mode + min/max + speed bits are contiguous */ | |
4170 | lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE); | |
4171 | break; | |
4172 | ||
4173 | case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ | |
4174 | lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4175 | maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4176 | break; | |
4177 | ||
4178 | case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ | |
4179 | lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4180 | maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4181 | break; | |
4182 | ||
4183 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | |
4184 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4185 | OverrunThreshold); | |
4186 | if (maskr != val) { | |
4187 | ppd->cpspec->ibcctrl_a &= | |
4188 | ~SYM_MASK(IBCCtrlA_0, OverrunThreshold); | |
4189 | ppd->cpspec->ibcctrl_a |= (u64) val << | |
4190 | SYM_LSB(IBCCtrlA_0, OverrunThreshold); | |
4191 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4192 | ppd->cpspec->ibcctrl_a); | |
4193 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4194 | } | |
4195 | goto bail; | |
4196 | ||
4197 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | |
4198 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4199 | PhyerrThreshold); | |
4200 | if (maskr != val) { | |
4201 | ppd->cpspec->ibcctrl_a &= | |
4202 | ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold); | |
4203 | ppd->cpspec->ibcctrl_a |= (u64) val << | |
4204 | SYM_LSB(IBCCtrlA_0, PhyerrThreshold); | |
4205 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4206 | ppd->cpspec->ibcctrl_a); | |
4207 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4208 | } | |
4209 | goto bail; | |
4210 | ||
4211 | case QIB_IB_CFG_PKEYS: /* update pkeys */ | |
4212 | maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | | |
4213 | ((u64) ppd->pkeys[2] << 32) | | |
4214 | ((u64) ppd->pkeys[3] << 48); | |
4215 | qib_write_kreg_port(ppd, krp_partitionkey, maskr); | |
4216 | goto bail; | |
4217 | ||
4218 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | |
4219 | /* will only take effect when the link state changes */ | |
4220 | if (val == IB_LINKINITCMD_POLL) | |
4221 | ppd->cpspec->ibcctrl_a &= | |
4222 | ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState); | |
4223 | else /* SLEEP */ | |
4224 | ppd->cpspec->ibcctrl_a |= | |
4225 | SYM_MASK(IBCCtrlA_0, LinkDownDefaultState); | |
4226 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
4227 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4228 | goto bail; | |
4229 | ||
4230 | case QIB_IB_CFG_MTU: /* update the MTU in IBC */ | |
4231 | /* | |
4232 | * Update our housekeeping variables, and set IBC max | |
4233 | * size, same as init code; max IBC is max we allow in | |
4234 | * buffer, less the qword pbc, plus 1 for ICRC, in dwords | |
4235 | * Set even if it's unchanged, print debug message only | |
4236 | * on changes. | |
4237 | */ | |
4238 | val = (ppd->ibmaxlen >> 2) + 1; | |
4239 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen); | |
4240 | ppd->cpspec->ibcctrl_a |= (u64)val << | |
4241 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | |
4242 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4243 | ppd->cpspec->ibcctrl_a); | |
4244 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4245 | goto bail; | |
4246 | ||
4247 | case QIB_IB_CFG_LSTATE: /* set the IB link state */ | |
4248 | switch (val & 0xffff0000) { | |
4249 | case IB_LINKCMD_DOWN: | |
4250 | lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; | |
4251 | ppd->cpspec->ibmalfusesnap = 1; | |
4252 | ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, | |
4253 | crp_errlink); | |
4254 | if (!ppd->cpspec->ibdeltainprog && | |
4255 | qib_compat_ddr_negotiate) { | |
4256 | ppd->cpspec->ibdeltainprog = 1; | |
4257 | ppd->cpspec->ibsymsnap = | |
4258 | read_7322_creg32_port(ppd, | |
4259 | crp_ibsymbolerr); | |
4260 | ppd->cpspec->iblnkerrsnap = | |
4261 | read_7322_creg32_port(ppd, | |
4262 | crp_iblinkerrrecov); | |
4263 | } | |
4264 | break; | |
4265 | ||
4266 | case IB_LINKCMD_ARMED: | |
4267 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; | |
4268 | if (ppd->cpspec->ibmalfusesnap) { | |
4269 | ppd->cpspec->ibmalfusesnap = 0; | |
4270 | ppd->cpspec->ibmalfdelta += | |
4271 | read_7322_creg32_port(ppd, | |
4272 | crp_errlink) - | |
4273 | ppd->cpspec->ibmalfsnap; | |
4274 | } | |
4275 | break; | |
4276 | ||
4277 | case IB_LINKCMD_ACTIVE: | |
4278 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; | |
4279 | break; | |
4280 | ||
4281 | default: | |
4282 | ret = -EINVAL; | |
4283 | qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); | |
4284 | goto bail; | |
4285 | } | |
4286 | switch (val & 0xffff) { | |
4287 | case IB_LINKINITCMD_NOP: | |
4288 | licmd = 0; | |
4289 | break; | |
4290 | ||
4291 | case IB_LINKINITCMD_POLL: | |
4292 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; | |
4293 | break; | |
4294 | ||
4295 | case IB_LINKINITCMD_SLEEP: | |
4296 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; | |
4297 | break; | |
4298 | ||
4299 | case IB_LINKINITCMD_DISABLE: | |
4300 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; | |
4301 | ppd->cpspec->chase_end = 0; | |
4302 | /* | |
4303 | * stop state chase counter and timer, if running. | |
4304 | * wait for pending timer, but don't clear .data (ppd)! | |
4305 | */ | |
4306 | if (ppd->cpspec->chase_timer.expires) { | |
4307 | del_timer_sync(&ppd->cpspec->chase_timer); | |
4308 | ppd->cpspec->chase_timer.expires = 0; | |
4309 | } | |
4310 | break; | |
4311 | ||
4312 | default: | |
4313 | ret = -EINVAL; | |
4314 | qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", | |
4315 | val & 0xffff); | |
4316 | goto bail; | |
4317 | } | |
4318 | qib_set_ib_7322_lstate(ppd, lcmd, licmd); | |
4319 | goto bail; | |
4320 | ||
4321 | case QIB_IB_CFG_OP_VLS: | |
4322 | if (ppd->vls_operational != val) { | |
4323 | ppd->vls_operational = val; | |
4324 | set_vls(ppd); | |
4325 | } | |
4326 | goto bail; | |
4327 | ||
4328 | case QIB_IB_CFG_VL_HIGH_LIMIT: | |
4329 | qib_write_kreg_port(ppd, krp_highprio_limit, val); | |
4330 | goto bail; | |
4331 | ||
4332 | case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ | |
4333 | if (val > 3) { | |
4334 | ret = -EINVAL; | |
4335 | goto bail; | |
4336 | } | |
4337 | lsb = IBA7322_IBC_HRTBT_LSB; | |
4338 | maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ | |
4339 | break; | |
4340 | ||
4341 | case QIB_IB_CFG_PORT: | |
4342 | /* val is the port number of the switch we are connected to. */ | |
4343 | if (ppd->dd->cspec->r1) { | |
4344 | cancel_delayed_work(&ppd->cpspec->ipg_work); | |
4345 | ppd->cpspec->ipg_tries = 0; | |
4346 | } | |
4347 | goto bail; | |
4348 | ||
4349 | default: | |
4350 | ret = -EINVAL; | |
4351 | goto bail; | |
4352 | } | |
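	/*
	 * Cases that break out of the switch (rather than goto bail) share
	 * this read-modify-write of the ibcctrl_b shadow; the scratch write
	 * afterwards follows the same convention used after other IBC
	 * register updates in this file.
	 */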
4353 | ppd->cpspec->ibcctrl_b &= ~(maskr << lsb); | |
4354 | ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb); | |
4355 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | |
4356 | qib_write_kreg(dd, kr_scratch, 0); | |
4357 | bail: | |
4358 | return ret; | |
4359 | } | |
4360 | ||
4361 | static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what) | |
4362 | { | |
4363 | int ret = 0; | |
4364 | u64 val, ctrlb; | |
4365 | ||
4366 | /* only IBC loopback, may add serdes and xgxs loopbacks later */ | |
4367 | if (!strncmp(what, "ibc", 3)) { | |
4368 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, | |
4369 | Loopback); | |
4370 | val = 0; /* disable heart beat, so link will come up */ | |
4371 | qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", | |
4372 | ppd->dd->unit, ppd->port); | |
4373 | } else if (!strncmp(what, "off", 3)) { | |
4374 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, | |
4375 | Loopback); | |
4376 | /* enable heart beat again */ | |
4377 | val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; | |
7fac3301 MM |
4378 | qib_devinfo(ppd->dd->pcidev, |
4379 | "Disabling IB%u:%u IBC loopback (normal)\n", | |
4380 | ppd->dd->unit, ppd->port); | |
f931551b RC |
4381 | } else |
4382 | ret = -EINVAL; | |
4383 | if (!ret) { | |
4384 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4385 | ppd->cpspec->ibcctrl_a); | |
4386 | ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK | |
4387 | << IBA7322_IBC_HRTBT_LSB); | |
4388 | ppd->cpspec->ibcctrl_b = ctrlb | val; | |
4389 | qib_write_kreg_port(ppd, krp_ibcctrl_b, | |
4390 | ppd->cpspec->ibcctrl_b); | |
4391 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
4392 | } | |
4393 | return ret; | |
4394 | } | |
4395 | ||
4396 | static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno, | |
4397 | struct ib_vl_weight_elem *vl) | |
4398 | { | |
4399 | unsigned i; | |
4400 | ||
4401 | for (i = 0; i < 16; i++, regno++, vl++) { | |
4402 | u32 val = qib_read_kreg_port(ppd, regno); | |
4403 | ||
4404 | vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) & | |
4405 | SYM_RMASK(LowPriority0_0, VirtualLane); | |
4406 | vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) & | |
4407 | SYM_RMASK(LowPriority0_0, Weight); | |
4408 | } | |
4409 | } | |
4410 | ||
4411 | static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno, | |
4412 | struct ib_vl_weight_elem *vl) | |
4413 | { | |
4414 | unsigned i; | |
4415 | ||
4416 | for (i = 0; i < 16; i++, regno++, vl++) { | |
4417 | u64 val; | |
4418 | ||
4419 | val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) << | |
4420 | SYM_LSB(LowPriority0_0, VirtualLane)) | | |
4421 | ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) << | |
4422 | SYM_LSB(LowPriority0_0, Weight)); | |
4423 | qib_write_kreg_port(ppd, regno, val); | |
4424 | } | |
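	/*
	 * Writing the arbitration table also turns on the IB VL arbiter
	 * for this port, if it was not already enabled.
	 */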
4425 | if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) { | |
4426 | struct qib_devdata *dd = ppd->dd; | |
4427 | unsigned long flags; | |
4428 | ||
4429 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
4430 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn); | |
4431 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
4432 | qib_write_kreg(dd, kr_scratch, 0); | |
4433 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
4434 | } | |
4435 | } | |
4436 | ||
4437 | static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t) | |
4438 | { | |
4439 | switch (which) { | |
4440 | case QIB_IB_TBL_VL_HIGH_ARB: | |
4441 | get_vl_weights(ppd, krp_highprio_0, t); | |
4442 | break; | |
4443 | ||
4444 | case QIB_IB_TBL_VL_LOW_ARB: | |
4445 | get_vl_weights(ppd, krp_lowprio_0, t); | |
4446 | break; | |
4447 | ||
4448 | default: | |
4449 | return -EINVAL; | |
4450 | } | |
4451 | return 0; | |
4452 | } | |
4453 | ||
4454 | static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t) | |
4455 | { | |
4456 | switch (which) { | |
4457 | case QIB_IB_TBL_VL_HIGH_ARB: | |
4458 | set_vl_weights(ppd, krp_highprio_0, t); | |
4459 | break; | |
4460 | ||
4461 | case QIB_IB_TBL_VL_LOW_ARB: | |
4462 | set_vl_weights(ppd, krp_lowprio_0, t); | |
4463 | break; | |
4464 | ||
4465 | default: | |
4466 | return -EINVAL; | |
4467 | } | |
4468 | return 0; | |
4469 | } | |
4470 | ||
4471 | static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, | |
19ede2e4 | 4472 | u32 updegr, u32 egrhd, u32 npkts) |
f931551b | 4473 | { |
19ede2e4 MM |
4474 | /* |
4475 | * Need to write timeout register before updating rcvhdrhead to ensure | |
4476 | * that the timer is enabled on reception of a packet. | |
4477 | */ | |
4478 | if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT) | |
4479 | adjust_rcv_timeout(rcd, npkts); | |
f931551b RC |
4480 | if (updegr) |
4481 | qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); | |
eddfb675 RV |
4482 | mmiowb(); |
4483 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | |
4484 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | |
4485 | mmiowb(); | |
f931551b RC |
4486 | } |
4487 | ||
4488 | static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) | |
4489 | { | |
4490 | u32 head, tail; | |
4491 | ||
4492 | head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); | |
4493 | if (rcd->rcvhdrtail_kvaddr) | |
4494 | tail = qib_get_rcvhdrtail(rcd); | |
4495 | else | |
4496 | tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); | |
4497 | return head == tail; | |
4498 | } | |
4499 | ||
4500 | #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \ | |
4501 | QIB_RCVCTRL_CTXT_DIS | \ | |
4502 | QIB_RCVCTRL_TIDFLOW_ENB | \ | |
4503 | QIB_RCVCTRL_TIDFLOW_DIS | \ | |
4504 | QIB_RCVCTRL_TAILUPD_ENB | \ | |
4505 | QIB_RCVCTRL_TAILUPD_DIS | \ | |
4506 | QIB_RCVCTRL_INTRAVAIL_ENB | \ | |
4507 | QIB_RCVCTRL_INTRAVAIL_DIS | \ | |
4508 | QIB_RCVCTRL_BP_ENB | \ | |
4509 | QIB_RCVCTRL_BP_DIS) | |
4510 | ||
4511 | #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \ | |
4512 | QIB_RCVCTRL_CTXT_DIS | \ | |
4513 | QIB_RCVCTRL_PKEY_DIS | \ | |
4514 | QIB_RCVCTRL_PKEY_ENB) | |
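/*
 * These masks decide which register(s) rcvctrl_7322_mod() writes: ops in
 * RCVCTRL_COMMON_MODS touch the device-wide kr_rcvctrl, ops in
 * RCVCTRL_PORT_MODS touch the per-port krp_rcvctrl, and op == 0 (flush)
 * writes both.
 */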
4515 | ||
4516 | /* | |
4517 | * Modify the RCVCTRL register in a chip-specific way. This | |
4518 | * is a function because bit positions and (future) register | |
4519 | * locations are chip-specific, but the needed operations are | |
4520 | * generic. <op> is a bit-mask because we often want to | |
4521 | * do multiple modifications. | |
4522 | */ | |
4523 | static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, | |
4524 | int ctxt) | |
4525 | { | |
4526 | struct qib_devdata *dd = ppd->dd; | |
4527 | struct qib_ctxtdata *rcd; | |
4528 | u64 mask, val; | |
4529 | unsigned long flags; | |
4530 | ||
4531 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
4532 | ||
4533 | if (op & QIB_RCVCTRL_TIDFLOW_ENB) | |
4534 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); | |
4535 | if (op & QIB_RCVCTRL_TIDFLOW_DIS) | |
4536 | dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); | |
4537 | if (op & QIB_RCVCTRL_TAILUPD_ENB) | |
4538 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); | |
4539 | if (op & QIB_RCVCTRL_TAILUPD_DIS) | |
4540 | dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); | |
4541 | if (op & QIB_RCVCTRL_PKEY_ENB) | |
4542 | ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); | |
4543 | if (op & QIB_RCVCTRL_PKEY_DIS) | |
4544 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); | |
4545 | if (ctxt < 0) { | |
4546 | mask = (1ULL << dd->ctxtcnt) - 1; | |
4547 | rcd = NULL; | |
4548 | } else { | |
4549 | mask = (1ULL << ctxt); | |
4550 | rcd = dd->rcd[ctxt]; | |
4551 | } | |
4552 | if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) { | |
4553 | ppd->p_rcvctrl |= | |
4554 | (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); | |
4555 | if (!(dd->flags & QIB_NODMA_RTAIL)) { | |
4556 | op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */ | |
4557 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); | |
4558 | } | |
4559 | /* Write these registers before the context is enabled. */ | |
4560 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, | |
4561 | rcd->rcvhdrqtailaddr_phys); | |
4562 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, | |
4563 | rcd->rcvhdrq_phys); | |
4564 | rcd->seq_cnt = 1; | |
f931551b RC |
4565 | } |
4566 | if (op & QIB_RCVCTRL_CTXT_DIS) | |
4567 | ppd->p_rcvctrl &= | |
4568 | ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); | |
4569 | if (op & QIB_RCVCTRL_BP_ENB) | |
4570 | dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); | |
4571 | if (op & QIB_RCVCTRL_BP_DIS) | |
4572 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); | |
4573 | if (op & QIB_RCVCTRL_INTRAVAIL_ENB) | |
4574 | dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); | |
4575 | if (op & QIB_RCVCTRL_INTRAVAIL_DIS) | |
4576 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); | |
4577 | /* | |
4578 | * Decide which registers to write depending on the ops enabled. | |
4579 | * Special case is "flush" (no bits set at all) | |
4580 | * which needs to write both. | |
4581 | */ | |
4582 | if (op == 0 || (op & RCVCTRL_COMMON_MODS)) | |
4583 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | |
4584 | if (op == 0 || (op & RCVCTRL_PORT_MODS)) | |
4585 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | |
4586 | if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { | |
4587 | /* | |
4588 | * Init the context registers also; if we were | |
4589 | * disabled, tail and head should both be zero | |
4590 | * already from the enable, but since we don't | |
25985edc | 4591 | * know, we have to do it explicitly. |
f931551b RC |
4592 | */ |
4593 | val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); | |
4594 | qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); | |
4595 | ||
4596 | /* be sure enabling write seen; hd/tl should be 0 */ | |
4597 | (void) qib_read_kreg32(dd, kr_scratch); | |
4598 | val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); | |
4599 | dd->rcd[ctxt]->head = val; | |
4600 | /* If kctxt, interrupt on next receive. */ | |
4601 | if (ctxt < dd->first_user_ctxt) | |
4602 | val |= dd->rhdrhead_intr_off; | |
4603 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | |
4604 | } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && | |
4605 | dd->rcd[ctxt] && dd->rhdrhead_intr_off) { | |
4606 | /* arm rcv interrupt */ | |
4607 | val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; | |
4608 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | |
4609 | } | |
4610 | if (op & QIB_RCVCTRL_CTXT_DIS) { | |
4611 | unsigned f; | |
4612 | ||
4613 | /* Now that the context is disabled, clear these registers. */ | |
4614 | if (ctxt >= 0) { | |
4615 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); | |
4616 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); | |
4617 | for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) | |
4618 | qib_write_ureg(dd, ur_rcvflowtable + f, | |
4619 | TIDFLOW_ERRBITS, ctxt); | |
4620 | } else { | |
4621 | unsigned i; | |
4622 | ||
4623 | for (i = 0; i < dd->cfgctxts; i++) { | |
4624 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, | |
4625 | i, 0); | |
4626 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0); | |
4627 | for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) | |
4628 | qib_write_ureg(dd, ur_rcvflowtable + f, | |
4629 | TIDFLOW_ERRBITS, i); | |
4630 | } | |
4631 | } | |
4632 | } | |
4633 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
4634 | } | |
4635 | ||
4636 | /* | |
4637 | * Modify the SENDCTRL register in chip-specific way. This | |
4638 | * is a function where there are multiple such registers with | |
4639 | * slightly different layouts. | |
4640 | * The chip doesn't allow back-to-back sendctrl writes, so write | |
4641 | * the scratch register after writing sendctrl. | |
4642 | * | |
4643 | * Which register is written depends on the operation. | |
4644 | * Most operate on the common register, while | |
4645 | * SEND_ENB and SEND_DIS operate on the per-port ones. | |
4646 | * SEND_ENB is included in common because it can change SPCL_TRIG | |
4647 | */ | |
4648 | #define SENDCTRL_COMMON_MODS (\ | |
4649 | QIB_SENDCTRL_CLEAR | \ | |
4650 | QIB_SENDCTRL_AVAIL_DIS | \ | |
4651 | QIB_SENDCTRL_AVAIL_ENB | \ | |
4652 | QIB_SENDCTRL_AVAIL_BLIP | \ | |
4653 | QIB_SENDCTRL_DISARM | \ | |
4654 | QIB_SENDCTRL_DISARM_ALL | \ | |
4655 | QIB_SENDCTRL_SEND_ENB) | |
4656 | ||
4657 | #define SENDCTRL_PORT_MODS (\ | |
4658 | QIB_SENDCTRL_CLEAR | \ | |
4659 | QIB_SENDCTRL_SEND_ENB | \ | |
4660 | QIB_SENDCTRL_SEND_DIS | \ | |
4661 | QIB_SENDCTRL_FLUSH) | |
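/*
 * As with the rcvctrl masks above, these select which register gets written:
 * SENDCTRL_COMMON_MODS ops write the device-wide kr_sendctrl,
 * SENDCTRL_PORT_MODS ops write the per-port krp_sendctrl, and op == 0
 * writes both.
 */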
4662 | ||
4663 | static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op) | |
4664 | { | |
4665 | struct qib_devdata *dd = ppd->dd; | |
4666 | u64 tmp_dd_sendctrl; | |
4667 | unsigned long flags; | |
4668 | ||
4669 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
4670 | ||
4671 | /* First the dd ones that are "sticky", saved in shadow */ | |
4672 | if (op & QIB_SENDCTRL_CLEAR) | |
4673 | dd->sendctrl = 0; | |
4674 | if (op & QIB_SENDCTRL_AVAIL_DIS) | |
4675 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4676 | else if (op & QIB_SENDCTRL_AVAIL_ENB) { | |
4677 | dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4678 | if (dd->flags & QIB_USE_SPCL_TRIG) | |
4679 | dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); | |
4680 | } | |
4681 | ||
4682 | /* Then the ppd ones that are "sticky", saved in shadow */ | |
4683 | if (op & QIB_SENDCTRL_SEND_DIS) | |
4684 | ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); | |
4685 | else if (op & QIB_SENDCTRL_SEND_ENB) | |
4686 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); | |
4687 | ||
4688 | if (op & QIB_SENDCTRL_DISARM_ALL) { | |
4689 | u32 i, last; | |
4690 | ||
4691 | tmp_dd_sendctrl = dd->sendctrl; | |
4692 | last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | |
4693 | /* | |
4694 | * Disarm any buffers that are not yet launched, | |
4695 | * disabling updates until done. | |
4696 | */ | |
4697 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4698 | for (i = 0; i < last; i++) { | |
4699 | qib_write_kreg(dd, kr_sendctrl, | |
4700 | tmp_dd_sendctrl | | |
4701 | SYM_MASK(SendCtrl, Disarm) | i); | |
4702 | qib_write_kreg(dd, kr_scratch, 0); | |
4703 | } | |
4704 | } | |
4705 | ||
4706 | if (op & QIB_SENDCTRL_FLUSH) { | |
4707 | u64 tmp_ppd_sendctrl = ppd->p_sendctrl; | |
4708 | ||
4709 | /* | |
4710 | * Now drain all the fifos. The Abort bit should never be | |
4711 | * needed, so for now, at least, we don't use it. | |
4712 | */ | |
4713 | tmp_ppd_sendctrl |= | |
4714 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo) | | |
4715 | SYM_MASK(SendCtrl_0, TxeDrainLaFifo) | | |
4716 | SYM_MASK(SendCtrl_0, TxeBypassIbc); | |
4717 | qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl); | |
4718 | qib_write_kreg(dd, kr_scratch, 0); | |
4719 | } | |
4720 | ||
4721 | tmp_dd_sendctrl = dd->sendctrl; | |
4722 | ||
4723 | if (op & QIB_SENDCTRL_DISARM) | |
4724 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | | |
4725 | ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) << | |
4726 | SYM_LSB(SendCtrl, DisarmSendBuf)); | |
4727 | if ((op & QIB_SENDCTRL_AVAIL_BLIP) && | |
4728 | (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | |
4729 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4730 | ||
4731 | if (op == 0 || (op & SENDCTRL_COMMON_MODS)) { | |
4732 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); | |
4733 | qib_write_kreg(dd, kr_scratch, 0); | |
4734 | } | |
4735 | ||
4736 | if (op == 0 || (op & SENDCTRL_PORT_MODS)) { | |
4737 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
4738 | qib_write_kreg(dd, kr_scratch, 0); | |
4739 | } | |
4740 | ||
4741 | if (op & QIB_SENDCTRL_AVAIL_BLIP) { | |
4742 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | |
4743 | qib_write_kreg(dd, kr_scratch, 0); | |
4744 | } | |
4745 | ||
4746 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
4747 | ||
4748 | if (op & QIB_SENDCTRL_FLUSH) { | |
4749 | u32 v; | |
4750 | /* | |
4751 | * ensure writes have hit the chip, then do a few | |
4752 | * more reads, to allow DMA of the pioavail registers | |
4753 | * to occur, so the in-memory copy is in sync with | |
4754 | * the chip. Not always safe to sleep. | |
4755 | */ | |
4756 | v = qib_read_kreg32(dd, kr_scratch); | |
4757 | qib_write_kreg(dd, kr_scratch, v); | |
4758 | v = qib_read_kreg32(dd, kr_scratch); | |
4759 | qib_write_kreg(dd, kr_scratch, v); | |
4760 | qib_read_kreg32(dd, kr_scratch); | |
4761 | } | |
4762 | } | |
4763 | ||
4764 | #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */ | |
4765 | #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */ | |
4766 | #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */ | |
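/*
 * These flag bits are ORed into the counter index tables below so a single
 * u32 can carry both the register index and how it must be read;
 * _PORT_CNTR_IDXMASK strips the flags back off before the access.
 */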
4767 | ||
4768 | /** | |
4769 | * qib_portcntr_7322 - read a per-port chip counter | |
4770 | * @ppd: the qlogic_ib pport | |
4771 | * @reg: the counter to read (not a chip offset) | |
4772 | */ | |
4773 | static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg) | |
4774 | { | |
4775 | struct qib_devdata *dd = ppd->dd; | |
4776 | u64 ret = 0ULL; | |
4777 | u16 creg; | |
4778 | /* 0xffff for unimplemented or synthesized counters */ | |
4779 | static const u32 xlator[] = { | |
4780 | [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG, | |
4781 | [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG, | |
4782 | [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount, | |
4783 | [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount, | |
4784 | [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount, | |
4785 | [QIBPORTCNTR_SENDSTALL] = crp_sendstall, | |
4786 | [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG, | |
4787 | [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount, | |
4788 | [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount, | |
4789 | [QIBPORTCNTR_RCVEBP] = crp_rcvebp, | |
4790 | [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl, | |
4791 | [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG, | |
4792 | [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */ | |
4793 | [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr, | |
4794 | [QIBPORTCNTR_RXVLERR] = crp_rxvlerr, | |
4795 | [QIBPORTCNTR_ERRICRC] = crp_erricrc, | |
4796 | [QIBPORTCNTR_ERRVCRC] = crp_errvcrc, | |
4797 | [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc, | |
4798 | [QIBPORTCNTR_BADFORMAT] = crp_badformat, | |
4799 | [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen, | |
4800 | [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr, | |
4801 | [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen, | |
4802 | [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl, | |
4803 | [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl, | |
4804 | [QIBPORTCNTR_ERRLINK] = crp_errlink, | |
4805 | [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown, | |
4806 | [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov, | |
4807 | [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr, | |
4808 | [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt, | |
4809 | [QIBPORTCNTR_ERRPKEY] = crp_errpkey, | |
4810 | /* | |
4811 | * the next 3 aren't really counters, but were implemented | |
4812 | * as counters in older chips, so still get accessed as | |
4813 | * though they were counters by this code. | |
4814 | */ | |
4815 | [QIBPORTCNTR_PSINTERVAL] = krp_psinterval, | |
4816 | [QIBPORTCNTR_PSSTART] = krp_psstart, | |
4817 | [QIBPORTCNTR_PSSTAT] = krp_psstat, | |
4818 | /* pseudo-counter, summed for all ports */ | |
4819 | [QIBPORTCNTR_KHDROVFL] = 0xffff, | |
4820 | }; | |
4821 | ||
4822 | if (reg >= ARRAY_SIZE(xlator)) { | |
4823 | qib_devinfo(ppd->dd->pcidev, | |
4824 | "Unimplemented portcounter %u\n", reg); | |
4825 | goto done; | |
4826 | } | |
4827 | creg = xlator[reg] & _PORT_CNTR_IDXMASK; | |
4828 | ||
4829 | /* handle non-counters and special cases first */ | |
4830 | if (reg == QIBPORTCNTR_KHDROVFL) { | |
4831 | int i; | |
4832 | ||
4833 | /* sum over all kernel contexts (skip if mini_init) */ | |
4834 | for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { | |
4835 | struct qib_ctxtdata *rcd = dd->rcd[i]; | |
4836 | ||
4837 | if (!rcd || rcd->ppd != ppd) | |
4838 | continue; | |
4839 | ret += read_7322_creg32(dd, cr_base_egrovfl + i); | |
4840 | } | |
4841 | goto done; | |
4842 | } else if (reg == QIBPORTCNTR_RXDROPPKT) { | |
4843 | /* | |
4844 | * Used as part of the synthesis of port_rcv_errors | |
4845 | * in the verbs code for IBTA counters. Not needed for 7322, | |
4846 | * because all the errors are already counted by other cntrs. | |
4847 | */ | |
4848 | goto done; | |
4849 | } else if (reg == QIBPORTCNTR_PSINTERVAL || | |
4850 | reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) { | |
4851 | /* were counters in older chips, now per-port kernel regs */ | |
4852 | ret = qib_read_kreg_port(ppd, creg); | |
4853 | goto done; | |
4854 | } | |
4855 | ||
4856 | /* | |
4857 | * Only fast increment counters are 64 bits; use 32 bit reads to | |
4858 | * avoid two independent reads when on Opteron. | |
4859 | */ | |
4860 | if (xlator[reg] & _PORT_64BIT_FLAG) | |
4861 | ret = read_7322_creg_port(ppd, creg); | |
4862 | else | |
4863 | ret = read_7322_creg32_port(ppd, creg); | |
4864 | if (creg == crp_ibsymbolerr) { | |
4865 | if (ppd->cpspec->ibdeltainprog) | |
4866 | ret -= ret - ppd->cpspec->ibsymsnap; | |
4867 | ret -= ppd->cpspec->ibsymdelta; | |
4868 | } else if (creg == crp_iblinkerrrecov) { | |
4869 | if (ppd->cpspec->ibdeltainprog) | |
4870 | ret -= ret - ppd->cpspec->iblnkerrsnap; | |
4871 | ret -= ppd->cpspec->iblnkerrdelta; | |
4872 | } else if (creg == crp_errlink) | |
4873 | ret -= ppd->cpspec->ibmalfdelta; | |
4874 | else if (creg == crp_iblinkdown) | |
4875 | ret += ppd->cpspec->iblnkdowndelta; | |
4876 | done: | |
4877 | return ret; | |
4878 | } | |
4879 | ||
4880 | /* | |
4881 | * Device counter names (not port-specific), one line per stat, | |
4882 | * single string. Used by utilities like ipathstats to print the stats | |
4883 | * in a way which works for different versions of drivers, without changing | |
4884 | * the utility. Names need to be 12 chars or less (w/o newline), for proper | |
4885 | * display by utility. | |
4886 | * Non-error counters are first. | |
4887 | * Start of "error" counters is indicated by a leading "E " on the first | |
4888 | * "error" counter, and doesn't count in label length. | |
4889 | * The EgrOvfl list needs to be last so we truncate them at the configured | |
4890 | * context count for the device. | |
4891 | * cntr7322indices contains the corresponding register indices. | |
4892 | */ | |
4893 | static const char cntr7322names[] = | |
4894 | "Interrupts\n" | |
4895 | "HostBusStall\n" | |
4896 | "E RxTIDFull\n" | |
4897 | "RxTIDInvalid\n" | |
4898 | "RxTIDFloDrop\n" /* 7322 only */ | |
4899 | "Ctxt0EgrOvfl\n" | |
4900 | "Ctxt1EgrOvfl\n" | |
4901 | "Ctxt2EgrOvfl\n" | |
4902 | "Ctxt3EgrOvfl\n" | |
4903 | "Ctxt4EgrOvfl\n" | |
4904 | "Ctxt5EgrOvfl\n" | |
4905 | "Ctxt6EgrOvfl\n" | |
4906 | "Ctxt7EgrOvfl\n" | |
4907 | "Ctxt8EgrOvfl\n" | |
4908 | "Ctxt9EgrOvfl\n" | |
4909 | "Ctx10EgrOvfl\n" | |
4910 | "Ctx11EgrOvfl\n" | |
4911 | "Ctx12EgrOvfl\n" | |
4912 | "Ctx13EgrOvfl\n" | |
4913 | "Ctx14EgrOvfl\n" | |
4914 | "Ctx15EgrOvfl\n" | |
4915 | "Ctx16EgrOvfl\n" | |
4916 | "Ctx17EgrOvfl\n" | |
4917 | ; | |
4918 | ||
4919 | static const u32 cntr7322indices[] = { | |
4920 | cr_lbint | _PORT_64BIT_FLAG, | |
4921 | cr_lbstall | _PORT_64BIT_FLAG, | |
4922 | cr_tidfull, | |
4923 | cr_tidinvalid, | |
4924 | cr_rxtidflowdrop, | |
4925 | cr_base_egrovfl + 0, | |
4926 | cr_base_egrovfl + 1, | |
4927 | cr_base_egrovfl + 2, | |
4928 | cr_base_egrovfl + 3, | |
4929 | cr_base_egrovfl + 4, | |
4930 | cr_base_egrovfl + 5, | |
4931 | cr_base_egrovfl + 6, | |
4932 | cr_base_egrovfl + 7, | |
4933 | cr_base_egrovfl + 8, | |
4934 | cr_base_egrovfl + 9, | |
4935 | cr_base_egrovfl + 10, | |
4936 | cr_base_egrovfl + 11, | |
4937 | cr_base_egrovfl + 12, | |
4938 | cr_base_egrovfl + 13, | |
4939 | cr_base_egrovfl + 14, | |
4940 | cr_base_egrovfl + 15, | |
4941 | cr_base_egrovfl + 16, | |
4942 | cr_base_egrovfl + 17, | |
4943 | }; | |
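/*
 * cntr7322indices[] is kept in one-to-one correspondence with the lines of
 * cntr7322names above; init_7322_cntrnames() limits how many entries are
 * actually used, so EgrOvfl entries beyond the configured context count are
 * never read.
 */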
4944 | ||
4945 | /* | |
4946 | * same as cntr7322names and cntr7322indices, but for port-specific counters. | |
4947 | * portcntr7322indices is somewhat complicated by some registers needing | |
4948 | * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG | |
4949 | */ | |
4950 | static const char portcntr7322names[] = | |
4951 | "TxPkt\n" | |
4952 | "TxFlowPkt\n" | |
4953 | "TxWords\n" | |
4954 | "RxPkt\n" | |
4955 | "RxFlowPkt\n" | |
4956 | "RxWords\n" | |
4957 | "TxFlowStall\n" | |
4958 | "TxDmaDesc\n" /* 7220 and 7322-only */ | |
4959 | "E RxDlidFltr\n" /* 7220 and 7322-only */ | |
4960 | "IBStatusChng\n" | |
4961 | "IBLinkDown\n" | |
4962 | "IBLnkRecov\n" | |
4963 | "IBRxLinkErr\n" | |
4964 | "IBSymbolErr\n" | |
4965 | "RxLLIErr\n" | |
4966 | "RxBadFormat\n" | |
4967 | "RxBadLen\n" | |
4968 | "RxBufOvrfl\n" | |
4969 | "RxEBP\n" | |
4970 | "RxFlowCtlErr\n" | |
4971 | "RxICRCerr\n" | |
4972 | "RxLPCRCerr\n" | |
4973 | "RxVCRCerr\n" | |
4974 | "RxInvalLen\n" | |
4975 | "RxInvalPKey\n" | |
4976 | "RxPktDropped\n" | |
4977 | "TxBadLength\n" | |
4978 | "TxDropped\n" | |
4979 | "TxInvalLen\n" | |
4980 | "TxUnderrun\n" | |
4981 | "TxUnsupVL\n" | |
4982 | "RxLclPhyErr\n" /* 7220 and 7322-only from here down */ | |
4983 | "RxVL15Drop\n" | |
4984 | "RxVlErr\n" | |
4985 | "XcessBufOvfl\n" | |
4986 | "RxQPBadCtxt\n" /* 7322-only from here down */ | |
4987 | "TXBadHeader\n" | |
4988 | ; | |
4989 | ||
4990 | static const u32 portcntr7322indices[] = { | |
4991 | QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, | |
4992 | crp_pktsendflow, | |
4993 | QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, | |
4994 | QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, | |
4995 | crp_pktrcvflowctrl, | |
4996 | QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, | |
4997 | QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, | |
4998 | crp_txsdmadesc | _PORT_64BIT_FLAG, | |
4999 | crp_rxdlidfltr, | |
5000 | crp_ibstatuschange, | |
5001 | QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, | |
5002 | QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, | |
5003 | QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, | |
5004 | QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, | |
5005 | QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, | |
5006 | QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, | |
5007 | QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, | |
5008 | QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, | |
5009 | QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, | |
5010 | crp_rcvflowctrlviol, | |
5011 | QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, | |
5012 | QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, | |
5013 | QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, | |
5014 | QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, | |
5015 | QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, | |
5016 | QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, | |
5017 | crp_txminmaxlenerr, | |
5018 | crp_txdroppedpkt, | |
5019 | crp_txlenerr, | |
5020 | crp_txunderrun, | |
5021 | crp_txunsupvl, | |
5022 | QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, | |
5023 | QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, | |
5024 | QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, | |
5025 | QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, | |
5026 | crp_rxqpinvalidctxt, | |
5027 | crp_txhdrerr, | |
5028 | }; | |
5029 | ||
5030 | /* do all the setup to make the counter reads efficient later */ | |
5031 | static void init_7322_cntrnames(struct qib_devdata *dd) | |
5032 | { | |
5033 | int i, j = 0; | |
5034 | char *s; | |
5035 | ||
5036 | for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; | |
5037 | i++) { | |
5038 | /* we always have at least one counter before the egrovfl */ | |
5039 | if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) | |
5040 | j = 1; | |
5041 | s = strchr(s + 1, '\n'); | |
5042 | if (s && j) | |
5043 | j++; | |
5044 | } | |
5045 | dd->cspec->ncntrs = i; | |
5046 | if (!s) | |
5047 | /* full list; size is without terminating null */ | |
5048 | dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; | |
5049 | else | |
5050 | dd->cspec->cntrnamelen = 1 + s - cntr7322names; | |
5051 | dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs | |
5052 | * sizeof(u64), GFP_KERNEL); | |
5053 | if (!dd->cspec->cntrs) | |
5054 | qib_dev_err(dd, "Failed allocation for counters\n"); | |
5055 | ||
5056 | for (i = 0, s = (char *)portcntr7322names; s; i++) | |
5057 | s = strchr(s + 1, '\n'); | |
5058 | dd->cspec->nportcntrs = i - 1; | |
5059 | dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; | |
5060 | for (i = 0; i < dd->num_pports; ++i) { | |
5061 | dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs | |
5062 | * sizeof(u64), GFP_KERNEL); | |
5063 | if (!dd->pport[i].cpspec->portcntrs) | |
7fac3301 MM |
5064 | qib_dev_err(dd, |
5065 | "Failed allocation for portcounters\n"); | |
f931551b RC |
5066 | } |
5067 | } | |
5068 | ||
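/*
 * Two-phase counter read, for the stats export described above for
 * cntr7322names: with a non-NULL namep, return the name blob and its length;
 * otherwise snapshot all device counters into the preallocated cspec->cntrs
 * array.  A return of 0 means nothing is left to read (pos is past the end,
 * or the earlier allocation failed).
 */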
5069 | static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep, | |
5070 | u64 **cntrp) | |
5071 | { | |
5072 | u32 ret; | |
5073 | ||
5074 | if (namep) { | |
5075 | ret = dd->cspec->cntrnamelen; | |
5076 | if (pos >= ret) | |
5077 | ret = 0; /* final read after getting everything */ | |
5078 | else | |
5079 | *namep = (char *) cntr7322names; | |
5080 | } else { | |
5081 | u64 *cntr = dd->cspec->cntrs; | |
5082 | int i; | |
5083 | ||
5084 | ret = dd->cspec->ncntrs * sizeof(u64); | |
5085 | if (!cntr || pos >= ret) { | |
5086 | /* everything read, or couldn't get memory */ | |
5087 | ret = 0; | |
5088 | goto done; | |
5089 | } | |
5090 | *cntrp = cntr; | |
5091 | for (i = 0; i < dd->cspec->ncntrs; i++) | |
5092 | if (cntr7322indices[i] & _PORT_64BIT_FLAG) | |
5093 | *cntr++ = read_7322_creg(dd, | |
5094 | cntr7322indices[i] & | |
5095 | _PORT_CNTR_IDXMASK); | |
5096 | else | |
5097 | *cntr++ = read_7322_creg32(dd, | |
5098 | cntr7322indices[i]); | |
5099 | } | |
5100 | done: | |
5101 | return ret; | |
5102 | } | |
5103 | ||
5104 | static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, | |
5105 | char **namep, u64 **cntrp) | |
5106 | { | |
5107 | u32 ret; | |
5108 | ||
5109 | if (namep) { | |
5110 | ret = dd->cspec->portcntrnamelen; | |
5111 | if (pos >= ret) | |
5112 | ret = 0; /* final read after getting everything */ | |
5113 | else | |
5114 | *namep = (char *)portcntr7322names; | |
5115 | } else { | |
5116 | struct qib_pportdata *ppd = &dd->pport[port]; | |
5117 | u64 *cntr = ppd->cpspec->portcntrs; | |
5118 | int i; | |
5119 | ||
5120 | ret = dd->cspec->nportcntrs * sizeof(u64); | |
5121 | if (!cntr || pos >= ret) { | |
5122 | /* everything read, or couldn't get memory */ | |
5123 | ret = 0; | |
5124 | goto done; | |
5125 | } | |
5126 | *cntrp = cntr; | |
5127 | for (i = 0; i < dd->cspec->nportcntrs; i++) { | |
5128 | if (portcntr7322indices[i] & _PORT_VIRT_FLAG) | |
5129 | *cntr++ = qib_portcntr_7322(ppd, | |
5130 | portcntr7322indices[i] & | |
5131 | _PORT_CNTR_IDXMASK); | |
5132 | else if (portcntr7322indices[i] & _PORT_64BIT_FLAG) | |
5133 | *cntr++ = read_7322_creg_port(ppd, | |
5134 | portcntr7322indices[i] & | |
5135 | _PORT_CNTR_IDXMASK); | |
5136 | else | |
5137 | *cntr++ = read_7322_creg32_port(ppd, | |
5138 | portcntr7322indices[i]); | |
5139 | } | |
5140 | } | |
5141 | done: | |
5142 | return ret; | |
5143 | } | |
5144 | ||
5145 | /** | |
5146 | * qib_get_7322_faststats - get word counters from chip before they overflow | |
5147 | * @opaque: contains a pointer to the qlogic_ib device qib_devdata | |
5148 | * | |
5149 | * VESTIGIAL: the IBA7322 has no "small fast counters", so the only | |
5150 | * real purpose of this function is to maintain the notion of | |
5151 | * "active time", which in turn is only logged into the eeprom, | |
5152 | * which we don't have yet for 7322-based boards. | |
5153 | * | |
5154 | * called from add_timer | |
5155 | */ | |
5156 | static void qib_get_7322_faststats(unsigned long opaque) | |
5157 | { | |
5158 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | |
5159 | struct qib_pportdata *ppd; | |
5160 | unsigned long flags; | |
5161 | u64 traffic_wds; | |
5162 | int pidx; | |
5163 | ||
5164 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
5165 | ppd = dd->pport + pidx; | |
5166 | ||
5167 | /* | |
5168 | * If the port isn't enabled, or isn't operational, or if | |
5169 | * diags is running (it can cause memory diags to fail), | |
5170 | * skip this port this time. | |
5171 | */ | |
5172 | if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) | |
5173 | || dd->diag_client) | |
5174 | continue; | |
5175 | ||
5176 | /* | |
5177 | * Maintain an activity timer, based on traffic | |
5178 | * exceeding a threshold, so we need to check the word-counts | |
5179 | * even if they are 64-bit. | |
5180 | */ | |
5181 | traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) + | |
5182 | qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND); | |
5183 | spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); | |
5184 | traffic_wds -= ppd->dd->traffic_wds; | |
5185 | ppd->dd->traffic_wds += traffic_wds; | |
f931551b RC |
5186 | spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); |
5187 | if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & | |
5188 | QIB_IB_QDR) && | |
5189 | (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | | |
5190 | QIBL_LINKACTIVE)) && | |
5191 | ppd->cpspec->qdr_dfe_time && | |
8482d5d1 | 5192 | time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) { |
f931551b RC |
5193 | ppd->cpspec->qdr_dfe_on = 0; |
5194 | ||
5195 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
5196 | ppd->dd->cspec->r1 ? | |
5197 | QDR_STATIC_ADAPT_INIT_R1 : | |
5198 | QDR_STATIC_ADAPT_INIT); | |
5199 | force_h1(ppd); | |
5200 | } | |
5201 | } | |
5202 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | |
5203 | } | |
5204 | ||
5205 | /* | |
5206 | * If we were using MSIx, try to fall back to INTx. | |
5207 | */ | |
5208 | static int qib_7322_intr_fallback(struct qib_devdata *dd) | |
5209 | { | |
5210 | if (!dd->cspec->num_msix_entries) | |
5211 | return 0; /* already using INTx */ | |
5212 | ||
7fac3301 MM |
5213 | qib_devinfo(dd->pcidev, |
5214 | "MSIx interrupt not detected, trying INTx interrupts\n"); | |
f931551b RC |
5215 | qib_7322_nomsix(dd); |
5216 | qib_enable_intx(dd->pcidev); | |
5217 | qib_setup_7322_interrupt(dd, 0); | |
5218 | return 1; | |
5219 | } | |
5220 | ||
5221 | /* | |
5222 | * Reset the XGXS (between serdes and IBC). Slightly less intrusive | |
5223 | * than resetting the IBC or external link state, and useful in some | |
5224 | * cases to cause some retraining. To do this right, we reset IBC | |
5225 | * as well, then return to the previous state (which may still be in reset). | |
5226 | * NOTE: some callers of this "know" this writes the current value | |
5227 | * of cpspec->ibcctrl_a as part of its operation, so if that changes, | |
5228 | * check all callers. | |
5229 | */ | |
5230 | static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd) | |
5231 | { | |
5232 | u64 val; | |
5233 | struct qib_devdata *dd = ppd->dd; | |
5234 | const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) | | |
5235 | SYM_MASK(IBPCSConfig_0, xcv_treset) | | |
5236 | SYM_MASK(IBPCSConfig_0, tx_rx_reset); | |
5237 | ||
5238 | val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); | |
b9e03e04 RC |
5239 | qib_write_kreg(dd, kr_hwerrmask, |
5240 | dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); | |
f931551b RC |
5241 | qib_write_kreg_port(ppd, krp_ibcctrl_a, |
5242 | ppd->cpspec->ibcctrl_a & | |
5243 | ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); | |
5244 | ||
5245 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits); | |
5246 | qib_read_kreg32(dd, kr_scratch); | |
5247 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); | |
5248 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
5249 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
b9e03e04 RC |
5250 | qib_write_kreg(dd, kr_hwerrclear, |
5251 | SYM_MASK(HwErrClear, statusValidNoEopClear)); | |
5252 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | |
f931551b RC |
5253 | } |
5254 | ||
5255 | /* | |
5256 | * This code for non-IBTA-compliant IB speed negotiation is only known to | |
5257 | * work for the SDR to DDR transition, and only between an HCA and a switch | |
5258 | * with recent firmware. It is based on observed heuristics, rather than | |
5259 | * actual knowledge of the non-compliant speed negotiation. | |
5260 | * It has a number of hard-coded fields, since the hope is to rewrite this | |
5261 | * when a spec is available on how the negotiation is intended to work. | |
5262 | */ | |
5263 | static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr, | |
5264 | u32 dcnt, u32 *data) | |
5265 | { | |
5266 | int i; | |
5267 | u64 pbc; | |
5268 | u32 __iomem *piobuf; | |
5269 | u32 pnum, control, len; | |
5270 | struct qib_devdata *dd = ppd->dd; | |
5271 | ||
5272 | i = 0; | |
5273 | len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */ | |
5274 | control = qib_7322_setpbc_control(ppd, len, 0, 15); | |
5275 | pbc = ((u64) control << 32) | len; | |
5276 | while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) { | |
5277 | if (i++ > 15) | |
5278 | return; | |
5279 | udelay(2); | |
5280 | } | |
5281 | /* disable header check on this packet, since it can't be valid */ | |
5282 | dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); | |
5283 | writeq(pbc, piobuf); | |
5284 | qib_flush_wc(); | |
5285 | qib_pio_copy(piobuf + 2, hdr, 7); | |
5286 | qib_pio_copy(piobuf + 9, data, dcnt); | |
5287 | if (dd->flags & QIB_USE_SPCL_TRIG) { | |
5288 | u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; | |
5289 | ||
5290 | qib_flush_wc(); | |
5291 | __raw_writel(0xaebecede, piobuf + spcl_off); | |
5292 | } | |
5293 | qib_flush_wc(); | |
5294 | qib_sendbuf_done(dd, pnum); | |
5295 | /* and re-enable hdr check */ | |
5296 | dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); | |
5297 | } | |
5298 | ||
5299 | /* | |
5300 | * _start packet gets sent twice at start, _done gets sent twice at end | |
5301 | */ | |
5302 | static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which) | |
5303 | { | |
5304 | struct qib_devdata *dd = ppd->dd; | |
5305 | static u32 swapped; | |
5306 | u32 dw, i, hcnt, dcnt, *data; | |
5307 | static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; | |
5308 | static u32 madpayload_start[0x40] = { | |
5309 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | |
5310 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | |
5311 | 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ | |
5312 | }; | |
5313 | static u32 madpayload_done[0x40] = { | |
5314 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | |
5315 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | |
5316 | 0x40000001, 0x1388, 0x15e, /* rest 0's */ | |
5317 | }; | |
5318 | ||
5319 | dcnt = ARRAY_SIZE(madpayload_start); | |
5320 | hcnt = ARRAY_SIZE(hdr); | |
5321 | if (!swapped) { | |
5322 | /* for maintainability, do it at runtime */ | |
5323 | for (i = 0; i < hcnt; i++) { | |
5324 | dw = (__force u32) cpu_to_be32(hdr[i]); | |
5325 | hdr[i] = dw; | |
5326 | } | |
5327 | for (i = 0; i < dcnt; i++) { | |
5328 | dw = (__force u32) cpu_to_be32(madpayload_start[i]); | |
5329 | madpayload_start[i] = dw; | |
5330 | dw = (__force u32) cpu_to_be32(madpayload_done[i]); | |
5331 | madpayload_done[i] = dw; | |
5332 | } | |
5333 | swapped = 1; | |
5334 | } | |
5335 | ||
5336 | data = which ? madpayload_done : madpayload_start; | |
5337 | ||
5338 | autoneg_7322_sendpkt(ppd, hdr, dcnt, data); | |
5339 | qib_read_kreg64(dd, kr_scratch); | |
5340 | udelay(2); | |
5341 | autoneg_7322_sendpkt(ppd, hdr, dcnt, data); | |
5342 | qib_read_kreg64(dd, kr_scratch); | |
5343 | udelay(2); | |
5344 | } | |
5345 | ||
5346 | /* | |
5347 | * Do the absolute minimum to cause an IB speed change, and make it | |
5348 | * ready, but don't actually trigger the change. The caller will | |
5349 | * do that when ready (if link is in Polling training state, it will | |
5350 | * happen immediately, otherwise when link next goes down) | |
5351 | * | |
5352 | * This routine should only be used as part of the DDR autonegotiation | |
5353 | * code for devices that are not compliant with IB 1.2 (or code that | |
5354 | * fixes things up for same). | |
5355 | * | |
5356 | * When the link has gone down and autoneg is enabled, or autoneg has | |
5357 | * failed and we give up until next time, we set both speeds, and | |
5358 | * then we want IBTA 1.2 mode enabled as well as "use max enabled speed". | |
5359 | */ | |
5360 | static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) | |
5361 | { | |
5362 | u64 newctrlb; | |
5363 | newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | | |
5364 | IBA7322_IBC_IBTA_1_2_MASK | | |
5365 | IBA7322_IBC_MAX_SPEED_MASK); | |
5366 | ||
5367 | if (speed & (speed - 1)) /* multiple speeds */ | |
5368 | newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) | | |
5369 | IBA7322_IBC_IBTA_1_2_MASK | | |
5370 | IBA7322_IBC_MAX_SPEED_MASK; | |
5371 | else | |
5372 | newctrlb |= speed == QIB_IB_QDR ? | |
5373 | IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK : | |
5374 | ((speed == QIB_IB_DDR ? | |
5375 | IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR)); | |
5376 | ||
5377 | if (newctrlb == ppd->cpspec->ibcctrl_b) | |
5378 | return; | |
5379 | ||
5380 | ppd->cpspec->ibcctrl_b = newctrlb; | |
5381 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | |
5382 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
5383 | } | |
5384 | ||
5385 | /* | |
5386 | * This routine is only used when we are not talking to another | |
5387 | * IB 1.2-compliant device that we think can do DDR. | |
5388 | * (This includes all existing switch chips as of Oct 2007.) | |
5389 | * 1.2-compliant devices go directly to DDR prior to reaching INIT | |
5390 | */ | |
5391 | static void try_7322_autoneg(struct qib_pportdata *ppd) | |
5392 | { | |
5393 | unsigned long flags; | |
5394 | ||
5395 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5396 | ppd->lflags |= QIBL_IB_AUTONEG_INPROG; | |
5397 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5398 | qib_autoneg_7322_send(ppd, 0); | |
5399 | set_7322_ibspeed_fast(ppd, QIB_IB_DDR); | |
5400 | qib_7322_mini_pcs_reset(ppd); | |
5401 | /* 2 msec is minimum length of a poll cycle */ | |
f0626710 TH |
5402 | queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work, |
5403 | msecs_to_jiffies(2)); | |
f931551b RC |
5404 | } |
5405 | ||
5406 | /* | |
5407 | * Handle the empirically determined mechanism for auto-negotiation | |
5408 | * of DDR speed with switches. | |
5409 | */ | |
5410 | static void autoneg_7322_work(struct work_struct *work) | |
5411 | { | |
5412 | struct qib_pportdata *ppd; | |
5413 | struct qib_devdata *dd; | |
5414 | u64 startms; | |
5415 | u32 i; | |
5416 | unsigned long flags; | |
5417 | ||
5418 | ppd = container_of(work, struct qib_chippport_specific, | |
5419 | autoneg_work.work)->ppd; | |
5420 | dd = ppd->dd; | |
5421 | ||
5422 | startms = jiffies_to_msecs(jiffies); | |
5423 | ||
5424 | /* | |
5425 | * Busy-wait for this first part; it should be at most a | |
5426 | * few hundred usec, since we scheduled ourselves for 2 msec. | |
5427 | */ | |
5428 | for (i = 0; i < 25; i++) { | |
5429 | if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState) | |
5430 | == IB_7322_LT_STATE_POLLQUIET) { | |
5431 | qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); | |
5432 | break; | |
5433 | } | |
5434 | udelay(100); | |
5435 | } | |
5436 | ||
5437 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | |
5438 | goto done; /* we got there early or told to stop */ | |
5439 | ||
5440 | /* we expect this to time out */ | |
5441 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | |
5442 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | |
5443 | msecs_to_jiffies(90))) | |
5444 | goto done; | |
5445 | qib_7322_mini_pcs_reset(ppd); | |
5446 | ||
5447 | /* we expect this to time out */ | |
5448 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | |
5449 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | |
5450 | msecs_to_jiffies(1700))) | |
5451 | goto done; | |
5452 | qib_7322_mini_pcs_reset(ppd); | |
5453 | ||
5454 | set_7322_ibspeed_fast(ppd, QIB_IB_SDR); | |
5455 | ||
5456 | /* | |
5457 | * Wait up to 250 msec for link to train and get to INIT at DDR; | |
5458 | * this should terminate early. | |
5459 | */ | |
5460 | wait_event_timeout(ppd->cpspec->autoneg_wait, | |
5461 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | |
5462 | msecs_to_jiffies(250)); | |
5463 | done: | |
5464 | if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { | |
5465 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5466 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | |
5467 | if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) { | |
5468 | ppd->lflags |= QIBL_IB_AUTONEG_FAILED; | |
5469 | ppd->cpspec->autoneg_tries = 0; | |
5470 | } | |
5471 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5472 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | |
5473 | } | |
5474 | } | |
5475 | ||
5476 | /* | |
5477 | * This routine is used to request IPG set in the QLogic switch. | |
5478 | * Only called if r1. | |
5479 | */ | |
5480 | static void try_7322_ipg(struct qib_pportdata *ppd) | |
5481 | { | |
5482 | struct qib_ibport *ibp = &ppd->ibport_data; | |
5483 | struct ib_mad_send_buf *send_buf; | |
5484 | struct ib_mad_agent *agent; | |
5485 | struct ib_smp *smp; | |
5486 | unsigned delay; | |
5487 | int ret; | |
5488 | ||
5489 | agent = ibp->send_agent; | |
5490 | if (!agent) | |
5491 | goto retry; | |
5492 | ||
5493 | send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, | |
5494 | IB_MGMT_MAD_DATA, GFP_ATOMIC); | |
5495 | if (IS_ERR(send_buf)) | |
5496 | goto retry; | |
5497 | ||
5498 | if (!ibp->smi_ah) { | |
f931551b RC |
5499 | struct ib_ah *ah; |
5500 | ||
1fb9fed6 | 5501 | ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE)); |
f931551b | 5502 | if (IS_ERR(ah)) |
1fb9fed6 | 5503 | ret = PTR_ERR(ah); |
f931551b RC |
5504 | else { |
5505 | send_buf->ah = ah; | |
5506 | ibp->smi_ah = to_iah(ah); | |
5507 | ret = 0; | |
5508 | } | |
5509 | } else { | |
5510 | send_buf->ah = &ibp->smi_ah->ibah; | |
5511 | ret = 0; | |
5512 | } | |
5513 | ||
5514 | smp = send_buf->mad; | |
5515 | smp->base_version = IB_MGMT_BASE_VERSION; | |
5516 | smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE; | |
5517 | smp->class_version = 1; | |
5518 | smp->method = IB_MGMT_METHOD_SEND; | |
5519 | smp->hop_cnt = 1; | |
5520 | smp->attr_id = QIB_VENDOR_IPG; | |
5521 | smp->attr_mod = 0; | |
5522 | ||
5523 | if (!ret) | |
5524 | ret = ib_post_send_mad(send_buf, NULL); | |
5525 | if (ret) | |
5526 | ib_free_send_mad(send_buf); | |
5527 | retry: | |
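	/*
	 * Back off exponentially: 2 << ipg_tries msec between attempts;
	 * ipg_7322_work() stops retrying after 10 tries.
	 */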
5528 | delay = 2 << ppd->cpspec->ipg_tries; | |
f0626710 TH |
5529 | queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work, |
5530 | msecs_to_jiffies(delay)); | |
f931551b RC |
5531 | } |
5532 | ||
5533 | /* | |
5534 | * Timeout handler for setting IPG. | |
5535 | * Only called if r1. | |
5536 | */ | |
5537 | static void ipg_7322_work(struct work_struct *work) | |
5538 | { | |
5539 | struct qib_pportdata *ppd; | |
5540 | ||
5541 | ppd = container_of(work, struct qib_chippport_specific, | |
5542 | ipg_work.work)->ppd; | |
5543 | if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE)) | |
5544 | && ++ppd->cpspec->ipg_tries <= 10) | |
5545 | try_7322_ipg(ppd); | |
5546 | } | |
5547 | ||
5548 | static u32 qib_7322_iblink_state(u64 ibcs) | |
5549 | { | |
5550 | u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState); | |
5551 | ||
5552 | switch (state) { | |
5553 | case IB_7322_L_STATE_INIT: | |
5554 | state = IB_PORT_INIT; | |
5555 | break; | |
5556 | case IB_7322_L_STATE_ARM: | |
5557 | state = IB_PORT_ARMED; | |
5558 | break; | |
5559 | case IB_7322_L_STATE_ACTIVE: | |
5560 | /* fall through */ | |
5561 | case IB_7322_L_STATE_ACT_DEFER: | |
5562 | state = IB_PORT_ACTIVE; | |
5563 | break; | |
5564 | default: /* fall through */ | |
5565 | case IB_7322_L_STATE_DOWN: | |
5566 | state = IB_PORT_DOWN; | |
5567 | break; | |
5568 | } | |
5569 | return state; | |
5570 | } | |
5571 | ||
5572 | /* returns the IBTA physical port state, rather than the IBC link training state */ | |
5573 | static u8 qib_7322_phys_portstate(u64 ibcs) | |
5574 | { | |
5575 | u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState); | |
5576 | return qib_7322_physportstate[state]; | |
5577 | } | |
5578 | ||
5579 | static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |
5580 | { | |
5581 | int ret = 0, symadj = 0; | |
5582 | unsigned long flags; | |
5583 | int mult; | |
5584 | ||
5585 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5586 | ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; | |
5587 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5588 | ||
5589 | /* Update our picture of width and speed from chip */ | |
5590 | if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) { | |
5591 | ppd->link_speed_active = QIB_IB_QDR; | |
5592 | mult = 4; | |
5593 | } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) { | |
5594 | ppd->link_speed_active = QIB_IB_DDR; | |
5595 | mult = 2; | |
5596 | } else { | |
5597 | ppd->link_speed_active = QIB_IB_SDR; | |
5598 | mult = 1; | |
5599 | } | |
5600 | if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) { | |
5601 | ppd->link_width_active = IB_WIDTH_4X; | |
5602 | mult *= 4; | |
5603 | } else | |
5604 | ppd->link_width_active = IB_WIDTH_1X; | |
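	/*
	 * mult now counts multiples of 2.5 Gb/s (e.g. QDR x4 -> 16, i.e.
	 * 40 Gb/s); ib_rate_to_delay[] then yields the delay multiplier
	 * used for static-rate pacing.
	 */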
5605 | ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)]; | |
5606 | ||
5607 | if (!ibup) { | |
5608 | u64 clr; | |
5609 | ||
5610 | /* Link went down. */ | |
5611 | /* do IPG MAD again after linkdown, even if last time failed */ | |
5612 | ppd->cpspec->ipg_tries = 0; | |
5613 | clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) & | |
5614 | (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) | | |
5615 | SYM_MASK(IBCStatusB_0, heartbeat_crosstalk)); | |
5616 | if (clr) | |
5617 | qib_write_kreg_port(ppd, krp_ibcstatus_b, clr); | |
5618 | if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | |
5619 | QIBL_IB_AUTONEG_INPROG))) | |
5620 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | |
5621 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | |
dde05cbd MH |
5622 | struct qib_qsfp_data *qd = |
5623 | &ppd->cpspec->qsfp_data; | |
a77fcf89 RC |
5624 | /* unlock the Tx settings, speed may change */ |
5625 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
5626 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
5627 | reset_tx_deemphasis_override)); | |
f931551b | 5628 | qib_cancel_sends(ppd); |
a77fcf89 RC |
5629 | /* on link down, ensure sane pcs state */ |
5630 | qib_7322_mini_pcs_reset(ppd); | |
dde05cbd MH |
5631 | /* schedule the qsfp refresh which should turn the link |
5632 | off */ | |
5633 | if (ppd->dd->flags & QIB_HAS_QSFP) { | |
8482d5d1 | 5634 | qd->t_insert = jiffies; |
042f36e1 | 5635 | queue_work(ib_wq, &qd->work); |
dde05cbd | 5636 | } |
f931551b RC |
5637 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
5638 | if (__qib_sdma_running(ppd)) | |
5639 | __qib_sdma_process_event(ppd, | |
5640 | qib_sdma_event_e70_go_idle); | |
5641 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | |
5642 | } | |
5643 | clr = read_7322_creg32_port(ppd, crp_iblinkdown); | |
5644 | if (clr == ppd->cpspec->iblnkdownsnap) | |
5645 | ppd->cpspec->iblnkdowndelta++; | |
5646 | } else { | |
5647 | if (qib_compat_ddr_negotiate && | |
5648 | !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | |
5649 | QIBL_IB_AUTONEG_INPROG)) && | |
5650 | ppd->link_speed_active == QIB_IB_SDR && | |
5651 | (ppd->link_speed_enabled & QIB_IB_DDR) | |
5652 | && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) { | |
5653 | /* we are SDR, and auto-negotiation enabled */ | |
5654 | ++ppd->cpspec->autoneg_tries; | |
5655 | if (!ppd->cpspec->ibdeltainprog) { | |
5656 | ppd->cpspec->ibdeltainprog = 1; | |
5657 | ppd->cpspec->ibsymdelta += | |
5658 | read_7322_creg32_port(ppd, | |
5659 | crp_ibsymbolerr) - | |
5660 | ppd->cpspec->ibsymsnap; | |
5661 | ppd->cpspec->iblnkerrdelta += | |
5662 | read_7322_creg32_port(ppd, | |
5663 | crp_iblinkerrrecov) - | |
5664 | ppd->cpspec->iblnkerrsnap; | |
5665 | } | |
5666 | try_7322_autoneg(ppd); | |
5667 | ret = 1; /* no other IB status change processing */ | |
5668 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | |
5669 | ppd->link_speed_active == QIB_IB_SDR) { | |
5670 | qib_autoneg_7322_send(ppd, 1); | |
5671 | set_7322_ibspeed_fast(ppd, QIB_IB_DDR); | |
5672 | qib_7322_mini_pcs_reset(ppd); | |
5673 | udelay(2); | |
5674 | ret = 1; /* no other IB status change processing */ | |
5675 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | |
5676 | (ppd->link_speed_active & QIB_IB_DDR)) { | |
5677 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5678 | ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | | |
5679 | QIBL_IB_AUTONEG_FAILED); | |
5680 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5681 | ppd->cpspec->autoneg_tries = 0; | |
5682 | /* re-enable SDR, for next link down */ | |
5683 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | |
5684 | wake_up(&ppd->cpspec->autoneg_wait); | |
5685 | symadj = 1; | |
5686 | } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { | |
5687 | /* | |
5688 | * Clear autoneg failure flag, and do setup | |
5689 | * so we'll try next time link goes down and | |
5690 | * back to INIT (possibly connected to a | |
5691 | * different device). | |
5692 | */ | |
5693 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5694 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | |
5695 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5696 | ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK; | |
5697 | symadj = 1; | |
5698 | } | |
5699 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | |
5700 | symadj = 1; | |
5701 | if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) | |
5702 | try_7322_ipg(ppd); | |
5703 | if (!ppd->cpspec->recovery_init) | |
5704 | setup_7322_link_recovery(ppd, 0); | |
5705 | ppd->cpspec->qdr_dfe_time = jiffies + | |
5706 | msecs_to_jiffies(QDR_DFE_DISABLE_DELAY); | |
5707 | } | |
5708 | ppd->cpspec->ibmalfusesnap = 0; | |
5709 | ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, | |
5710 | crp_errlink); | |
5711 | } | |
5712 | if (symadj) { | |
5713 | ppd->cpspec->iblnkdownsnap = | |
5714 | read_7322_creg32_port(ppd, crp_iblinkdown); | |
5715 | if (ppd->cpspec->ibdeltainprog) { | |
5716 | ppd->cpspec->ibdeltainprog = 0; | |
5717 | ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd, | |
5718 | crp_ibsymbolerr) - ppd->cpspec->ibsymsnap; | |
5719 | ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd, | |
5720 | crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; | |
5721 | } | |
5722 | } else if (!ibup && qib_compat_ddr_negotiate && | |
5723 | !ppd->cpspec->ibdeltainprog && | |
5724 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | |
5725 | ppd->cpspec->ibdeltainprog = 1; | |
5726 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | |
5727 | crp_ibsymbolerr); | |
5728 | ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, | |
5729 | crp_iblinkerrrecov); | |
5730 | } | |
5731 | ||
5732 | if (!ret) | |
5733 | qib_setup_7322_setextled(ppd, ibup); | |
5734 | return ret; | |
5735 | } | |
5736 | ||
5737 | /* | |
5738 | * Does a read/modify/write of the appropriate registers to | |
5739 | * set the output and direction bits selected by mask. | |
5740 | * These are in their canonical positions (e.g. the lsb of | |
5741 | * dir will end up in D48 of extctrl on existing chips). | |
5742 | * Returns the contents of the GP Inputs. | |
5743 | */ | |
5744 | static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) | |
5745 | { | |
5746 | u64 read_val, new_out; | |
5747 | unsigned long flags; | |
5748 | ||
5749 | if (mask) { | |
5750 | /* some bits being written, lock access to GPIO */ | |
5751 | dir &= mask; | |
5752 | out &= mask; | |
5753 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
5754 | dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); | |
5755 | dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); | |
5756 | new_out = (dd->cspec->gpio_out & ~mask) | out; | |
5757 | ||
5758 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | |
5759 | qib_write_kreg(dd, kr_gpio_out, new_out); | |
5760 | dd->cspec->gpio_out = new_out; | |
5761 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
5762 | } | |
5763 | /* | |
5764 | * It is unlikely that a read at this time would get valid | |
5765 | * data on a pin whose direction line was set in the same | |
5766 | * call to this function. We include the read here because | |
5767 | * that allows us to potentially combine a change on one pin with | |
5768 | * a read on another, and because the old code did something like | |
5769 | * this. | |
5770 | */ | |
5771 | read_val = qib_read_kreg64(dd, kr_extstatus); | |
5772 | return SYM_FIELD(read_val, EXTStatus, GPIOIn); | |
5773 | } | |
5774 | ||
5775 | /* Enable writes to config EEPROM, if possible. Returns previous state */ | |
5776 | static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen) | |
5777 | { | |
5778 | int prev_wen; | |
5779 | u32 mask; | |
5780 | ||
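	/*
	 * The WEN pin appears to be active-low: the mask==0 call below just
	 * reads the GPIO inputs (inverted to recover the logical previous
	 * state), and the second call drives the pin low (out=0) to enable
	 * writes or high (out=mask) to protect the EEPROM.
	 */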
5781 | mask = 1 << QIB_EEPROM_WEN_NUM; | |
5782 | prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM; | |
5783 | gpio_7322_mod(dd, wen ? 0 : mask, mask, mask); | |
5784 | ||
5785 | return prev_wen & 1; | |
5786 | } | |
5787 | ||
5788 | /* | |
5789 | * Read fundamental info we need to use the chip. These are | |
5790 | * the registers that describe chip capabilities, and are | |
5791 | * saved in shadow registers. | |
5792 | */ | |
5793 | static void get_7322_chip_params(struct qib_devdata *dd) | |
5794 | { | |
5795 | u64 val; | |
5796 | u32 piobufs; | |
5797 | int mtu; | |
5798 | ||
5799 | dd->palign = qib_read_kreg32(dd, kr_pagealign); | |
5800 | ||
5801 | dd->uregbase = qib_read_kreg32(dd, kr_userregbase); | |
5802 | ||
5803 | dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); | |
5804 | dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); | |
5805 | dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); | |
5806 | dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); | |
5807 | dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; | |
5808 | ||
5809 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | |
5810 | dd->piobcnt2k = val & ~0U; | |
5811 | dd->piobcnt4k = val >> 32; | |
5812 | val = qib_read_kreg64(dd, kr_sendpiosize); | |
5813 | dd->piosize2k = val & ~0U; | |
5814 | dd->piosize4k = val >> 32; | |
5815 | ||
5816 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | |
5817 | if (mtu == -1) | |
5818 | mtu = QIB_DEFAULT_MTU; | |
5819 | dd->pport[0].ibmtu = (u32)mtu; | |
5820 | dd->pport[1].ibmtu = (u32)mtu; | |
5821 | ||
5822 | /* these may be adjusted in init_chip_wc_pat() */ | |
5823 | dd->pio2kbase = (u32 __iomem *) | |
5824 | ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); | |
5825 | dd->pio4kbase = (u32 __iomem *) | |
5826 | ((char __iomem *) dd->kregbase + | |
5827 | (dd->piobufbase >> 32)); | |
5828 | /* | |
5829 | * 4K buffers take 2 pages; we use roundup just to be | |
5830 | * paranoid; we calculate it once here, rather than on | |
5831 | * every buffer allocation. | |
5832 | */ | |
5833 | dd->align4k = ALIGN(dd->piosize4k, dd->palign); | |
5834 | ||
5835 | piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; | |
5836 | ||
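	/*
	 * Each send buffer takes 2 bits in a PIO-avail register, so one
	 * 64-bit register covers 32 buffers; the expression below is just
	 * DIV_ROUND_UP(piobufs, 32) written out (e.g. 200 buffers would
	 * need 7 shadow registers -- illustrative count only).
	 */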
5837 | dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / | |
5838 | (sizeof(u64) * BITS_PER_BYTE / 2); | |
5839 | } | |
5840 | ||
5841 | /* | |
5842 | * The chip base addresses in cspec and cpspec have to be set | |
5843 | * after possible init_chip_wc_pat(), rather than in | |
5844 | * get_7322_chip_params(), so they are split out into a separate function. | |
5845 | */ | |
5846 | static void qib_7322_set_baseaddrs(struct qib_devdata *dd) | |
5847 | { | |
5848 | u32 cregbase; | |
5849 | cregbase = qib_read_kreg32(dd, kr_counterregbase); | |
5850 | ||
5851 | dd->cspec->cregbase = (u64 __iomem *)(cregbase + | |
5852 | (char __iomem *)dd->kregbase); | |
5853 | ||
5854 | dd->egrtidbase = (u64 __iomem *) | |
5855 | ((char __iomem *) dd->kregbase + dd->rcvegrbase); | |
5856 | ||
5857 | /* port registers are defined as relative to base of chip */ | |
5858 | dd->pport[0].cpspec->kpregbase = | |
5859 | (u64 __iomem *)((char __iomem *)dd->kregbase); | |
5860 | dd->pport[1].cpspec->kpregbase = | |
5861 | (u64 __iomem *)(dd->palign + | |
5862 | (char __iomem *)dd->kregbase); | |
5863 | dd->pport[0].cpspec->cpregbase = | |
5864 | (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], | |
5865 | kr_counterregbase) + (char __iomem *)dd->kregbase); | |
5866 | dd->pport[1].cpspec->cpregbase = | |
5867 | (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], | |
5868 | kr_counterregbase) + (char __iomem *)dd->kregbase); | |
5869 | } | |
5870 | ||
5871 | /* | |
5872 | * This is a fairly special-purpose observer, so we only support | |
5873 | * the port-specific parts of SendCtrl | |
5874 | */ | |
5875 | ||
5876 | #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \ | |
5877 | SYM_MASK(SendCtrl_0, SDmaEnable) | \ | |
5878 | SYM_MASK(SendCtrl_0, SDmaIntEnable) | \ | |
5879 | SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \ | |
5880 | SYM_MASK(SendCtrl_0, SDmaHalt) | \ | |
5881 | SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \ | |
5882 | SYM_MASK(SendCtrl_0, ForceCreditUpToDate)) | |
5883 | ||
5884 | static int sendctrl_hook(struct qib_devdata *dd, | |
5885 | const struct diag_observer *op, u32 offs, | |
5886 | u64 *data, u64 mask, int only_32) | |
5887 | { | |
5888 | unsigned long flags; | |
5889 | unsigned idx; | |
5890 | unsigned pidx; | |
5891 | struct qib_pportdata *ppd = NULL; | |
5892 | u64 local_data, all_bits; | |
5893 | ||
5894 | /* | |
5895 | * The fixed correspondence between Physical ports and pports is | |
5896 | * severed. We need to hunt for the ppd that corresponds | |
5897 | * to the offset we got. And we have to do that without admitting | |
5898 | * we know the stride, apparently. | |
5899 | */ | |
5900 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
5901 | u64 __iomem *psptr; | |
5902 | u32 psoffs; | |
5903 | ||
5904 | ppd = dd->pport + pidx; | |
5905 | if (!ppd->cpspec->kpregbase) | |
5906 | continue; | |
5907 | ||
5908 | psptr = ppd->cpspec->kpregbase + krp_sendctrl; | |
5909 | psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); | |
5910 | if (psoffs == offs) | |
5911 | break; | |
5912 | } | |
5913 | ||
5914 | /* If pport is not being managed by driver, just avoid shadows. */ | |
5915 | if (pidx >= dd->num_pports) | |
5916 | ppd = NULL; | |
5917 | ||
5918 | /* In any case, "idx" is flat index in kreg space */ | |
5919 | idx = offs / sizeof(u64); | |
5920 | ||
5921 | all_bits = ~0ULL; | |
5922 | if (only_32) | |
5923 | all_bits >>= 32; | |
5924 | ||
5925 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
5926 | if (!ppd || (mask & all_bits) != all_bits) { | |
5927 | /* | |
5928 | * At least some mask bits are zero, so we need | |
5929 | * to read. The judgement call is whether from | |
5930 | * reg or shadow. First-cut: read reg, and complain | |
5931 | * if any bits which should be shadowed are different | |
5932 | * from their shadowed value. | |
5933 | */ | |
5934 | if (only_32) | |
5935 | local_data = (u64)qib_read_kreg32(dd, idx); | |
5936 | else | |
5937 | local_data = qib_read_kreg64(dd, idx); | |
5938 | *data = (local_data & ~mask) | (*data & mask); | |
5939 | } | |
5940 | if (mask) { | |
5941 | /* | |
5942 | * At least some mask bits are one, so we need | |
5943 | * to write, but only shadow some bits. | |
5944 | */ | |
5945 | u64 sval, tval; /* Shadowed, transient */ | |
5946 | ||
5947 | /* | |
5948 | * New shadow val is bits we don't want to touch, | |
5949 | * ORed with bits we do, that are intended for shadow. | |
5950 | */ | |
5951 | if (ppd) { | |
5952 | sval = ppd->p_sendctrl & ~mask; | |
5953 | sval |= *data & SENDCTRL_SHADOWED & mask; | |
5954 | ppd->p_sendctrl = sval; | |
5955 | } else | |
5956 | sval = *data & SENDCTRL_SHADOWED & mask; | |
5957 | tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); | |
5958 | qib_write_kreg(dd, idx, tval); | |
5959 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
5960 | } | |
5961 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
5962 | return only_32 ? 4 : 8; | |
5963 | } | |
5964 | ||
5965 | static const struct diag_observer sendctrl_0_observer = { | |
5966 | sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64), | |
5967 | KREG_IDX(SendCtrl_0) * sizeof(u64) | |
5968 | }; | |
5969 | ||
5970 | static const struct diag_observer sendctrl_1_observer = { | |
5971 | sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64), | |
5972 | KREG_IDX(SendCtrl_1) * sizeof(u64) | |
5973 | }; | |
5974 | ||
5975 | static ushort sdma_fetch_prio = 8; | |
5976 | module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO); | |
5977 | MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority"); | |
5978 | ||
5979 | /* Besides logging QSFP events, we set appropriate TxDDS values */ | |
5980 | static void init_txdds_table(struct qib_pportdata *ppd, int override); | |
5981 | ||
5982 | static void qsfp_7322_event(struct work_struct *work) | |
5983 | { | |
5984 | struct qib_qsfp_data *qd; | |
5985 | struct qib_pportdata *ppd; | |
8482d5d1 | 5986 | unsigned long pwrup; |
16d99812 | 5987 | unsigned long flags; |
f931551b RC |
5988 | int ret; |
5989 | u32 le2; | |
5990 | ||
5991 | qd = container_of(work, struct qib_qsfp_data, work); | |
5992 | ppd = qd->ppd; | |
dde05cbd MH |
5993 | pwrup = qd->t_insert + |
5994 | msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC); | |
f931551b | 5995 | |
dde05cbd MH |
5996 | /* Delay for 20 msecs to allow the ModPrs resistor to settle */ |
5997 | mdelay(QSFP_MODPRS_LAG_MSEC); | |
5998 | ||
16d99812 MH |
5999 | if (!qib_qsfp_mod_present(ppd)) { |
6000 | ppd->cpspec->qsfp_data.modpresent = 0; | |
dde05cbd MH |
6001 | /* Set the physical link to disabled */ |
6002 | qib_set_ib_7322_lstate(ppd, 0, | |
6003 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
16d99812 MH |
6004 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
6005 | ppd->lflags &= ~QIBL_LINKV; | |
6006 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
6007 | } else { | |
dde05cbd MH |
6008 | /* |
6009 | * Some QSFPs not only do not respond until the full power-up | |
6010 | * time, but may behave badly if we probe them too early. So hold | |
6011 | * off responding to insertion. | |
6012 | */ | |
6013 | while (1) { | |
8482d5d1 | 6014 | if (time_is_before_jiffies(pwrup)) |
dde05cbd MH |
6015 | break; |
6016 | msleep(20); | |
6017 | } | |
6018 | ||
6019 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); | |
6020 | ||
6021 | /* | |
6022 | * Need to change LE2 back to defaults if we couldn't | |
6023 | * read the cable type (to handle cable swaps), so do this | |
6024 | * even on failure to read cable information. We don't | |
6025 | * get here for QME, so IS_QME check not needed here. | |
6026 | */ | |
6027 | if (!ret && !ppd->dd->cspec->r1) { | |
6028 | if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) | |
6029 | le2 = LE2_QME; | |
6030 | else if (qd->cache.atten[1] >= qib_long_atten && | |
6031 | QSFP_IS_CU(qd->cache.tech)) | |
6032 | le2 = LE2_5m; | |
6033 | else | |
6034 | le2 = LE2_DEFAULT; | |
6035 | } else | |
4634b794 | 6036 | le2 = LE2_DEFAULT; |
dde05cbd MH |
6037 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); |
6038 | /* | |
6039 | * We always change parameters, since we can choose | |
6040 | * values for cables without eeproms, and the cable may have | |
6041 | * changed from a cable with full or partial eeprom content | |
6042 | * to one with partial or no content. | |
6043 | */ | |
6044 | init_txdds_table(ppd, 0); | |
6045 | /* The physical link is being re-enabled only when the | |
16d99812 MH |
6046 | * previous state was DISABLED and the VALID bit is not |
6047 | * set. This should only happen when the cable has been | |
6048 | * physically pulled. */ | |
6049 | if (!ppd->cpspec->qsfp_data.modpresent && | |
6050 | (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) { | |
6051 | ppd->cpspec->qsfp_data.modpresent = 1; | |
dde05cbd MH |
6052 | qib_set_ib_7322_lstate(ppd, 0, |
6053 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | |
16d99812 MH |
6054 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
6055 | ppd->lflags |= QIBL_LINKV; | |
6056 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
6057 | } | |
dde05cbd | 6058 | } |
f931551b RC |
6059 | } |
6060 | ||
6061 | /* | |
6062 | * There is little we can do but complain to the user if QSFP | |
6063 | * initialization fails. | |
6064 | */ | |
6065 | static void qib_init_7322_qsfp(struct qib_pportdata *ppd) | |
6066 | { | |
6067 | unsigned long flags; | |
6068 | struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data; | |
6069 | struct qib_devdata *dd = ppd->dd; | |
6070 | u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N; | |
6071 | ||
6072 | mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); | |
6073 | qd->ppd = ppd; | |
6074 | qib_qsfp_init(qd, qsfp_7322_event); | |
6075 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
6076 | dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); | |
6077 | dd->cspec->gpio_mask |= mod_prs_bit; | |
6078 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | |
6079 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | |
6080 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
6081 | } | |
6082 | ||
6083 | /* | |
a77fcf89 | 6084 | * Called at device initialization time, and also if the txselect |
f931551b RC |
6085 | * module parameter is changed. This is used for cables that don't |
6086 | * have valid QSFP EEPROMs (not present, or attenuation is zero). | |
6087 | * We initialize to the default, then if there is a specific | |
a77fcf89 RC |
6088 | * unit,port match, we use that (and set it immediately, for the |
6089 | * current speed, if the link is at INIT or better). | |
f931551b | 6090 | * String format is "default# unit#,port#=# ... u,p=#", separators must |
a77fcf89 RC |
6091 | * be a SPACE character. A newline terminates. The u,p=# tuples may |
6092 | * optionally have "u,p=#,#", where the final # is the H1 value. | |
f931551b RC |
6093 | * The last specific match is used (actually, all are used, but last |
6094 | * one is the one that winds up set); if none at all, fall back on default. | |
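 * Illustrative example (hypothetical values): "2 0,1=11 0,2=12,7\n"
 * selects default index 2, index 11 for unit 0 port 1, and index 12
 * with an H1 value of 7 for unit 0 port 2.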
6095 | */ | |
6096 | static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |
6097 | { | |
6098 | char *nxt, *str; | |
a77fcf89 | 6099 | u32 pidx, unit, port, deflt, h1; |
f931551b | 6100 | unsigned long val; |
a77fcf89 | 6101 | int any = 0, seth1; |
e706203c | 6102 | int txdds_size; |
f931551b | 6103 | |
a77fcf89 | 6104 | str = txselect_list; |
f931551b | 6105 | |
a77fcf89 | 6106 | /* default number is validated in setup_txselect() */ |
f931551b RC |
6107 | deflt = simple_strtoul(str, &nxt, 0); |
6108 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
6109 | dd->pport[pidx].cpspec->no_eep = deflt; | |
6110 | ||
e706203c MM |
6111 | txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ; |
6112 | if (IS_QME(dd) || IS_QMH(dd)) | |
6113 | txdds_size += TXDDS_MFG_SZ; | |
6114 | ||
f931551b RC |
6115 | while (*nxt && nxt[1]) { |
6116 | str = ++nxt; | |
6117 | unit = simple_strtoul(str, &nxt, 0); | |
6118 | if (nxt == str || !*nxt || *nxt != ',') { | |
6119 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | |
6120 | ; | |
6121 | continue; | |
6122 | } | |
6123 | str = ++nxt; | |
6124 | port = simple_strtoul(str, &nxt, 0); | |
6125 | if (nxt == str || *nxt != '=') { | |
6126 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | |
6127 | ; | |
6128 | continue; | |
6129 | } | |
6130 | str = ++nxt; | |
6131 | val = simple_strtoul(str, &nxt, 0); | |
6132 | if (nxt == str) { | |
6133 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | |
6134 | ; | |
6135 | continue; | |
6136 | } | |
e706203c | 6137 | if (val >= txdds_size) |
f931551b | 6138 | continue; |
a77fcf89 RC |
6139 | seth1 = 0; |
6140 | h1 = 0; /* gcc thinks it might be used uninitialized */ | |
6141 | if (*nxt == ',' && nxt[1]) { | |
6142 | str = ++nxt; | |
6143 | h1 = (u32)simple_strtoul(str, &nxt, 0); | |
6144 | if (nxt == str) | |
6145 | while (*nxt && *nxt++ != ' ') /* skip */ | |
6146 | ; | |
6147 | else | |
6148 | seth1 = 1; | |
6149 | } | |
f931551b RC |
6150 | for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; |
6151 | ++pidx) { | |
a77fcf89 RC |
6152 | struct qib_pportdata *ppd = &dd->pport[pidx]; |
6153 | ||
6154 | if (ppd->port != port || !ppd->link_speed_supported) | |
f931551b | 6155 | continue; |
a77fcf89 | 6156 | ppd->cpspec->no_eep = val; |
7c7a416e RC |
6157 | if (seth1) |
6158 | ppd->cpspec->h1_val = h1; | |
f931551b | 6159 | /* now change the IBC and serdes, overriding generic */ |
a77fcf89 | 6160 | init_txdds_table(ppd, 1); |
d70585f7 | 6161 | /* Re-enable the physical state machine on mezz boards |
dde05cbd MH |
6162 | * now that the correct settings have been set. |
6163 | * QSFP boards are handled by the QSFP event handler. */ |
d70585f7 MH |
6164 | if (IS_QMH(dd) || IS_QME(dd)) |
6165 | qib_set_ib_7322_lstate(ppd, 0, | |
6166 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | |
f931551b RC |
6167 | any++; |
6168 | } | |
6169 | if (*nxt == '\n') | |
6170 | break; /* done */ | |
6171 | } | |
6172 | if (change && !any) { | |
6173 | /* no specific setting, use the default. | |
6174 | * Change the IBC and serdes, but since it's | |
6175 | * general, don't override specific settings. | |
6176 | */ | |
a77fcf89 RC |
6177 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
6178 | if (dd->pport[pidx].link_speed_supported) | |
6179 | init_txdds_table(&dd->pport[pidx], 0); | |
f931551b RC |
6180 | } |
6181 | } | |
6182 | ||
a77fcf89 RC |
6183 | /* handle the txselect parameter changing */ |
6184 | static int setup_txselect(const char *str, struct kernel_param *kp) | |
f931551b RC |
6185 | { |
6186 | struct qib_devdata *dd; | |
6187 | unsigned long val; | |
2fadd831 | 6188 | char *n; |
f931551b | 6189 | if (strlen(str) >= MAX_ATTEN_LEN) { |
7fac3301 | 6190 | pr_info("txselect_values string too long\n"); |
f931551b RC |
6191 | return -ENOSPC; |
6192 | } | |
2fadd831 MM |
6193 | val = simple_strtoul(str, &n, 0); |
6194 | if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | |
e706203c | 6195 | TXDDS_MFG_SZ)) { |
7fac3301 | 6196 | pr_info("txselect_values must start with a number < %d\n", |
e706203c | 6197 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); |
2fadd831 | 6198 | return -EINVAL; |
f931551b | 6199 | } |
7fac3301 | 6200 | strcpy(txselect_list, str); |
2fadd831 | 6201 | |
f931551b | 6202 | list_for_each_entry(dd, &qib_dev_list, list) |
a77fcf89 RC |
6203 | if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) |
6204 | set_no_qsfp_atten(dd, 1); | |
f931551b RC |
6205 | return 0; |
6206 | } | |
6207 | ||
6208 | /* | |
6209 | * Write the final few registers that depend on some of the | |
6210 | * init setup. Done late in init, just before bringing up | |
6211 | * the serdes. | |
6212 | */ | |
6213 | static int qib_late_7322_initreg(struct qib_devdata *dd) | |
6214 | { | |
6215 | int ret = 0, n; | |
6216 | u64 val; | |
6217 | ||
6218 | qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); | |
6219 | qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); | |
6220 | qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); | |
6221 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | |
6222 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | |
6223 | if (val != dd->pioavailregs_phys) { | |
7fac3301 MM |
6224 | qib_dev_err(dd, |
6225 | "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", | |
6226 | (unsigned long) dd->pioavailregs_phys, | |
6227 | (unsigned long long) val); | |
f931551b RC |
6228 | ret = -EINVAL; |
6229 | } | |
6230 | ||
6231 | n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | |
6232 | qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); | |
6233 | /* the driver's own sends also get pkey, lid, etc. checking, to catch bugs */ | |
6234 | qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); | |
6235 | ||
6236 | qib_register_observer(dd, &sendctrl_0_observer); | |
6237 | qib_register_observer(dd, &sendctrl_1_observer); | |
6238 | ||
6239 | dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; | |
6240 | qib_write_kreg(dd, kr_control, dd->control); | |
6241 | /* | |
6242 | * Set SendDmaFetchPriority and init Tx params, including | |
6243 | * QSFP handler on boards that have QSFP. | |
6244 | * First set our default attenuation entry for cables that | |
6245 | * don't have valid attenuation. | |
6246 | */ | |
6247 | set_no_qsfp_atten(dd, 0); | |
6248 | for (n = 0; n < dd->num_pports; ++n) { | |
6249 | struct qib_pportdata *ppd = dd->pport + n; | |
6250 | ||
6251 | qib_write_kreg_port(ppd, krp_senddmaprioritythld, | |
6252 | sdma_fetch_prio & 0xf); | |
6253 | /* Initialize qsfp if present on board. */ | |
6254 | if (dd->flags & QIB_HAS_QSFP) | |
6255 | qib_init_7322_qsfp(ppd); | |
6256 | } | |
6257 | dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; | |
6258 | qib_write_kreg(dd, kr_control, dd->control); | |
6259 | ||
6260 | return ret; | |
6261 | } | |
6262 | ||
6263 | /* per IB port errors. */ | |
6264 | #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \ | |
6265 | MASK_ACROSS(8, 15)) | |
6266 | #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41)) | |
6267 | #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \ | |
6268 | MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \ | |
6269 | MASK_ACROSS(0, 11)) | |
6270 | ||
6271 | /* | |
6272 | * Write the initialization per-port registers that need to be done at | |
6273 | * driver load and after reset completes (i.e., that aren't done as part | |
6274 | * of other init procedures called from qib_init.c). | |
6275 | * Some of these should be redundant on reset, but play safe. | |
6276 | */ | |
6277 | static void write_7322_init_portregs(struct qib_pportdata *ppd) | |
6278 | { | |
6279 | u64 val; | |
6280 | int i; | |
6281 | ||
6282 | if (!ppd->link_speed_supported) { | |
6283 | /* no buffer credits for this port */ | |
6284 | for (i = 1; i < 8; i++) | |
6285 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); | |
6286 | qib_write_kreg_port(ppd, krp_ibcctrl_b, 0); | |
6287 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
6288 | return; | |
6289 | } | |
6290 | ||
6291 | /* | |
6292 | * Set the number of supported virtual lanes in IBC, | |
6293 | * for flow control packet handling on unsupported VLs | |
6294 | */ | |
6295 | val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); | |
6296 | val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP); | |
6297 | val |= (u64)(ppd->vls_supported - 1) << | |
6298 | SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP); | |
6299 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | |
6300 | ||
6301 | qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP); | |
6302 | ||
6303 | /* enable tx header checking */ | |
6304 | qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY | | |
6305 | IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID | | |
6306 | IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ); | |
6307 | ||
6308 | qib_write_kreg_port(ppd, krp_ncmodectrl, | |
6309 | SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal)); | |
6310 | ||
6311 | /* | |
6312 | * Unconditionally clear the bufmask bits. If SDMA is | |
6313 | * enabled, we'll set them appropriately later. | |
6314 | */ | |
6315 | qib_write_kreg_port(ppd, krp_senddmabufmask0, 0); | |
6316 | qib_write_kreg_port(ppd, krp_senddmabufmask1, 0); | |
6317 | qib_write_kreg_port(ppd, krp_senddmabufmask2, 0); | |
6318 | if (ppd->dd->cspec->r1) | |
6319 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate); | |
6320 | } | |
6321 | ||
6322 | /* | |
6323 | * Write the initialization per-device registers that need to be done at | |
6324 | * driver load and after reset completes (i.e., that aren't done as part | |
6325 | * of other init procedures called from qib_init.c). Also write per-port | |
6326 | * registers that are affected by overall device config, such as QP mapping. | |
6327 | * Some of these should be redundant on reset, but play safe. | |
6328 | */ | |
6329 | static void write_7322_initregs(struct qib_devdata *dd) | |
6330 | { | |
6331 | struct qib_pportdata *ppd; | |
6332 | int i, pidx; | |
6333 | u64 val; | |
6334 | ||
6335 | /* Set Multicast QPs received by port 2 to map to context one. */ | |
6336 | qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1); | |
6337 | ||
6338 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
6339 | unsigned n, regno; | |
6340 | unsigned long flags; | |
6341 | ||
2528ea60 MM |
6342 | if (dd->n_krcv_queues < 2 || |
6343 | !dd->pport[pidx].link_speed_supported) | |
f931551b RC |
6344 | continue; |
6345 | ||
6346 | ppd = &dd->pport[pidx]; | |
6347 | ||
6348 | /* be paranoid against later code motion, etc. */ | |
6349 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
6350 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable); | |
6351 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
6352 | ||
6353 | /* Initialize QP to context mapping */ | |
6354 | regno = krp_rcvqpmaptable; | |
6355 | val = 0; | |
6356 | if (dd->num_pports > 1) | |
6357 | n = dd->first_user_ctxt / dd->num_pports; | |
6358 | else | |
6359 | n = dd->first_user_ctxt - 1; | |
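		/*
		 * Each krp_rcvqpmaptable register packs six 5-bit context
		 * numbers; the loop below fills 32 QP-map entries, writing
		 * a register every 6 entries and the final partial one
		 * after the loop.
		 */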
6360 | for (i = 0; i < 32; ) { | |
6361 | unsigned ctxt; | |
6362 | ||
6363 | if (dd->num_pports > 1) | |
6364 | ctxt = (i % n) * dd->num_pports + pidx; | |
6365 | else if (i % n) | |
6366 | ctxt = (i % n) + 1; | |
6367 | else | |
6368 | ctxt = ppd->hw_pidx; | |
6369 | val |= ctxt << (5 * (i % 6)); | |
6370 | i++; | |
6371 | if (i % 6 == 0) { | |
6372 | qib_write_kreg_port(ppd, regno, val); | |
6373 | val = 0; | |
6374 | regno++; | |
6375 | } | |
6376 | } | |
6377 | qib_write_kreg_port(ppd, regno, val); | |
6378 | } | |
6379 | ||
6380 | /* | |
6381 | * Set up interrupt mitigation for kernel contexts, but | |
6382 | * not user contexts (user contexts use interrupts when | |
6383 | * stalled waiting for any packet, so want those interrupts | |
6384 | * right away). | |
6385 | */ | |
6386 | for (i = 0; i < dd->first_user_ctxt; i++) { | |
6387 | dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; | |
6388 | qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); | |
6389 | } | |
6390 | ||
6391 | /* | |
6392 | * Initialize the (disabled) rcvflow tables. Application code | |
6393 | * will set up each flow as it uses the flow. | |
6394 | * Doesn't clear any of the error bits that might be set. | |
6395 | */ | |
6396 | val = TIDFLOW_ERRBITS; /* these are W1C */ | |
0502f94c | 6397 | for (i = 0; i < dd->cfgctxts; i++) { |
f931551b RC |
6398 | int flow; |
6399 | for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) | |
6400 | qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); | |
6401 | } | |
6402 | ||
6403 | /* | |
6404 | * Dual-port cards init to dual-port recovery, single-port cards to | |
6405 | * the one port. Dual-port cards may later adjust to 1 port, | |
6406 | * and then back to dual port if both ports are connected. | |
6407 | */ | |
6408 | if (dd->num_pports) | |
6409 | setup_7322_link_recovery(dd->pport, dd->num_pports > 1); | |
6410 | } | |
6411 | ||
6412 | static int qib_init_7322_variables(struct qib_devdata *dd) | |
6413 | { | |
6414 | struct qib_pportdata *ppd; | |
6415 | unsigned features, pidx, sbufcnt; | |
6416 | int ret, mtu; | |
6417 | u32 sbufs, updthresh; | |
6418 | ||
6419 | /* pport structs are contiguous, allocated after devdata */ | |
6420 | ppd = (struct qib_pportdata *)(dd + 1); | |
6421 | dd->pport = ppd; | |
6422 | ppd[0].dd = dd; | |
6423 | ppd[1].dd = dd; | |
6424 | ||
6425 | dd->cspec = (struct qib_chip_specific *)(ppd + 2); | |
6426 | ||
6427 | ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); | |
6428 | ppd[1].cpspec = &ppd[0].cpspec[1]; | |
6429 | ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */ | |
6430 | ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */ | |
6431 | ||
6432 | spin_lock_init(&dd->cspec->rcvmod_lock); | |
6433 | spin_lock_init(&dd->cspec->gpio_lock); | |
6434 | ||
6435 | /* we haven't yet set QIB_PRESENT, so use read directly */ | |
6436 | dd->revision = readq(&dd->kregbase[kr_revision]); | |
6437 | ||
6438 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | |
7fac3301 MM |
6439 | qib_dev_err(dd, |
6440 | "Revision register read failure, giving up initialization\n"); | |
f931551b RC |
6441 | ret = -ENODEV; |
6442 | goto bail; | |
6443 | } | |
6444 | dd->flags |= QIB_PRESENT; /* now register routines work */ | |
6445 | ||
6446 | dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); | |
6447 | dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); | |
6448 | dd->cspec->r1 = dd->minrev == 1; | |
6449 | ||
6450 | get_7322_chip_params(dd); | |
6451 | features = qib_7322_boardname(dd); | |
6452 | ||
6453 | /* now that piobcnt2k and 4k set, we can allocate these */ | |
6454 | sbufcnt = dd->piobcnt2k + dd->piobcnt4k + | |
6455 | NUM_VL15_BUFS + BITS_PER_LONG - 1; | |
6456 | sbufcnt /= BITS_PER_LONG; | |
6457 | dd->cspec->sendchkenable = kmalloc(sbufcnt * | |
6458 | sizeof(*dd->cspec->sendchkenable), GFP_KERNEL); | |
6459 | dd->cspec->sendgrhchk = kmalloc(sbufcnt * | |
6460 | sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL); | |
6461 | dd->cspec->sendibchk = kmalloc(sbufcnt * | |
6462 | sizeof(*dd->cspec->sendibchk), GFP_KERNEL); | |
6463 | if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || | |
6464 | !dd->cspec->sendibchk) { | |
6465 | qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n"); | |
6466 | ret = -ENOMEM; | |
6467 | goto bail; | |
6468 | } | |
6469 | ||
6470 | ppd = dd->pport; | |
6471 | ||
6472 | /* | |
6473 | * GPIO bits for TWSI data and clock, | |
6474 | * used for serial EEPROM. | |
6475 | */ | |
6476 | dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; | |
6477 | dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; | |
6478 | dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; | |
6479 | ||
6480 | dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | | |
6481 | QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP | | |
6482 | QIB_HAS_THRESH_UPDATE | | |
6483 | (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0); | |
6484 | dd->flags |= qib_special_trigger ? | |
6485 | QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; | |
6486 | ||
6487 | /* | |
6488 | * Set up initial values. These may change when PAT is enabled, but | |
6489 | * we need these to do initial chip register accesses. | |
6490 | */ | |
6491 | qib_7322_set_baseaddrs(dd); | |
6492 | ||
6493 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | |
6494 | if (mtu == -1) | |
6495 | mtu = QIB_DEFAULT_MTU; | |
6496 | ||
6497 | dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; | |
6498 | /* all hwerrors become interrupts, unless special purposed */ | |
6499 | dd->cspec->hwerrmask = ~0ULL; | |
6500 | /* link_recovery setup causes these errors, so ignore them, | |
6501 | * other than clearing them when they occur */ | |
6502 | dd->cspec->hwerrmask &= | |
6503 | ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) | | |
6504 | SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) | | |
6505 | HWE_MASK(LATriggered)); | |
6506 | ||
6507 | for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { | |
6508 | struct qib_chippport_specific *cp = ppd->cpspec; | |
6509 | ppd->link_speed_supported = features & PORT_SPD_CAP; | |
6510 | features >>= PORT_SPD_CAP_SHIFT; | |
6511 | if (!ppd->link_speed_supported) { | |
6512 | /* single port mode (7340, or configured) */ | |
6513 | dd->skip_kctxt_mask |= 1 << pidx; | |
6514 | if (pidx == 0) { | |
6515 | /* Make sure port is disabled. */ | |
6516 | qib_write_kreg_port(ppd, krp_rcvctrl, 0); | |
6517 | qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); | |
6518 | ppd[0] = ppd[1]; | |
6519 | dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, | |
6520 | IBSerdesPClkNotDetectMask_0) | |
6521 | | SYM_MASK(HwErrMask, | |
6522 | SDmaMemReadErrMask_0)); | |
6523 | dd->cspec->int_enable_mask &= ~( | |
6524 | SYM_MASK(IntMask, SDmaCleanupDoneMask_0) | | |
6525 | SYM_MASK(IntMask, SDmaIdleIntMask_0) | | |
6526 | SYM_MASK(IntMask, SDmaProgressIntMask_0) | | |
6527 | SYM_MASK(IntMask, SDmaIntMask_0) | | |
6528 | SYM_MASK(IntMask, ErrIntMask_0) | | |
6529 | SYM_MASK(IntMask, SendDoneIntMask_0)); | |
6530 | } else { | |
6531 | /* Make sure port is disabled. */ | |
6532 | qib_write_kreg_port(ppd, krp_rcvctrl, 0); | |
6533 | qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); | |
6534 | dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, | |
6535 | IBSerdesPClkNotDetectMask_1) | |
6536 | | SYM_MASK(HwErrMask, | |
6537 | SDmaMemReadErrMask_1)); | |
6538 | dd->cspec->int_enable_mask &= ~( | |
6539 | SYM_MASK(IntMask, SDmaCleanupDoneMask_1) | | |
6540 | SYM_MASK(IntMask, SDmaIdleIntMask_1) | | |
6541 | SYM_MASK(IntMask, SDmaProgressIntMask_1) | | |
6542 | SYM_MASK(IntMask, SDmaIntMask_1) | | |
6543 | SYM_MASK(IntMask, ErrIntMask_1) | | |
6544 | SYM_MASK(IntMask, SendDoneIntMask_1)); | |
6545 | } | |
6546 | continue; | |
6547 | } | |
6548 | ||
6549 | dd->num_pports++; | |
7d7632ad MM |
6550 | ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); |
6551 | if (ret) { | |
6552 | dd->num_pports--; | |
6553 | goto bail; | |
6554 | } | |
f931551b RC |
6555 | |
6556 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | |
6557 | ppd->link_width_enabled = IB_WIDTH_4X; | |
6558 | ppd->link_speed_enabled = ppd->link_speed_supported; | |
6559 | /* | |
6560 | * Set the initial values to reasonable default, will be set | |
6561 | * for real when link is up. | |
6562 | */ | |
6563 | ppd->link_width_active = IB_WIDTH_4X; | |
6564 | ppd->link_speed_active = QIB_IB_SDR; | |
6565 | ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS]; | |
6566 | switch (qib_num_cfg_vls) { | |
6567 | case 1: | |
6568 | ppd->vls_supported = IB_VL_VL0; | |
6569 | break; | |
6570 | case 2: | |
6571 | ppd->vls_supported = IB_VL_VL0_1; | |
6572 | break; | |
6573 | default: | |
6574 | qib_devinfo(dd->pcidev, | |
6575 | "Invalid num_vls %u, using 4 VLs\n", | |
6576 | qib_num_cfg_vls); | |
6577 | qib_num_cfg_vls = 4; | |
6578 | /* fall through */ | |
6579 | case 4: | |
6580 | ppd->vls_supported = IB_VL_VL0_3; | |
6581 | break; | |
6582 | case 8: | |
6583 | if (mtu <= 2048) | |
6584 | ppd->vls_supported = IB_VL_VL0_7; | |
6585 | else { | |
6586 | qib_devinfo(dd->pcidev, | |
6587 | "Invalid num_vls %u for MTU %d " | |
6588 | ", using 4 VLs\n", | |
6589 | qib_num_cfg_vls, mtu); | |
6590 | ppd->vls_supported = IB_VL_VL0_3; | |
6591 | qib_num_cfg_vls = 4; | |
6592 | } | |
6593 | break; | |
6594 | } | |
6595 | ppd->vls_operational = ppd->vls_supported; | |
6596 | ||
6597 | init_waitqueue_head(&cp->autoneg_wait); | |
6598 | INIT_DELAYED_WORK(&cp->autoneg_work, | |
6599 | autoneg_7322_work); | |
6600 | if (ppd->dd->cspec->r1) | |
6601 | INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work); | |
6602 | ||
6603 | /* | |
6604 | * For Mez and similar cards, no qsfp info, so do | |
6605 | * the "cable info" setup here. Can be overridden | |
6606 | * in adapter-specific routines. | |
6607 | */ | |
7c7a416e RC |
6608 | if (!(dd->flags & QIB_HAS_QSFP)) { |
6609 | if (!IS_QMH(dd) && !IS_QME(dd)) | |
7fac3301 MM |
6610 | qib_devinfo(dd->pcidev, |
6611 | "IB%u:%u: Unknown mezzanine card type\n", | |
6612 | dd->unit, ppd->port); | |
a77fcf89 | 6613 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; |
f931551b | 6614 | /* |
a77fcf89 RC |
6615 | * Choose center value as default tx serdes setting |
6616 | * until changed through module parameter. | |
f931551b | 6617 | */ |
a77fcf89 RC |
6618 | ppd->cpspec->no_eep = IS_QMH(dd) ? |
6619 | TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4; | |
f931551b RC |
6620 | } else |
6621 | cp->h1_val = H1_FORCE_VAL; | |
6622 | ||
6623 | /* Avoid writes to chip for mini_init */ | |
6624 | if (!qib_mini_init) | |
6625 | write_7322_init_portregs(ppd); | |
6626 | ||
6627 | init_timer(&cp->chase_timer); | |
6628 | cp->chase_timer.function = reenable_chase; | |
6629 | cp->chase_timer.data = (unsigned long)ppd; | |
6630 | ||
6631 | ppd++; | |
6632 | } | |
6633 | ||
0a43e117 MM |
6634 | dd->rcvhdrentsize = qib_rcvhdrentsize ? |
6635 | qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE; | |
6636 | dd->rcvhdrsize = qib_rcvhdrsize ? | |
6637 | qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE; | |
a77fcf89 | 6638 | dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); |
f931551b RC |
6639 | |
6640 | /* we always allocate at least 2048 bytes for eager buffers */ | |
6641 | dd->rcvegrbufsize = max(mtu, 2048); | |
9e1c0e43 MM |
6642 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); |
6643 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | |
f931551b RC |
6644 | |
6645 | qib_7322_tidtemplate(dd); | |
6646 | ||
6647 | /* | |
6648 | * We can request a receive interrupt for 1 or | |
6649 | * more packets from current offset. | |
6650 | */ | |
6651 | dd->rhdrhead_intr_off = | |
6652 | (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT; | |
6653 | ||
6654 | /* setup the stats timer; the add_timer is done at end of init */ | |
6655 | init_timer(&dd->stats_timer); | |
6656 | dd->stats_timer.function = qib_get_7322_faststats; | |
6657 | dd->stats_timer.data = (unsigned long) dd; | |
6658 | ||
6659 | dd->ureg_align = 0x10000; /* 64KB alignment */ | |
6660 | ||
6661 | dd->piosize2kmax_dwords = dd->piosize2k >> 2; | |
6662 | ||
6663 | qib_7322_config_ctxts(dd); | |
6664 | qib_set_ctxtcnt(dd); | |
6665 | ||
6666 | if (qib_wc_pat) { | |
fce24a9d DO |
6667 | resource_size_t vl15off; |
6668 | /* | |
6669 | * We do not set WC on the VL15 buffers to avoid | |
6670 | * a rare problem with unaligned writes from | |
6671 | * interrupt-flushed store buffers, so we need | |
6672 | * to map those separately here. We can't solve | |
6673 | * this for the rarely used mtrr case. | |
6674 | */ | |
6675 | ret = init_chip_wc_pat(dd, 0); | |
f931551b RC |
6676 | if (ret) |
6677 | goto bail; | |
fce24a9d DO |
6678 | |
6679 | /* vl15 buffers start just after the 4k buffers */ | |
6680 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + | |
6681 | dd->piobcnt4k * dd->align4k; | |
6682 | dd->piovl15base = ioremap_nocache(vl15off, | |
6683 | NUM_VL15_BUFS * dd->align4k); | |
51fa3ca3 JL |
6684 | if (!dd->piovl15base) { |
6685 | ret = -ENOMEM; | |
fce24a9d | 6686 | goto bail; |
51fa3ca3 | 6687 | } |
f931551b RC |
6688 | } |
6689 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ | |
6690 | ||
6691 | ret = 0; | |
6692 | if (qib_mini_init) | |
6693 | goto bail; | |
6694 | if (!dd->num_pports) { | |
6695 | qib_dev_err(dd, "No ports enabled, giving up initialization\n"); | |
6696 | goto bail; /* no error, so can still figure out why err */ | |
6697 | } | |
6698 | ||
6699 | write_7322_initregs(dd); | |
6700 | ret = qib_create_ctxts(dd); | |
6701 | init_7322_cntrnames(dd); | |
6702 | ||
6703 | updthresh = 8U; /* update threshold */ | |
6704 | ||
6705 | /* Use all of the 4KB buffers for kernel SDMA (zero if !SDMA). | |
6706 | * Reserve the update-threshold number of buffers for other kernel | |
6707 | * use, such as sending SMI, MAD, and ACKs, or 3, whichever is | |
6708 | * greater; unless we aren't enabling SDMA, in which case the | |
6709 | * kernel gets all of the 4k bufs. | |
6710 | * If the reserve were less than the update threshold, we could | |
6711 | * wait a long time for an update. Coded this way because we | |
6712 | * sometimes change the update threshold for various reasons, | |
6713 | * and we want this to remain robust. | |
6714 | */ | |
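	/*
	 * Illustration (hypothetical counts): with SDMA enabled, all 4k
	 * buffers go to SDMA, so lastbuf_for_pio ends at the top of the
	 * 2k pool, and the last sbufs (here max(updthresh, 3)) of that
	 * pool are held back from user contexts for kernel sends.
	 */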
6715 | if (dd->flags & QIB_HAS_SEND_DMA) { | |
6716 | dd->cspec->sdmabufcnt = dd->piobcnt4k; | |
6717 | sbufs = updthresh > 3 ? updthresh : 3; | |
6718 | } else { | |
6719 | dd->cspec->sdmabufcnt = 0; | |
6720 | sbufs = dd->piobcnt4k; | |
6721 | } | |
6722 | dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - | |
6723 | dd->cspec->sdmabufcnt; | |
6724 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | |
6725 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | |
bb77a077 | 6726 | dd->last_pio = dd->cspec->lastbuf_for_pio; |
f931551b RC |
6727 | dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? |
6728 | dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; | |
6729 | ||
6730 | /* | |
6731 | * If we have 16 user contexts, we will have 7 sbufs | |
6732 | * per context, so reduce the update threshold to match. We | |
6733 | * want to update before we actually run out, at low pbufs/ctxt | |
6734 | * so give ourselves some margin. | |
6735 | */ | |
6736 | if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) | |
6737 | updthresh = dd->pbufsctxt - 2; | |
6738 | dd->cspec->updthresh_dflt = updthresh; | |
6739 | dd->cspec->updthresh = updthresh; | |
6740 | ||
6741 | /* before full enable, no interrupts, no locking needed */ | |
6742 | dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) | |
6743 | << SYM_LSB(SendCtrl, AvailUpdThld)) | | |
6744 | SYM_MASK(SendCtrl, SendBufAvailPad64Byte); | |
6745 | ||
6746 | dd->psxmitwait_supported = 1; | |
6747 | dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; | |
6748 | bail: | |
6749 | if (!dd->ctxtcnt) | |
6750 | dd->ctxtcnt = 1; /* for other initialization code */ | |
6751 | ||
6752 | return ret; | |
6753 | } | |
6754 | ||
6755 | static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc, | |
6756 | u32 *pbufnum) | |
6757 | { | |
6758 | u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; | |
6759 | struct qib_devdata *dd = ppd->dd; | |
6760 | ||
6761 | /* last is same for 2k and 4k, because we use 4k if all 2k busy */ | |
6762 | if (pbc & PBC_7322_VL15_SEND) { | |
6763 | first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; | |
6764 | last = first; | |
6765 | } else { | |
6766 | if ((plen + 1) > dd->piosize2kmax_dwords) | |
6767 | first = dd->piobcnt2k; | |
6768 | else | |
6769 | first = 0; | |
6770 | last = dd->cspec->lastbuf_for_pio; | |
6771 | } | |
6772 | return qib_getsendbuf_range(dd, pbufnum, first, last); | |
6773 | } | |
6774 | ||
6775 | static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv, | |
6776 | u32 start) | |
6777 | { | |
6778 | qib_write_kreg_port(ppd, krp_psinterval, intv); | |
6779 | qib_write_kreg_port(ppd, krp_psstart, start); | |
6780 | } | |
6781 | ||
6782 | /* | |
6783 | * Must be called with sdma_lock held, or before init finished. | |
6784 | */ | |
6785 | static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) | |
6786 | { | |
6787 | qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt); | |
6788 | } | |
6789 | ||
0b3ddf38 DL |
6790 | /* |
6791 | * sdma_lock should be acquired before calling this routine | |
6792 | */ | |
6793 | static void dump_sdma_7322_state(struct qib_pportdata *ppd) | |
6794 | { | |
6795 | u64 reg, reg1, reg2; | |
6796 | ||
6797 | reg = qib_read_kreg_port(ppd, krp_senddmastatus); | |
6798 | qib_dev_porterr(ppd->dd, ppd->port, | |
6799 | "SDMA senddmastatus: 0x%016llx\n", reg); | |
6800 | ||
6801 | reg = qib_read_kreg_port(ppd, krp_sendctrl); | |
6802 | qib_dev_porterr(ppd->dd, ppd->port, | |
6803 | "SDMA sendctrl: 0x%016llx\n", reg); | |
6804 | ||
6805 | reg = qib_read_kreg_port(ppd, krp_senddmabase); | |
6806 | qib_dev_porterr(ppd->dd, ppd->port, | |
6807 | "SDMA senddmabase: 0x%016llx\n", reg); | |
6808 | ||
6809 | reg = qib_read_kreg_port(ppd, krp_senddmabufmask0); | |
6810 | reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1); | |
6811 | reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2); | |
6812 | qib_dev_porterr(ppd->dd, ppd->port, | |
6813 | "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n", | |
6814 | reg, reg1, reg2); | |
6815 | ||
6816 | /* get bufuse bits, clear them, and print them again if non-zero */ | |
6817 | reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0); | |
6818 | qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg); | |
6819 | reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1); | |
6820 | qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1); | |
6821 | reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2); | |
6822 | qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2); | |
6823 | /* 0 and 1 should always be zero, so print as short form */ | |
6824 | qib_dev_porterr(ppd->dd, ppd->port, | |
6825 | "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n", | |
6826 | reg, reg1, reg2); | |
6827 | reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0); | |
6828 | reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1); | |
6829 | reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2); | |
6830 | /* 0 and 1 should always be zero, so print as short form */ | |
6831 | qib_dev_porterr(ppd->dd, ppd->port, | |
6832 | "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n", | |
6833 | reg, reg1, reg2); | |
6834 | ||
6835 | reg = qib_read_kreg_port(ppd, krp_senddmatail); | |
6836 | qib_dev_porterr(ppd->dd, ppd->port, | |
6837 | "SDMA senddmatail: 0x%016llx\n", reg); | |
6838 | ||
6839 | reg = qib_read_kreg_port(ppd, krp_senddmahead); | |
6840 | qib_dev_porterr(ppd->dd, ppd->port, | |
6841 | "SDMA senddmahead: 0x%016llx\n", reg); | |
6842 | ||
6843 | reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr); | |
6844 | qib_dev_porterr(ppd->dd, ppd->port, | |
6845 | "SDMA senddmaheadaddr: 0x%016llx\n", reg); | |
6846 | ||
6847 | reg = qib_read_kreg_port(ppd, krp_senddmalengen); | |
6848 | qib_dev_porterr(ppd->dd, ppd->port, | |
6849 | "SDMA senddmalengen: 0x%016llx\n", reg); | |
6850 | ||
6851 | reg = qib_read_kreg_port(ppd, krp_senddmadesccnt); | |
6852 | qib_dev_porterr(ppd->dd, ppd->port, | |
6853 | "SDMA senddmadesccnt: 0x%016llx\n", reg); | |
6854 | ||
6855 | reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt); | |
6856 | qib_dev_porterr(ppd->dd, ppd->port, | |
6857 | "SDMA senddmaidlecnt: 0x%016llx\n", reg); | |
6858 | ||
6859 | reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld); | |
6860 | qib_dev_porterr(ppd->dd, ppd->port, | |
6861 | "SDMA senddmapriorityhld: 0x%016llx\n", reg); | |
6862 | ||
6863 | reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt); | |
6864 | qib_dev_porterr(ppd->dd, ppd->port, | |
6865 | "SDMA senddmareloadcnt: 0x%016llx\n", reg); | |
6866 | ||
6867 | dump_sdma_state(ppd); | |
6868 | } | |
6869 | ||
f931551b RC |
6870 | static struct sdma_set_state_action sdma_7322_action_table[] = { |
6871 | [qib_sdma_state_s00_hw_down] = { | |
6872 | .go_s99_running_tofalse = 1, | |
6873 | .op_enable = 0, | |
6874 | .op_intenable = 0, | |
6875 | .op_halt = 0, | |
6876 | .op_drain = 0, | |
6877 | }, | |
6878 | [qib_sdma_state_s10_hw_start_up_wait] = { | |
6879 | .op_enable = 0, | |
6880 | .op_intenable = 1, | |
6881 | .op_halt = 1, | |
6882 | .op_drain = 0, | |
6883 | }, | |
6884 | [qib_sdma_state_s20_idle] = { | |
6885 | .op_enable = 1, | |
6886 | .op_intenable = 1, | |
6887 | .op_halt = 1, | |
6888 | .op_drain = 0, | |
6889 | }, | |
6890 | [qib_sdma_state_s30_sw_clean_up_wait] = { | |
6891 | .op_enable = 0, | |
6892 | .op_intenable = 1, | |
6893 | .op_halt = 1, | |
6894 | .op_drain = 0, | |
6895 | }, | |
6896 | [qib_sdma_state_s40_hw_clean_up_wait] = { | |
6897 | .op_enable = 1, | |
6898 | .op_intenable = 1, | |
6899 | .op_halt = 1, | |
6900 | .op_drain = 0, | |
6901 | }, | |
6902 | [qib_sdma_state_s50_hw_halt_wait] = { | |
6903 | .op_enable = 1, | |
6904 | .op_intenable = 1, | |
6905 | .op_halt = 1, | |
6906 | .op_drain = 1, | |
6907 | }, | |
6908 | [qib_sdma_state_s99_running] = { | |
6909 | .op_enable = 1, | |
6910 | .op_intenable = 1, | |
6911 | .op_halt = 0, | |
6912 | .op_drain = 0, | |
6913 | .go_s99_running_totrue = 1, | |
6914 | }, | |
6915 | }; | |
6916 | ||
6917 | static void qib_7322_sdma_init_early(struct qib_pportdata *ppd) | |
6918 | { | |
6919 | ppd->sdma_state.set_state_action = sdma_7322_action_table; | |
6920 | } | |
6921 | ||
6922 | static int init_sdma_7322_regs(struct qib_pportdata *ppd) | |
6923 | { | |
6924 | struct qib_devdata *dd = ppd->dd; | |
6925 | unsigned lastbuf, erstbuf; | |
6926 | u64 senddmabufmask[3] = { 0 }; | |
6927 | int n, ret = 0; | |
6928 | ||
6929 | qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys); | |
6930 | qib_sdma_7322_setlengen(ppd); | |
6931 | qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ | |
6932 | qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt); | |
6933 | qib_write_kreg_port(ppd, krp_senddmadesccnt, 0); | |
6934 | qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys); | |
6935 | ||
6936 | if (dd->num_pports) | |
6937 | n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ | |
6938 | else | |
6939 | n = dd->cspec->sdmabufcnt; /* failsafe for init */ | |
6940 | erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - | |
6941 | ((dd->num_pports == 1 || ppd->port == 2) ? n : | |
6942 | dd->cspec->sdmabufcnt); | |
6943 | lastbuf = erstbuf + n; | |
6944 | ||
6945 | ppd->sdma_state.first_sendbuf = erstbuf; | |
6946 | ppd->sdma_state.last_sendbuf = lastbuf; | |
6947 | for (; erstbuf < lastbuf; ++erstbuf) { | |
6948 | unsigned word = erstbuf / BITS_PER_LONG; | |
6949 | unsigned bit = erstbuf & (BITS_PER_LONG - 1); | |
6950 | ||
6951 | BUG_ON(word >= 3); | |
6952 | senddmabufmask[word] |= 1ULL << bit; | |
6953 | } | |
6954 | qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]); | |
6955 | qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]); | |
6956 | qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]); | |
6957 | return ret; | |
6958 | } | |
6959 | ||
6960 | /* sdma_lock must be held */ | |
6961 | static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd) | |
6962 | { | |
6963 | struct qib_devdata *dd = ppd->dd; | |
6964 | int sane; | |
6965 | int use_dmahead; | |
6966 | u16 swhead; | |
6967 | u16 swtail; | |
6968 | u16 cnt; | |
6969 | u16 hwhead; | |
6970 | ||
6971 | use_dmahead = __qib_sdma_running(ppd) && | |
6972 | (dd->flags & QIB_HAS_SDMA_TIMEOUT); | |
6973 | retry: | |
6974 | hwhead = use_dmahead ? | |
6975 | (u16) le64_to_cpu(*ppd->sdma_head_dma) : | |
6976 | (u16) qib_read_kreg_port(ppd, krp_senddmahead); | |
6977 | ||
6978 | swhead = ppd->sdma_descq_head; | |
6979 | swtail = ppd->sdma_descq_tail; | |
6980 | cnt = ppd->sdma_descq_cnt; | |
6981 | ||
6982 | if (swhead < swtail) | |
6983 | /* not wrapped */ | |
6984 | 		sane = (hwhead >= swhead) && (hwhead <= swtail); | |
6985 | else if (swhead > swtail) | |
6986 | /* wrapped around */ | |
6987 | sane = ((hwhead >= swhead) && (hwhead < cnt)) || | |
6988 | (hwhead <= swtail); | |
6989 | else | |
6990 | /* empty */ | |
6991 | sane = (hwhead == swhead); | |
6992 | ||
6993 | if (unlikely(!sane)) { | |
6994 | if (use_dmahead) { | |
6995 | /* try one more time, directly from the register */ | |
6996 | use_dmahead = 0; | |
6997 | goto retry; | |
6998 | } | |
6999 | /* proceed as if no progress */ | |
7000 | hwhead = swhead; | |
7001 | } | |
7002 | ||
7003 | return hwhead; | |
7004 | } | |
7005 | ||
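 | /* |
 |  * Return non-zero while the SDMA engine still has work in flight: a |
 |  * scoreboard drain or halt is in progress, the internal halt has not |
 |  * yet been reached, or the scoreboard is not yet empty. |
 |  */ |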
7006 | static int qib_sdma_7322_busy(struct qib_pportdata *ppd) | |
7007 | { | |
7008 | u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus); | |
7009 | ||
7010 | return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) || | |
7011 | (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) || | |
7012 | !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) || | |
7013 | !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty)); | |
7014 | } | |
7015 | ||
7016 | /* | |
7017 | * Compute the amount of delay before sending the next packet if the | |
7018 | * port's send rate differs from the static rate set for the QP. | |
7019 | * The delay affects the next packet and the amount of the delay is | |
7020 |  * based on the length of this packet. | |
7021 | */ | |
7022 | static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen, | |
7023 | u8 srate, u8 vl) | |
7024 | { | |
7025 | u8 snd_mult = ppd->delay_mult; | |
7026 | u8 rcv_mult = ib_rate_to_delay[srate]; | |
7027 | u32 ret; | |
7028 | ||
7029 | ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0; | |
7030 | ||
7031 | /* Indicate VL15, else set the VL in the control word */ | |
7032 | if (vl == 15) | |
7033 | ret |= PBC_7322_VL15_SEND_CTRL; | |
7034 | else | |
7035 | ret |= vl << PBC_VL_NUM_LSB; | |
7036 | ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB; | |
7037 | ||
7038 | return ret; | |
7039 | } | |
7040 | ||
7041 | /* | |
7042 | * Enable the per-port VL15 send buffers for use. | |
7043 | * They follow the rest of the buffers, without a config parameter. | |
7044 | * This was in initregs, but that is done before the shadow | |
7045 | * is set up, and this has to be done after the shadow is | |
7046 | * set up. | |
7047 | */ | |
7048 | static void qib_7322_initvl15_bufs(struct qib_devdata *dd) | |
7049 | { | |
7050 | unsigned vl15bufs; | |
7051 | ||
7052 | vl15bufs = dd->piobcnt2k + dd->piobcnt4k; | |
7053 | qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS, | |
7054 | TXCHK_CHG_TYPE_KERN, NULL); | |
7055 | } | |
7056 | ||
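 | /* |
 |  * Set the eager buffer count and TID base for a receive context. |
 |  * Kernel contexts (one per IB port) split KCTXT0_EGRCNT between them; |
 |  * the remaining (user) contexts each get cspec->rcvegrcnt, following. |
 |  */ |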
7057 | static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd) | |
7058 | { | |
7059 | if (rcd->ctxt < NUM_IB_PORTS) { | |
7060 | if (rcd->dd->num_pports > 1) { | |
7061 | rcd->rcvegrcnt = KCTXT0_EGRCNT / 2; | |
7062 | rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0; | |
7063 | } else { | |
7064 | rcd->rcvegrcnt = KCTXT0_EGRCNT; | |
7065 | rcd->rcvegr_tid_base = 0; | |
7066 | } | |
7067 | } else { | |
7068 | rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; | |
7069 | rcd->rcvegr_tid_base = KCTXT0_EGRCNT + | |
7070 | (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; | |
7071 | } | |
7072 | } | |
7073 | ||
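 | /* |
 |  * Change the send-buffer checking mode (kernel vs. user, or |
 |  * enable/disable for diags) for a range of PIO buffers, first waiting |
 |  * for still-busy buffers when flipping them to user ownership. |
 |  */ |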
7074 | #define QTXSLEEPS 5000 | |
7075 | static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, | |
7076 | u32 len, u32 which, struct qib_ctxtdata *rcd) | |
7077 | { | |
7078 | int i; | |
7079 | const int last = start + len - 1; | |
7080 | const int lastr = last / BITS_PER_LONG; | |
7081 | u32 sleeps = 0; | |
7082 | int wait = rcd != NULL; | |
7083 | unsigned long flags; | |
7084 | ||
7085 | while (wait) { | |
7086 | unsigned long shadow; | |
7087 | int cstart, previ = -1; | |
7088 | ||
7089 | /* | |
7090 | * when flipping from kernel to user, we can't change | |
7091 | * the checking type if the buffer is allocated to the | |
7092 | * driver. It's OK the other direction, because it's | |
7093 | 		 * from close, and we have just disarmed all the | |
7094 | * buffers. All the kernel to kernel changes are also | |
7095 | * OK. | |
7096 | */ | |
7097 | for (cstart = start; cstart <= last; cstart++) { | |
7098 | i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) | |
7099 | / BITS_PER_LONG; | |
7100 | if (i != previ) { | |
7101 | shadow = (unsigned long) | |
7102 | le64_to_cpu(dd->pioavailregs_dma[i]); | |
7103 | previ = i; | |
7104 | } | |
7105 | if (test_bit(((2 * cstart) + | |
7106 | QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) | |
7107 | % BITS_PER_LONG, &shadow)) | |
7108 | break; | |
7109 | } | |
7110 | ||
7111 | if (cstart > last) | |
7112 | break; | |
7113 | ||
7114 | if (sleeps == QTXSLEEPS) | |
7115 | break; | |
7116 | /* make sure we see an updated copy next time around */ | |
7117 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
7118 | sleeps++; | |
a0a234d4 | 7119 | msleep(20); |
f931551b RC |
7120 | } |
7121 | ||
7122 | switch (which) { | |
7123 | case TXCHK_CHG_TYPE_DIS1: | |
7124 | /* | |
7125 | * disable checking on a range; used by diags; just | |
7126 | * one buffer, but still written generically | |
7127 | */ | |
7128 | for (i = start; i <= last; i++) | |
7129 | clear_bit(i, dd->cspec->sendchkenable); | |
7130 | break; | |
7131 | ||
7132 | case TXCHK_CHG_TYPE_ENAB1: | |
7133 | /* | |
7134 | * (re)enable checking on a range; used by diags; just | |
7135 | * one buffer, but still written generically; read | |
7136 | * scratch to be sure buffer actually triggered, not | |
7137 | * just flushed from processor. | |
7138 | */ | |
7139 | qib_read_kreg32(dd, kr_scratch); | |
7140 | for (i = start; i <= last; i++) | |
7141 | set_bit(i, dd->cspec->sendchkenable); | |
7142 | break; | |
7143 | ||
7144 | case TXCHK_CHG_TYPE_KERN: | |
7145 | /* usable by kernel */ | |
7146 | for (i = start; i <= last; i++) { | |
7147 | set_bit(i, dd->cspec->sendibchk); | |
7148 | clear_bit(i, dd->cspec->sendgrhchk); | |
7149 | } | |
7150 | spin_lock_irqsave(&dd->uctxt_lock, flags); | |
7151 | /* see if we need to raise avail update threshold */ | |
7152 | for (i = dd->first_user_ctxt; | |
7153 | dd->cspec->updthresh != dd->cspec->updthresh_dflt | |
7154 | && i < dd->cfgctxts; i++) | |
7155 | if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && | |
7156 | ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) | |
7157 | < dd->cspec->updthresh_dflt) | |
7158 | break; | |
7159 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | |
7160 | if (i == dd->cfgctxts) { | |
7161 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
7162 | dd->cspec->updthresh = dd->cspec->updthresh_dflt; | |
7163 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | |
7164 | dd->sendctrl |= (dd->cspec->updthresh & | |
7165 | SYM_RMASK(SendCtrl, AvailUpdThld)) << | |
7166 | SYM_LSB(SendCtrl, AvailUpdThld); | |
7167 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
7168 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
7169 | } | |
7170 | break; | |
7171 | ||
7172 | case TXCHK_CHG_TYPE_USER: | |
7173 | /* for user process */ | |
7174 | for (i = start; i <= last; i++) { | |
7175 | clear_bit(i, dd->cspec->sendibchk); | |
7176 | set_bit(i, dd->cspec->sendgrhchk); | |
7177 | } | |
7178 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
7179 | if (rcd && rcd->subctxt_cnt && ((rcd->piocnt | |
7180 | / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { | |
7181 | dd->cspec->updthresh = (rcd->piocnt / | |
7182 | rcd->subctxt_cnt) - 1; | |
7183 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | |
7184 | dd->sendctrl |= (dd->cspec->updthresh & | |
7185 | SYM_RMASK(SendCtrl, AvailUpdThld)) | |
7186 | << SYM_LSB(SendCtrl, AvailUpdThld); | |
7187 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
7188 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
7189 | } else | |
7190 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
7191 | break; | |
7192 | ||
7193 | default: | |
7194 | break; | |
7195 | } | |
7196 | ||
7197 | for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i) | |
7198 | qib_write_kreg(dd, kr_sendcheckmask + i, | |
7199 | dd->cspec->sendchkenable[i]); | |
7200 | ||
7201 | for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) { | |
7202 | qib_write_kreg(dd, kr_sendgrhcheckmask + i, | |
7203 | dd->cspec->sendgrhchk[i]); | |
7204 | qib_write_kreg(dd, kr_sendibpktmask + i, | |
7205 | dd->cspec->sendibchk[i]); | |
7206 | } | |
7207 | ||
7208 | /* | |
7209 | * Be sure whatever we did was seen by the chip and acted upon, | |
7210 | * before we return. Mostly important for which >= 2. | |
7211 | */ | |
7212 | qib_read_kreg32(dd, kr_scratch); | |
7213 | } | |
7214 | ||
7215 | ||
7216 | /* useful for trigger analyzers, etc. */ | |
7217 | static void writescratch(struct qib_devdata *dd, u32 val) | |
7218 | { | |
7219 | qib_write_kreg(dd, kr_scratch, val); | |
7220 | } | |
7221 | ||
7222 | /* Dummy for now, use chip regs soon */ | |
7223 | static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum) | |
7224 | { | |
7225 | return -ENXIO; | |
7226 | } | |
7227 | ||
7228 | /** | |
7229 | * qib_init_iba7322_funcs - set up the chip-specific function pointers | |
7230 |  * @pdev: the pci_dev for qlogic_ib device | |
7231 | * @ent: pci_device_id struct for this dev | |
7232 | * | |
7233 | * Also allocates, inits, and returns the devdata struct for this | |
7234 | * device instance | |
7235 | * | |
7236 | * This is global, and is called directly at init to set up the | |
7237 | * chip-specific function pointers for later use. | |
7238 | */ | |
7239 | struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |
7240 | const struct pci_device_id *ent) | |
7241 | { | |
7242 | struct qib_devdata *dd; | |
7243 | int ret, i; | |
7244 | u32 tabsize, actual_cnt = 0; | |
7245 | ||
7246 | dd = qib_alloc_devdata(pdev, | |
7247 | NUM_IB_PORTS * sizeof(struct qib_pportdata) + | |
7248 | sizeof(struct qib_chip_specific) + | |
7249 | NUM_IB_PORTS * sizeof(struct qib_chippport_specific)); | |
7250 | if (IS_ERR(dd)) | |
7251 | goto bail; | |
7252 | ||
7253 | dd->f_bringup_serdes = qib_7322_bringup_serdes; | |
7254 | dd->f_cleanup = qib_setup_7322_cleanup; | |
7255 | dd->f_clear_tids = qib_7322_clear_tids; | |
7256 | dd->f_free_irq = qib_7322_free_irq; | |
7257 | dd->f_get_base_info = qib_7322_get_base_info; | |
7258 | dd->f_get_msgheader = qib_7322_get_msgheader; | |
7259 | dd->f_getsendbuf = qib_7322_getsendbuf; | |
7260 | dd->f_gpio_mod = gpio_7322_mod; | |
7261 | dd->f_eeprom_wen = qib_7322_eeprom_wen; | |
7262 | dd->f_hdrqempty = qib_7322_hdrqempty; | |
7263 | dd->f_ib_updown = qib_7322_ib_updown; | |
7264 | dd->f_init_ctxt = qib_7322_init_ctxt; | |
7265 | dd->f_initvl15_bufs = qib_7322_initvl15_bufs; | |
7266 | dd->f_intr_fallback = qib_7322_intr_fallback; | |
7267 | dd->f_late_initreg = qib_late_7322_initreg; | |
7268 | dd->f_setpbc_control = qib_7322_setpbc_control; | |
7269 | dd->f_portcntr = qib_portcntr_7322; | |
7270 | dd->f_put_tid = qib_7322_put_tid; | |
7271 | dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; | |
7272 | dd->f_rcvctrl = rcvctrl_7322_mod; | |
7273 | dd->f_read_cntrs = qib_read_7322cntrs; | |
7274 | dd->f_read_portcntrs = qib_read_7322portcntrs; | |
7275 | dd->f_reset = qib_do_7322_reset; | |
7276 | dd->f_init_sdma_regs = init_sdma_7322_regs; | |
7277 | dd->f_sdma_busy = qib_sdma_7322_busy; | |
7278 | dd->f_sdma_gethead = qib_sdma_7322_gethead; | |
7279 | dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; | |
7280 | dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; | |
7281 | dd->f_sdma_update_tail = qib_sdma_update_7322_tail; | |
7282 | dd->f_sendctrl = sendctrl_7322_mod; | |
7283 | dd->f_set_armlaunch = qib_set_7322_armlaunch; | |
7284 | dd->f_set_cntr_sample = qib_set_cntr_7322_sample; | |
7285 | dd->f_iblink_state = qib_7322_iblink_state; | |
7286 | dd->f_ibphys_portstate = qib_7322_phys_portstate; | |
7287 | dd->f_get_ib_cfg = qib_7322_get_ib_cfg; | |
7288 | dd->f_set_ib_cfg = qib_7322_set_ib_cfg; | |
7289 | dd->f_set_ib_loopback = qib_7322_set_loopback; | |
7290 | dd->f_get_ib_table = qib_7322_get_ib_table; | |
7291 | dd->f_set_ib_table = qib_7322_set_ib_table; | |
7292 | dd->f_set_intr_state = qib_7322_set_intr_state; | |
7293 | dd->f_setextled = qib_setup_7322_setextled; | |
7294 | dd->f_txchk_change = qib_7322_txchk_change; | |
7295 | dd->f_update_usrhead = qib_update_7322_usrhead; | |
7296 | dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; | |
7297 | dd->f_xgxs_reset = qib_7322_mini_pcs_reset; | |
7298 | dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; | |
7299 | dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; | |
7300 | dd->f_sdma_init_early = qib_7322_sdma_init_early; | |
7301 | dd->f_writescratch = writescratch; | |
7302 | dd->f_tempsense_rd = qib_7322_tempsense_rd; | |
8469ba39 MM |
7303 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
7304 | dd->f_notify_dca = qib_7322_notify_dca; | |
7305 | #endif | |
f931551b RC |
7306 | /* |
7307 | * Do remaining PCIe setup and save PCIe values in dd. | |
7308 | * Any error printing is already done by the init code. | |
7309 | * On return, we have the chip mapped, but chip registers | |
7310 | * are not set up until start of qib_init_7322_variables. | |
7311 | */ | |
7312 | ret = qib_pcie_ddinit(dd, pdev, ent); | |
7313 | if (ret < 0) | |
7314 | goto bail_free; | |
7315 | ||
7316 | /* initialize chip-specific variables */ | |
7317 | ret = qib_init_7322_variables(dd); | |
7318 | if (ret) | |
7319 | goto bail_cleanup; | |
7320 | ||
7321 | if (qib_mini_init || !dd->num_pports) | |
7322 | goto bail; | |
7323 | ||
7324 | /* | |
7325 | * Determine number of vectors we want; depends on port count | |
7326 | * and number of configured kernel receive queues actually used. | |
7327 | * Should also depend on whether sdma is enabled or not, but | |
7328 | * that's such a rare testing case it's not worth worrying about. | |
7329 | */ | |
7330 | tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); | |
7331 | for (i = 0; i < tabsize; i++) | |
7332 | if ((i < ARRAY_SIZE(irq_table) && | |
7333 | irq_table[i].port <= dd->num_pports) || | |
7334 | (i >= ARRAY_SIZE(irq_table) && | |
7335 | dd->rcd[i - ARRAY_SIZE(irq_table)])) | |
7336 | actual_cnt++; | |
e67306a3 MM |
7337 | /* reduce by ctxt's < 2 */ |
7338 | if (qib_krcvq01_no_msi) | |
7339 | actual_cnt -= dd->num_pports; | |
7340 | ||
f931551b | 7341 | tabsize = actual_cnt; |
8469ba39 | 7342 | dd->cspec->msix_entries = kzalloc(tabsize * |
a778f3fd MM |
7343 | sizeof(struct qib_msix_entry), GFP_KERNEL); |
7344 | if (!dd->cspec->msix_entries) { | |
f931551b RC |
7345 | qib_dev_err(dd, "No memory for MSIx table\n"); |
7346 | tabsize = 0; | |
7347 | } | |
7348 | for (i = 0; i < tabsize; i++) | |
a778f3fd | 7349 | dd->cspec->msix_entries[i].msix.entry = i; |
f931551b RC |
7350 | |
7351 | if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) | |
7fac3301 MM |
7352 | qib_dev_err(dd, |
7353 | "Failed to setup PCIe or interrupts; continuing anyway\n"); | |
f931551b RC |
7354 | /* may be less than we wanted, if not enough available */ |
7355 | dd->cspec->num_msix_entries = tabsize; | |
7356 | ||
7357 | /* setup interrupt handler */ | |
7358 | qib_setup_7322_interrupt(dd, 1); | |
7359 | ||
7360 | /* clear diagctrl register, in case diags were running and crashed */ | |
7361 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | |
8469ba39 MM |
7362 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
7363 | if (!dca_add_requester(&pdev->dev)) { | |
7364 | qib_devinfo(dd->pcidev, "DCA enabled\n"); | |
7365 | dd->flags |= QIB_DCA_ENABLED; | |
7366 | qib_setup_dca(dd); | |
7367 | } | |
7368 | #endif | |
f931551b RC |
7369 | goto bail; |
7370 | ||
7371 | bail_cleanup: | |
7372 | qib_pcie_ddcleanup(dd); | |
7373 | bail_free: | |
7374 | qib_free_devdata(dd); | |
7375 | dd = ERR_PTR(ret); | |
7376 | bail: | |
7377 | return dd; | |
7378 | } | |
7379 | ||
7380 | /* | |
7381 |  * Set the table entry at the specified index from the table specified. | |
7382 | * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first | |
7383 | * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR. | |
7384 | * 'idx' below addresses the correct entry, while its 4 LSBs select the | |
7385 | * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table. | |
7386 | */ | |
7387 | #define DDS_ENT_AMP_LSB 14 | |
7388 | #define DDS_ENT_MAIN_LSB 9 | |
7389 | #define DDS_ENT_POST_LSB 5 | |
7390 | #define DDS_ENT_PRE_XTRA_LSB 3 | |
7391 | #define DDS_ENT_PRE_LSB 0 | |
7392 | ||
7393 | /* | |
7394 | * Set one entry in the TxDDS table for spec'd port | |
7395 | * ridx picks one of the entries, while tp points | |
7396 | * to the appropriate table entry. | |
7397 | */ | |
7398 | static void set_txdds(struct qib_pportdata *ppd, int ridx, | |
7399 | const struct txdds_ent *tp) | |
7400 | { | |
7401 | struct qib_devdata *dd = ppd->dd; | |
7402 | u32 pack_ent; | |
7403 | int regidx; | |
7404 | ||
7405 | /* Get correct offset in chip-space, and in source table */ | |
7406 | regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx; | |
7407 | /* | |
7408 | * We do not use qib_write_kreg_port() because it was intended | |
7409 | * only for registers in the lower "port specific" pages. | |
7410 | * So do index calculation by hand. | |
7411 | */ | |
7412 | if (ppd->hw_pidx) | |
7413 | regidx += (dd->palign / sizeof(u64)); | |
7414 | ||
7415 | pack_ent = tp->amp << DDS_ENT_AMP_LSB; | |
7416 | pack_ent |= tp->main << DDS_ENT_MAIN_LSB; | |
7417 | pack_ent |= tp->pre << DDS_ENT_PRE_LSB; | |
7418 | pack_ent |= tp->post << DDS_ENT_POST_LSB; | |
7419 | qib_write_kreg(dd, regidx, pack_ent); | |
7420 | /* Prevent back-to-back writes by hitting scratch */ | |
7421 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
7422 | } | |
7423 | ||
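 | /* |
 |  * Table of known QSFP cables, keyed by vendor OUI and (optionally) |
 |  * part number; each entry supplies the SDR, DDR, and QDR TxDDS |
 |  * settings to use with that cable. |
 |  */ |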
7424 | static const struct vendor_txdds_ent vendor_txdds[] = { | |
7425 | { /* Amphenol 1m 30awg NoEq */ | |
7426 | { 0x41, 0x50, 0x48 }, "584470002 ", | |
7427 | { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 }, | |
7428 | }, | |
7429 | { /* Amphenol 3m 28awg NoEq */ | |
7430 | { 0x41, 0x50, 0x48 }, "584470004 ", | |
7431 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 }, | |
7432 | }, | |
7433 | { /* Finisar 3m OM2 Optical */ | |
7434 | { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL", | |
7435 | { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 }, | |
7436 | }, | |
7437 | { /* Finisar 30m OM2 Optical */ | |
7438 | { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL", | |
7439 | { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 }, | |
7440 | }, | |
7441 | { /* Finisar Default OM2 Optical */ | |
7442 | { 0x00, 0x90, 0x65 }, NULL, | |
7443 | { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 }, | |
7444 | }, | |
7445 | { /* Gore 1m 30awg NoEq */ | |
7446 | { 0x00, 0x21, 0x77 }, "QSN3300-1 ", | |
7447 | { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 }, | |
7448 | }, | |
7449 | { /* Gore 2m 30awg NoEq */ | |
7450 | { 0x00, 0x21, 0x77 }, "QSN3300-2 ", | |
7451 | { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 }, | |
7452 | }, | |
7453 | { /* Gore 1m 28awg NoEq */ | |
7454 | { 0x00, 0x21, 0x77 }, "QSN3800-1 ", | |
7455 | { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 }, | |
7456 | }, | |
7457 | { /* Gore 3m 28awg NoEq */ | |
7458 | { 0x00, 0x21, 0x77 }, "QSN3800-3 ", | |
7459 | { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 }, | |
7460 | }, | |
7461 | { /* Gore 5m 24awg Eq */ | |
7462 | { 0x00, 0x21, 0x77 }, "QSN7000-5 ", | |
7463 | { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 }, | |
7464 | }, | |
7465 | { /* Gore 7m 24awg Eq */ | |
7466 | { 0x00, 0x21, 0x77 }, "QSN7000-7 ", | |
7467 | { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 }, | |
7468 | }, | |
7469 | { /* Gore 5m 26awg Eq */ | |
7470 | { 0x00, 0x21, 0x77 }, "QSN7600-5 ", | |
7471 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 }, | |
7472 | }, | |
7473 | { /* Gore 7m 26awg Eq */ | |
7474 | { 0x00, 0x21, 0x77 }, "QSN7600-7 ", | |
7475 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 }, | |
7476 | }, | |
7477 | { /* Intersil 12m 24awg Active */ | |
7478 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224", | |
7479 | { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 }, | |
7480 | }, | |
7481 | { /* Intersil 10m 28awg Active */ | |
7482 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028", | |
7483 | { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 }, | |
7484 | }, | |
7485 | { /* Intersil 7m 30awg Active */ | |
7486 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730", | |
7487 | { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 }, | |
7488 | }, | |
7489 | { /* Intersil 5m 32awg Active */ | |
7490 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532", | |
7491 | { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 }, | |
7492 | }, | |
7493 | { /* Intersil Default Active */ | |
7494 | { 0x00, 0x30, 0xB4 }, NULL, | |
7495 | { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 }, | |
7496 | }, | |
7497 | { /* Luxtera 20m Active Optical */ | |
7498 | { 0x00, 0x25, 0x63 }, NULL, | |
7499 | { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 }, | |
7500 | }, | |
7501 | { /* Molex 1M Cu loopback */ | |
7502 | { 0x00, 0x09, 0x3A }, "74763-0025 ", | |
7503 | { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, | |
7504 | }, | |
7505 | { /* Molex 2m 28awg NoEq */ | |
7506 | { 0x00, 0x09, 0x3A }, "74757-2201 ", | |
7507 | { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 }, | |
7508 | }, | |
7509 | }; | |
7510 | ||
7511 | static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = { | |
7512 | /* amp, pre, main, post */ | |
7513 | { 2, 2, 15, 6 }, /* Loopback */ | |
7514 | { 0, 0, 0, 1 }, /* 2 dB */ | |
7515 | { 0, 0, 0, 2 }, /* 3 dB */ | |
7516 | { 0, 0, 0, 3 }, /* 4 dB */ | |
7517 | { 0, 0, 0, 4 }, /* 5 dB */ | |
7518 | { 0, 0, 0, 5 }, /* 6 dB */ | |
7519 | { 0, 0, 0, 6 }, /* 7 dB */ | |
7520 | { 0, 0, 0, 7 }, /* 8 dB */ | |
7521 | { 0, 0, 0, 8 }, /* 9 dB */ | |
7522 | { 0, 0, 0, 9 }, /* 10 dB */ | |
7523 | { 0, 0, 0, 10 }, /* 11 dB */ | |
7524 | { 0, 0, 0, 11 }, /* 12 dB */ | |
7525 | { 0, 0, 0, 12 }, /* 13 dB */ | |
7526 | { 0, 0, 0, 13 }, /* 14 dB */ | |
7527 | { 0, 0, 0, 14 }, /* 15 dB */ | |
7528 | { 0, 0, 0, 15 }, /* 16 dB */ | |
7529 | }; | |
7530 | ||
7531 | static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { | |
7532 | /* amp, pre, main, post */ | |
7533 | { 2, 2, 15, 6 }, /* Loopback */ | |
7534 | { 0, 0, 0, 8 }, /* 2 dB */ | |
7535 | { 0, 0, 0, 8 }, /* 3 dB */ | |
7536 | { 0, 0, 0, 9 }, /* 4 dB */ | |
7537 | { 0, 0, 0, 9 }, /* 5 dB */ | |
7538 | { 0, 0, 0, 10 }, /* 6 dB */ | |
7539 | { 0, 0, 0, 10 }, /* 7 dB */ | |
7540 | { 0, 0, 0, 11 }, /* 8 dB */ | |
7541 | { 0, 0, 0, 11 }, /* 9 dB */ | |
7542 | { 0, 0, 0, 12 }, /* 10 dB */ | |
7543 | { 0, 0, 0, 12 }, /* 11 dB */ | |
7544 | { 0, 0, 0, 13 }, /* 12 dB */ | |
7545 | { 0, 0, 0, 13 }, /* 13 dB */ | |
7546 | { 0, 0, 0, 14 }, /* 14 dB */ | |
7547 | { 0, 0, 0, 14 }, /* 15 dB */ | |
7548 | { 0, 0, 0, 15 }, /* 16 dB */ | |
7549 | }; | |
7550 | ||
7551 | static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { | |
7552 | /* amp, pre, main, post */ | |
7553 | { 2, 2, 15, 6 }, /* Loopback */ | |
a77fcf89 RC |
7554 | { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */ |
7555 | { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */ | |
f931551b RC |
7556 | { 0, 1, 0, 11 }, /* 4 dB */ |
7557 | { 0, 1, 0, 13 }, /* 5 dB */ | |
7558 | { 0, 1, 0, 15 }, /* 6 dB */ | |
7559 | { 0, 1, 3, 15 }, /* 7 dB */ | |
7560 | { 0, 1, 7, 15 }, /* 8 dB */ | |
7561 | { 0, 1, 7, 15 }, /* 9 dB */ | |
7562 | { 0, 1, 8, 15 }, /* 10 dB */ | |
7563 | { 0, 1, 9, 15 }, /* 11 dB */ | |
7564 | { 0, 1, 10, 15 }, /* 12 dB */ | |
7565 | { 0, 2, 6, 15 }, /* 13 dB */ | |
7566 | { 0, 2, 7, 15 }, /* 14 dB */ | |
7567 | { 0, 2, 8, 15 }, /* 15 dB */ | |
7568 | { 0, 2, 9, 15 }, /* 16 dB */ | |
7569 | }; | |
7570 | ||
a77fcf89 RC |
7571 | /* |
7572 | * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ. | |
7573 | * These are mostly used for mez cards going through connectors | |
7574 | * and backplane traces, but can be used to add other "unusual" | |
7575 | * table values as well. | |
7576 | */ | |
7577 | static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { | |
7578 | /* amp, pre, main, post */ | |
7579 | { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ | |
7580 | { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ | |
7581 | { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ | |
7582 | { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ | |
7c7a416e RC |
7583 | { 0, 0, 0, 3 }, /* QMH7342 backplane settings */ |
7584 | { 0, 0, 0, 4 }, /* QMH7342 backplane settings */ | |
22baa407 MH |
7585 | { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ |
7586 | { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ | |
7587 | { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ | |
7588 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ | |
7589 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ | |
7590 | { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ | |
7591 | { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ | |
7592 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ | |
7593 | { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ | |
7594 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ | |
7595 | { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ | |
7596 | { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ | |
a77fcf89 RC |
7597 | }; |
7598 | ||
7599 | static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { | |
7600 | /* amp, pre, main, post */ | |
7601 | { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ | |
7602 | { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ | |
7603 | { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ | |
7604 | { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ | |
7c7a416e RC |
7605 | { 0, 0, 0, 9 }, /* QMH7342 backplane settings */ |
7606 | { 0, 0, 0, 10 }, /* QMH7342 backplane settings */ | |
22baa407 MH |
7607 | { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ |
7608 | { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ | |
7609 | { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ | |
7610 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ | |
7611 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ | |
7612 | { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ | |
7613 | { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ | |
7614 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ | |
7615 | { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ | |
7616 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ | |
7617 | { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ | |
7618 | { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ | |
a77fcf89 RC |
7619 | }; |
7620 | ||
7621 | static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | |
7622 | /* amp, pre, main, post */ | |
7623 | { 0, 1, 0, 4 }, /* QMH7342 backplane settings */ | |
7624 | { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ | |
7625 | { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ | |
7626 | { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ | |
7c7a416e RC |
7627 | { 0, 1, 0, 10 }, /* QMH7342 backplane settings */ |
7628 | { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ | |
22baa407 MH |
7629 | { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ |
7630 | { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ | |
7631 | { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ | |
7632 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ | |
7633 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ | |
7634 | { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ | |
7635 | { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ | |
7636 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ | |
7637 | { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ | |
7638 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ | |
7639 | { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ | |
7640 | { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ | |
a77fcf89 RC |
7641 | }; |
7642 | ||
e706203c MM |
7643 | static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = { |
7644 | /* amp, pre, main, post */ | |
7645 | { 0, 0, 0, 0 }, /* QME7342 mfg settings */ | |
7646 | { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */ | |
7647 | }; | |
7648 | ||
f931551b RC |
7649 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, |
7650 | unsigned atten) | |
7651 | { | |
7652 | /* | |
7653 | * The attenuation table starts at 2dB for entry 1, | |
7654 | * with entry 0 being the loopback entry. | |
7655 | */ | |
7656 | if (atten <= 2) | |
7657 | atten = 1; | |
7658 | else if (atten > TXDDS_TABLE_SZ) | |
7659 | atten = TXDDS_TABLE_SZ - 1; | |
7660 | else | |
7661 | atten--; | |
7662 | return txdds + atten; | |
7663 | } | |
7664 | ||
7665 | /* | |
a77fcf89 | 7666 | * if override is set, the module parameter txselect has a value |
f931551b RC |
7667 | * for this specific port, so use it, rather than our normal mechanism. |
7668 | */ | |
7669 | static void find_best_ent(struct qib_pportdata *ppd, | |
7670 | const struct txdds_ent **sdr_dds, | |
7671 | const struct txdds_ent **ddr_dds, | |
7672 | const struct txdds_ent **qdr_dds, int override) | |
7673 | { | |
7674 | struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache; | |
7675 | int idx; | |
7676 | ||
7677 | /* Search table of known cables */ | |
7678 | for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) { | |
7679 | const struct vendor_txdds_ent *v = vendor_txdds + idx; | |
7680 | ||
7681 | if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) && | |
7682 | (!v->partnum || | |
7683 | !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) { | |
7684 | *sdr_dds = &v->sdr; | |
7685 | *ddr_dds = &v->ddr; | |
7686 | *qdr_dds = &v->qdr; | |
7687 | return; | |
7688 | } | |
7689 | } | |
7690 | ||
dde05cbd MH |
7691 | /* Active cables don't have attenuation so we only set SERDES |
7692 | * settings to account for the attenuation of the board traces. */ | |
f931551b RC |
7693 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { |
7694 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; | |
7695 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; | |
7696 | *qdr_dds = txdds_qdr + ppd->dd->board_atten; | |
7697 | return; | |
7698 | } | |
7699 | ||
7700 | if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] || | |
7701 | qd->atten[1])) { | |
7702 | *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]); | |
7703 | *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); | |
7704 | *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); | |
7705 | return; | |
a77fcf89 | 7706 | } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) { |
f931551b RC |
7707 | /* |
7708 | * If we have no (or incomplete) data from the cable | |
a77fcf89 RC |
7709 | * EEPROM, or no QSFP, or override is set, use the |
7710 | 		 * module parameter value to index into the attenuation | |
7711 | * table. | |
f931551b | 7712 | */ |
a77fcf89 RC |
7713 | idx = ppd->cpspec->no_eep; |
7714 | *sdr_dds = &txdds_sdr[idx]; | |
7715 | *ddr_dds = &txdds_ddr[idx]; | |
7716 | *qdr_dds = &txdds_qdr[idx]; | |
7717 | } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { | |
7718 | /* similar to above, but index into the "extra" table. */ | |
7719 | idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ; | |
7720 | *sdr_dds = &txdds_extra_sdr[idx]; | |
7721 | *ddr_dds = &txdds_extra_ddr[idx]; | |
7722 | *qdr_dds = &txdds_extra_qdr[idx]; | |
e706203c MM |
7723 | } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && |
7724 | ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | |
7725 | TXDDS_MFG_SZ)) { | |
7726 | idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); | |
7fac3301 | 7727 | pr_info("IB%u:%u use idx %u into txdds_mfg\n", |
e706203c MM |
7728 | ppd->dd->unit, ppd->port, idx); |
7729 | *sdr_dds = &txdds_extra_mfg[idx]; | |
7730 | *ddr_dds = &txdds_extra_mfg[idx]; | |
7731 | *qdr_dds = &txdds_extra_mfg[idx]; | |
a77fcf89 RC |
7732 | } else { |
7733 | /* this shouldn't happen, it's range checked */ | |
7734 | *sdr_dds = txdds_sdr + qib_long_atten; | |
7735 | *ddr_dds = txdds_ddr + qib_long_atten; | |
7736 | *qdr_dds = txdds_qdr + qib_long_atten; | |
f931551b RC |
7737 | } |
7738 | } | |
7739 | ||
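 | /* |
 |  * Program the port's TxDDS tables (SDR, DDR, and QDR sections) with |
 |  * the best entry for the attached cable; if the link is already up, |
 |  * also write the active-speed settings directly to the serdes. |
 |  */ |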
7740 | static void init_txdds_table(struct qib_pportdata *ppd, int override) | |
7741 | { | |
7742 | const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; | |
7743 | struct txdds_ent *dds; | |
7744 | int idx; | |
7745 | int single_ent = 0; | |
7746 | ||
a77fcf89 RC |
7747 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); |
7748 | ||
7749 | /* for mez cards or override, use the selected value for all entries */ | |
7750 | if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) | |
f931551b | 7751 | single_ent = 1; |
f931551b RC |
7752 | |
7753 | /* Fill in the first entry with the best entry found. */ | |
7754 | set_txdds(ppd, 0, sdr_dds); | |
7755 | set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); | |
7756 | set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); | |
a77fcf89 RC |
7757 | if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | |
7758 | QIBL_LINKACTIVE)) { | |
7759 | dds = (struct txdds_ent *)(ppd->link_speed_active == | |
7760 | QIB_IB_QDR ? qdr_dds : | |
7761 | (ppd->link_speed_active == | |
7762 | QIB_IB_DDR ? ddr_dds : sdr_dds)); | |
7763 | write_tx_serdes_param(ppd, dds); | |
7764 | } | |
f931551b RC |
7765 | |
7766 | /* Fill in the remaining entries with the default table values. */ | |
7767 | for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { | |
7768 | set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx); | |
7769 | set_txdds(ppd, idx + TXDDS_TABLE_SZ, | |
7770 | single_ent ? ddr_dds : txdds_ddr + idx); | |
7771 | set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ, | |
7772 | single_ent ? qdr_dds : txdds_qdr + idx); | |
7773 | } | |
7774 | } | |
7775 | ||
7776 | #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl) | |
7777 | #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg) | |
7778 | #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy) | |
7779 | #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address) | |
7780 | #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data) | |
7781 | #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read) | |
7782 | #define AHB_TRANS_TRIES 10 | |
7783 | ||
7784 | /* | |
7785 |  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3, | |
7786 |  * 5=subsystem, which is why most calls use "chan + (chan >> 1)" | |
7787 | * for the channel argument. | |
7788 | */ | |
7789 | static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, | |
7790 | u32 data, u32 mask) | |
7791 | { | |
7792 | u32 rd_data, wr_data, sz_mask; | |
7793 | u64 trans, acc, prev_acc; | |
7794 | u32 ret = 0xBAD0BAD; | |
7795 | int tries; | |
7796 | ||
7797 | prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); | |
7798 | /* From this point on, make sure we return access */ | |
7799 | acc = (quad << 1) | 1; | |
7800 | qib_write_kreg(dd, KR_AHB_ACC, acc); | |
7801 | ||
7802 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | |
7803 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7804 | if (trans & AHB_TRANS_RDY) | |
7805 | break; | |
7806 | } | |
7807 | if (tries >= AHB_TRANS_TRIES) { | |
7808 | qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); | |
7809 | goto bail; | |
7810 | } | |
7811 | ||
7812 | /* If mask is not all 1s, we need to read, but different SerDes | |
7813 | * entities have different sizes | |
7814 | */ | |
7815 | sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1; | |
7816 | wr_data = data & mask & sz_mask; | |
7817 | if ((~mask & sz_mask) != 0) { | |
7818 | trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); | |
7819 | qib_write_kreg(dd, KR_AHB_TRANS, trans); | |
7820 | ||
7821 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | |
7822 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7823 | if (trans & AHB_TRANS_RDY) | |
7824 | break; | |
7825 | } | |
7826 | if (tries >= AHB_TRANS_TRIES) { | |
7827 | qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", | |
7828 | AHB_TRANS_TRIES); | |
7829 | goto bail; | |
7830 | } | |
7831 | /* Re-read in case host split reads and read data first */ | |
7832 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7833 | rd_data = (uint32_t)(trans >> AHB_DATA_LSB); | |
7834 | wr_data |= (rd_data & ~mask & sz_mask); | |
7835 | } | |
7836 | ||
7837 | /* If mask is not zero, we need to write. */ | |
7838 | if (mask & sz_mask) { | |
7839 | trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); | |
7840 | trans |= ((uint64_t)wr_data << AHB_DATA_LSB); | |
7841 | trans |= AHB_WR; | |
7842 | qib_write_kreg(dd, KR_AHB_TRANS, trans); | |
7843 | ||
7844 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | |
7845 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7846 | if (trans & AHB_TRANS_RDY) | |
7847 | break; | |
7848 | } | |
7849 | if (tries >= AHB_TRANS_TRIES) { | |
7850 | qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", | |
7851 | AHB_TRANS_TRIES); | |
7852 | goto bail; | |
7853 | } | |
7854 | } | |
7855 | ret = wr_data; | |
7856 | bail: | |
7857 | qib_write_kreg(dd, KR_AHB_ACC, prev_acc); | |
7858 | return ret; | |
7859 | } | |
7860 | ||
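 | /* Apply the same masked write to an IB SerDes register on all channels */ |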
7861 | static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, | |
7862 | unsigned mask) | |
7863 | { | |
7864 | struct qib_devdata *dd = ppd->dd; | |
7865 | int chan; | |
7866 | u32 rbc; | |
7867 | ||
7868 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | |
7869 | ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, | |
7870 | data, mask); | |
7871 | rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
7872 | addr, 0, 0); | |
7873 | } | |
7874 | } | |
7875 | ||
a0a234d4 MM |
7876 | static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) |
7877 | { | |
7878 | u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); | |
31264484 MH |
7879 | u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN); |
7880 | ||
7881 | if (enable && !state) { | |
7fac3301 | 7882 | pr_info("IB%u:%u Turning LOS on\n", |
31264484 | 7883 | ppd->dd->unit, ppd->port); |
a0a234d4 | 7884 | data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); |
31264484 | 7885 | } else if (!enable && state) { |
7fac3301 | 7886 | pr_info("IB%u:%u Turning LOS off\n", |
31264484 | 7887 | ppd->dd->unit, ppd->port); |
a0a234d4 | 7888 | data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); |
31264484 | 7889 | } |
a0a234d4 MM |
7890 | qib_write_kreg_port(ppd, krp_serdesctrl, data); |
7891 | } | |
7892 | ||
f931551b RC |
7893 | static int serdes_7322_init(struct qib_pportdata *ppd) |
7894 | { | |
a0a234d4 MM |
7895 | int ret = 0; |
7896 | if (ppd->dd->cspec->r1) | |
7897 | ret = serdes_7322_init_old(ppd); | |
7898 | else | |
7899 | ret = serdes_7322_init_new(ppd); | |
7900 | return ret; | |
7901 | } | |
7902 | ||
7903 | static int serdes_7322_init_old(struct qib_pportdata *ppd) | |
7904 | { | |
f931551b RC |
7905 | u32 le_val; |
7906 | ||
7907 | /* | |
7908 | * Initialize the Tx DDS tables. Also done every QSFP event, | |
7909 | * for adapters with QSFP | |
7910 | */ | |
7911 | init_txdds_table(ppd, 0); | |
7912 | ||
a77fcf89 RC |
7913 | /* ensure no tx overrides from earlier driver loads */ |
7914 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
7915 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
7916 | reset_tx_deemphasis_override)); | |
7917 | ||
f931551b RC |
7918 | /* Patch some SerDes defaults to "Better for IB" */ |
7919 | /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ | |
7920 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | |
7921 | ||
7922 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | |
7923 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | |
7924 | /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */ | |
7925 | ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6)); | |
7926 | ||
7927 | /* May be overridden in qsfp_7322_event */ | |
7928 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | |
7929 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | |
7930 | ||
7931 | /* enable LE1 adaptation for all but QME, which is disabled */ | |
7932 | le_val = IS_QME(ppd->dd) ? 0 : 1; | |
7933 | ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5)); | |
7934 | ||
7935 | /* Clear cmode-override, may be set from older driver */ | |
7936 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | |
7937 | ||
7938 | /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */ | |
7939 | ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8)); | |
7940 | ||
7941 | /* setup LoS params; these are subsystem, so chan == 5 */ | |
7942 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | |
7943 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | |
7944 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | |
7945 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | |
7946 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | |
7947 | ||
7948 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | |
7949 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | |
7950 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | |
7951 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | |
7952 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | |
7953 | ||
7954 | /* LoS filter select enabled */ | |
7955 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | |
7956 | ||
7957 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | |
7958 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | |
7959 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | |
7960 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | |
7961 | ||
a0a234d4 | 7962 | serdes_7322_los_enable(ppd, 1); |
f931551b RC |
7963 | |
7964 | 	/* rxbistena; set to 0 to avoid effects of it switching later */ | |
7965 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); | |
7966 | ||
7967 | /* Configure 4 DFE taps, and only they adapt */ | |
7968 | ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0)); | |
7969 | ||
7970 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | |
7971 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | |
7972 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | |
7973 | ||
7974 | /* | |
7975 | * Set receive adaptation mode. SDR and DDR adaptation are | |
7976 | * always on, and QDR is initially enabled; later disabled. | |
7977 | */ | |
7978 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | |
7979 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | |
7980 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
7981 | ppd->dd->cspec->r1 ? | |
7982 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | |
7983 | ppd->cpspec->qdr_dfe_on = 1; | |
7984 | ||
a77fcf89 | 7985 | /* FLoop LOS gate: PPM filter enabled */ |
f931551b RC |
7986 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); |
7987 | ||
7988 | /* rx offset center enabled */ | |
7989 | ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4); | |
7990 | ||
7991 | if (!ppd->dd->cspec->r1) { | |
7992 | ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12); | |
7993 | ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8); | |
7994 | } | |
7995 | ||
7996 | /* Set the frequency loop bandwidth to 15 */ | |
7997 | ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5)); | |
7998 | ||
7999 | return 0; | |
8000 | } | |
8001 | ||
a0a234d4 MM |
8002 | static int serdes_7322_init_new(struct qib_pportdata *ppd) |
8003 | { | |
8482d5d1 | 8004 | unsigned long tend; |
a0a234d4 MM |
8005 | u32 le_val, rxcaldone; |
8006 | int chan, chan_done = (1 << SERDES_CHANS) - 1; | |
8007 | ||
a0a234d4 MM |
8008 | /* Clear cmode-override, may be set from older driver */ |
8009 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | |
8010 | ||
8011 | /* ensure no tx overrides from earlier driver loads */ | |
8012 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
8013 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8014 | reset_tx_deemphasis_override)); | |
8015 | ||
8016 | /* START OF LSI SUGGESTED SERDES BRINGUP */ | |
8017 | /* Reset - Calibration Setup */ | |
8019 | 	/* Stop DFE adaptation */ | |
8019 | ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1)); | |
8020 | /* Disable LE1 */ | |
8021 | ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5)); | |
8022 | /* Disable autoadapt for LE1 */ | |
8023 | ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15)); | |
8024 | /* Disable LE2 */ | |
8025 | ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6)); | |
8026 | /* Disable VGA */ | |
8027 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | |
8028 | /* Disable AFE Offset Cancel */ | |
8029 | ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12)); | |
8030 | /* Disable Timing Loop */ | |
8031 | ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3)); | |
8032 | /* Disable Frequency Loop */ | |
8033 | ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4)); | |
8034 | /* Disable Baseline Wander Correction */ | |
8035 | ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13)); | |
8036 | /* Disable RX Calibration */ | |
8037 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | |
8038 | /* Disable RX Offset Calibration */ | |
8039 | ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4)); | |
8040 | /* Select BB CDR */ | |
8041 | ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15)); | |
8042 | /* CDR Step Size */ | |
8043 | ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8)); | |
8044 | /* Enable phase Calibration */ | |
8045 | ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5)); | |
8046 | /* DFE Bandwidth [2:14-12] */ | |
8047 | ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12)); | |
8048 | /* DFE Config (4 taps only) */ | |
8049 | ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0)); | |
8050 | /* Gain Loop Bandwidth */ | |
8051 | if (!ppd->dd->cspec->r1) { | |
8052 | ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12)); | |
8053 | ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8)); | |
8054 | } else { | |
8055 | ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11)); | |
8056 | } | |
8057 | /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ | |
8058 | /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ | |
8059 | /* Data Rate Select [5:7-6] (leave as default) */ | |
25985edc | 8060 | /* RX Parallel Word Width [3:10-8] (leave as default) */ |
a0a234d4 MM |
8061 | |
8062 | 	/* RX RESET */ | |
8063 | /* Single- or Multi-channel reset */ | |
8064 | /* RX Analog reset */ | |
8065 | /* RX Digital reset */ | |
8066 | ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13)); | |
8067 | msleep(20); | |
8068 | /* RX Analog reset */ | |
8069 | ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14)); | |
8070 | msleep(20); | |
8071 | /* RX Digital reset */ | |
8072 | ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13)); | |
8073 | msleep(20); | |
8074 | ||
8075 | /* setup LoS params; these are subsystem, so chan == 5 */ | |
8076 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | |
8077 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | |
8078 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | |
8079 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | |
8080 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | |
8081 | ||
8082 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | |
8083 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | |
8084 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | |
8085 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | |
8086 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | |
8087 | ||
8088 | /* LoS filter select enabled */ | |
8089 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | |
8090 | ||
8091 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | |
8092 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | |
8093 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | |
8094 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | |
8095 | ||
8096 | /* Turn on LOS on initial SERDES init */ | |
8097 | serdes_7322_los_enable(ppd, 1); | |
8098 | /* FLoop LOS gate: PPM filter enabled */ | |
8099 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); | |
8100 | ||
8101 | /* RX LATCH CALIBRATION */ | |
8102 | /* Enable Eyefinder Phase Calibration latch */ | |
8103 | ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0)); | |
8104 | /* Enable RX Offset Calibration latch */ | |
8105 | ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4)); | |
8106 | msleep(20); | |
8107 | /* Start Calibration */ | |
8108 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); | |
8482d5d1 MM |
8109 | tend = jiffies + msecs_to_jiffies(500); |
8110 | while (chan_done && !time_is_before_jiffies(tend)) { | |
a0a234d4 MM |
8111 | msleep(20); |
8112 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | |
8113 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | |
8114 | (chan + (chan >> 1)), | |
8115 | 25, 0, 0); | |
8116 | if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 && | |
8117 | (~chan_done & (1 << chan)) == 0) | |
8118 | chan_done &= ~(1 << chan); | |
8119 | } | |
8120 | } | |
8121 | if (chan_done) { | |
7fac3301 | 8122 | pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n", |
a0a234d4 MM |
8123 | IBSD(ppd->hw_pidx), chan_done); |
8124 | } else { | |
8125 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | |
8126 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | |
8127 | (chan + (chan >> 1)), | |
8128 | 25, 0, 0); | |
8129 | if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) | |
7fac3301 MM |
8130 | pr_info("Serdes %d chan %d calibration failed\n", |
8131 | IBSD(ppd->hw_pidx), chan); | |
a0a234d4 MM |
8132 | } |
8133 | } | |
8134 | ||
8135 | /* Turn off Calibration */ | |
8136 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | |
8137 | msleep(20); | |
8138 | ||
8139 | /* BRING RX UP */ | |
8140 | /* Set LE2 value (May be overridden in qsfp_7322_event) */ | |
8141 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | |
8142 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | |
8143 | /* Set LE2 Loop bandwidth */ | |
8144 | ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5)); | |
8145 | /* Enable LE2 */ | |
8146 | ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6)); | |
8147 | msleep(20); | |
8148 | /* Enable H0 only */ | |
8149 | ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1)); | |
8150 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | |
8151 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | |
8152 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | |
8153 | /* Enable VGA */ | |
8154 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | |
8155 | msleep(20); | |
8156 | /* Set Frequency Loop Bandwidth */ | |
f665acb3 | 8157 | ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5)); |
a0a234d4 MM |
8158 | /* Enable Frequency Loop */ |
8159 | ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); | |
8160 | /* Set Timing Loop Bandwidth */ | |
8161 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | |
8162 | /* Enable Timing Loop */ | |
8163 | ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3)); | |
8164 | msleep(50); | |
8165 | /* Enable DFE | |
8166 | * Set receive adaptation mode. SDR and DDR adaptation are | |
8167 | * always on, and QDR is initially enabled; later disabled. | |
8168 | */ | |
8169 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | |
8170 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | |
8171 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
8172 | ppd->dd->cspec->r1 ? | |
8173 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | |
8174 | ppd->cpspec->qdr_dfe_on = 1; | |
8175 | /* Disable LE1 */ | |
8176 | ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5)); | |
8177 | /* Disable auto adapt for LE1 */ | |
8178 | ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15)); | |
8179 | msleep(20); | |
8180 | /* Enable AFE Offset Cancel */ | |
8181 | ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12)); | |
8182 | /* Enable Baseline Wander Correction */ | |
8183 | ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13)); | |
8184 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | |
8185 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | |
8186 | /* VGA output common mode */ | |
8187 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); | |
8188 | ||
dde05cbd MH |
8189 | /* |
8190 | * Initialize the Tx DDS tables. Also done every QSFP event, | |
8191 | * for adapters with QSFP | |
8192 | */ | |
8193 | init_txdds_table(ppd, 0); | |
8194 | ||
a0a234d4 MM |
8195 | return 0; |
8196 | } | |
8197 | ||
f931551b RC |
8198 | /* start adjust QMH serdes parameters */ |
8199 | ||
8200 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) | |
8201 | { | |
8202 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8203 | 9, code << 9, 0x3f << 9); | |
8204 | } | |
8205 | ||
8206 | static void set_man_mode_h1(struct qib_pportdata *ppd, int chan, | |
8207 | int enable, u32 tapenable) | |
8208 | { | |
8209 | if (enable) | |
8210 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8211 | 1, 3 << 10, 0x1f << 10); | |
8212 | else | |
8213 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8214 | 1, 0, 0x1f << 10); | |
8215 | } | |
8216 | ||
8217 | /* Set clock to 1, 0, 1, 0 */ | |
8218 | static void clock_man(struct qib_pportdata *ppd, int chan) | |
8219 | { | |
8220 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8221 | 4, 0x4000, 0x4000); | |
8222 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8223 | 4, 0, 0x4000); | |
8224 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8225 | 4, 0x4000, 0x4000); | |
8226 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8227 | 4, 0, 0x4000); | |
8228 | } | |
8229 | ||
8230 | /* | |
8231 |  * Write the current Tx serdes pre, post, main, and amp settings into the serdes. | |
8232 | * The caller must pass the settings appropriate for the current speed, | |
8233 | * or not care if they are correct for the current speed. | |
8234 | */ | |
8235 | static void write_tx_serdes_param(struct qib_pportdata *ppd, | |
8236 | struct txdds_ent *txdds) | |
8237 | { | |
8238 | u64 deemph; | |
8239 | ||
8240 | deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override); | |
8241 | /* field names for amp, main, post, pre, respectively */ | |
8242 | deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) | | |
8243 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | | |
8244 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | | |
8245 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); | |
a77fcf89 RC |
8246 | |
8247 | deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8248 | tx_override_deemphasis_select); | |
8249 | deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8250 | txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8251 | txampcntl_d2a); | |
8252 | deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8253 | txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8254 | txc0_ena); | |
8255 | deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8256 | txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8257 | txcp1_ena); | |
8258 | deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8259 | txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
f931551b RC |
8260 | txcn1_ena); |
8261 | qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); | |
8262 | } | |
8263 | ||
8264 | /* | |
a77fcf89 RC |
8265 |  * Set the parameters for mezzanine (mez) cards on link bounce, so they | |
8266 |  * are always exactly what was requested.  Similar logic to | |
8267 |  * init_txdds_table(), but this touches only the serdes. | |
f931551b RC |
8268 | */ |
8269 | static void adj_tx_serdes(struct qib_pportdata *ppd) | |
8270 | { | |
a77fcf89 RC |
8271 | const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; |
8272 | struct txdds_ent *dds; | |
f931551b | 8273 | |
a77fcf89 RC |
8274 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1); |
8275 | dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? | |
8276 | qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? | |
8277 | ddr_dds : sdr_dds)); | |
8278 | write_tx_serdes_param(ppd, dds); | |
f931551b RC |
8279 | } |
8280 | ||
8281 | /* set QDR forced value for H1, if needed */ | |
8282 | static void force_h1(struct qib_pportdata *ppd) | |
8283 | { | |
8284 | int chan; | |
8285 | ||
8286 | ppd->cpspec->qdr_reforce = 0; | |
8287 | if (!ppd->dd->cspec->r1) | |
8288 | return; | |
8289 | ||
8290 | for (chan = 0; chan < SERDES_CHANS; chan++) { | |
8291 | set_man_mode_h1(ppd, chan, 1, 0); | |
8292 | set_man_code(ppd, chan, ppd->cpspec->h1_val); | |
8293 | clock_man(ppd, chan); | |
8294 | set_man_mode_h1(ppd, chan, 0, 0); | |
8295 | } | |
8296 | } | |
8297 | ||
f931551b RC |
8298 | #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) |
8299 | #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) | |
8300 | ||
8301 | #define R_OPCODE_LSB 3 | |
8302 | #define R_OP_NOP 0 | |
8303 | #define R_OP_SHIFT 2 | |
8304 | #define R_OP_UPDATE 3 | |
8305 | #define R_TDI_LSB 2 | |
8306 | #define R_TDO_LSB 1 | |
8307 | #define R_RDY 1 | |
8308 | ||
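| /* | |
|  * The qib_r_* helpers below bit-bang the SPC JTAG access register | |
|  * (kr_r_access) to shift long control chains (port select, AT, ETM) | |
|  * into the chip.  They are used by the IB link recovery setup below. | |
|  */ | |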
8309 | static int qib_r_grab(struct qib_devdata *dd) | |
8310 | { | |
8311 | u64 val = SJA_EN; | |
8312 | ||
8313 | qib_write_kreg(dd, kr_r_access, val); | |
8314 | qib_read_kreg32(dd, kr_scratch); | |
8315 | return 0; | |
8316 | } | |
8317 | ||
8318 | /* qib_r_wait_for_rdy() not only waits for the ready bit, it also | |
8319 |  * returns the current state of R_TDO. | |
8320 |  */ | |
8321 | static int qib_r_wait_for_rdy(struct qib_devdata *dd) | |
8322 | { | |
8323 | u64 val; | |
8324 | int timeout; | |
8325 | for (timeout = 0; timeout < 100; ++timeout) { | |
8326 | val = qib_read_kreg32(dd, kr_r_access); | |
8327 | if (val & R_RDY) | |
8328 | return (val >> R_TDO_LSB) & 1; | |
8329 | } | |
8330 | return -1; | |
8331 | } | |
8332 | ||
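| /* | |
|  * Shift @len bits through the chain selected by @bisten.  Each step | |
|  * captures the TDO bit from the previous cycle into @outp (if | |
|  * non-NULL) and drives the next TDI bit from @inp (if non-NULL). | |
|  * Returns the number of bits shifted on success, or -1 on timeout. | |
|  */ | |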
8333 | static int qib_r_shift(struct qib_devdata *dd, int bisten, | |
8334 | int len, u8 *inp, u8 *outp) | |
8335 | { | |
8336 | u64 valbase, val; | |
8337 | int ret, pos; | |
8338 | ||
8339 | valbase = SJA_EN | (bisten << BISTEN_LSB) | | |
8340 | (R_OP_SHIFT << R_OPCODE_LSB); | |
8341 | ret = qib_r_wait_for_rdy(dd); | |
8342 | if (ret < 0) | |
8343 | goto bail; | |
8344 | for (pos = 0; pos < len; ++pos) { | |
8345 | val = valbase; | |
8346 | if (outp) { | |
8347 | outp[pos >> 3] &= ~(1 << (pos & 7)); | |
8348 | outp[pos >> 3] |= (ret << (pos & 7)); | |
8349 | } | |
8350 | if (inp) { | |
8351 | int tdi = inp[pos >> 3] >> (pos & 7); | |
8352 | val |= ((tdi & 1) << R_TDI_LSB); | |
8353 | } | |
8354 | qib_write_kreg(dd, kr_r_access, val); | |
8355 | qib_read_kreg32(dd, kr_scratch); | |
8356 | ret = qib_r_wait_for_rdy(dd); | |
8357 | if (ret < 0) | |
8358 | break; | |
8359 | } | |
8360 | /* Restore to NOP between operations. */ | |
8361 | val = SJA_EN | (bisten << BISTEN_LSB); | |
8362 | qib_write_kreg(dd, kr_r_access, val); | |
8363 | qib_read_kreg32(dd, kr_scratch); | |
8364 | ret = qib_r_wait_for_rdy(dd); | |
8365 | ||
8366 | if (ret >= 0) | |
8367 | ret = pos; | |
8368 | bail: | |
8369 | return ret; | |
8370 | } | |
8371 | ||
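| /* Latch the previously shifted chain by issuing an UPDATE opcode. */ | |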
8372 | static int qib_r_update(struct qib_devdata *dd, int bisten) | |
8373 | { | |
8374 | u64 val; | |
8375 | int ret; | |
8376 | ||
8377 | val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB); | |
8378 | ret = qib_r_wait_for_rdy(dd); | |
8379 | if (ret >= 0) { | |
8380 | qib_write_kreg(dd, kr_r_access, val); | |
8381 | qib_read_kreg32(dd, kr_scratch); | |
8382 | } | |
8383 | return ret; | |
8384 | } | |
8385 | ||
8386 | #define BISTEN_PORT_SEL 15 | |
8387 | #define LEN_PORT_SEL 625 | |
8388 | #define BISTEN_AT 17 | |
8389 | #define LEN_AT 156 | |
8390 | #define BISTEN_ETM 16 | |
8391 | #define LEN_ETM 632 | |
8392 | ||
8393 | #define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE) | |
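| /* Chain images below are shifted LSB-first within each byte, starting at byte 0. */ | |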
8394 | ||
8395 | /* these are common for all IB port use cases. */ | |
8396 | static u8 reset_at[BIT2BYTE(LEN_AT)] = { | |
8397 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8398 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, | |
8399 | }; | |
8400 | static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = { | |
8401 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8402 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8403 | 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e, | |
8404 | 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7, | |
8405 | 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70, | |
8406 | 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00, | |
8407 | 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8408 | 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, | |
8409 | }; | |
8410 | static u8 at[BIT2BYTE(LEN_AT)] = { | |
8411 | 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, | |
8412 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, | |
8413 | }; | |
8414 | ||
8415 | /* used for IB1 or IB2, only one in use */ | |
8416 | static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = { | |
8417 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8418 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8419 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8420 | 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00, | |
8421 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8422 | 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03, | |
8423 | 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00, | |
8424 | 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00, | |
8425 | }; | |
8426 | ||
8427 | /* used when both IB1 and IB2 are in use */ | |
8428 | static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = { | |
8429 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8430 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, | |
8431 | 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8432 | 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05, | |
8433 | 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, | |
8434 | 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07, | |
8435 | 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00, | |
8436 | 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, | |
8437 | }; | |
8438 | ||
8439 | /* used when only IB1 is in use */ | |
8440 | static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = { | |
8441 | 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, | |
8442 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, | |
8443 | 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | |
8444 | 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | |
8445 | 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, | |
8446 | 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, | |
8447 | 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, | |
8448 | 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8449 | }; | |
8450 | ||
8451 | /* used when only IB2 is in use */ | |
8452 | static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = { | |
8453 | 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39, | |
8454 | 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32, | |
8455 | 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, | |
8456 | 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, | |
8457 | 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32, | |
8458 | 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, | |
8459 | 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, | |
8460 | 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, | |
8461 | }; | |
8462 | ||
8463 | /* used when both IB1 and IB2 are in use */ | |
8464 | static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = { | |
8465 | 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, | |
8466 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, | |
8467 | 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | |
8468 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | |
8469 | 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, | |
8470 | 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a, | |
8471 | 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, | |
8472 | 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, | |
8473 | }; | |
8474 | ||
8475 | /* | |
8476 |  * Do setup to properly handle IB link recovery; if @both is set, we | |
8477 |  * are initializing to cover both ports; otherwise we are initializing | |
8478 |  * to cover a single-port card, or the port has reached INIT and we may | |
8479 |  * need to switch coverage types. | |
8480 | */ | |
8481 | static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both) | |
8482 | { | |
8483 | u8 *portsel, *etm; | |
8484 | struct qib_devdata *dd = ppd->dd; | |
8485 | ||
8486 | if (!ppd->dd->cspec->r1) | |
8487 | return; | |
8488 | if (!both) { | |
8489 | dd->cspec->recovery_ports_initted++; | |
8490 | ppd->cpspec->recovery_init = 1; | |
8491 | } | |
8492 | if (!both && dd->cspec->recovery_ports_initted == 1) { | |
8493 | portsel = ppd->port == 1 ? portsel_port1 : portsel_port2; | |
8494 | etm = atetm_1port; | |
8495 | } else { | |
8496 | portsel = portsel_2port; | |
8497 | etm = atetm_2port; | |
8498 | } | |
8499 | ||
8500 | if (qib_r_grab(dd) < 0 || | |
8501 | qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 || | |
8502 | qib_r_update(dd, BISTEN_ETM) < 0 || | |
8503 | qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 || | |
8504 | qib_r_update(dd, BISTEN_AT) < 0 || | |
8505 | qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL, | |
8506 | portsel, NULL) < 0 || | |
8507 | qib_r_update(dd, BISTEN_PORT_SEL) < 0 || | |
8508 | qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 || | |
8509 | qib_r_update(dd, BISTEN_AT) < 0 || | |
8510 | qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 || | |
8511 | qib_r_update(dd, BISTEN_ETM) < 0) | |
8512 | qib_dev_err(dd, "Failed IB link recovery setup\n"); | |
8513 | } | |
8514 | ||
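| /* | |
|  * Single-port recovery check: briefly freeze the chip and read the | |
|  * active fmask.  If it reads back as zero the chip cannot recover | |
|  * without a power cycle; otherwise clear the PClkNotDetect error and | |
|  * take the IBC back out of reset. | |
|  */ | |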
8515 | static void check_7322_rxe_status(struct qib_pportdata *ppd) | |
8516 | { | |
8517 | struct qib_devdata *dd = ppd->dd; | |
8518 | u64 fmask; | |
8519 | ||
8520 | if (dd->cspec->recovery_ports_initted != 1) | |
8521 | return; /* rest doesn't apply to dualport */ | |
8522 | qib_write_kreg(dd, kr_control, dd->control | | |
8523 | SYM_MASK(Control, FreezeMode)); | |
8524 | (void)qib_read_kreg64(dd, kr_scratch); | |
8525 | udelay(3); /* ibcreset asserted 400ns, be sure that's over */ | |
8526 | fmask = qib_read_kreg64(dd, kr_act_fmask); | |
8527 | if (!fmask) { | |
8528 | /* | |
8529 |  * Require a power cycle before we'll work again.  Make sure | |
8530 |  * we get no more interrupts, and do not turn off the | |
8531 |  * freeze. | |
8532 | */ | |
8533 | ppd->dd->cspec->stay_in_freeze = 1; | |
8534 | qib_7322_set_intr_state(ppd->dd, 0); | |
8535 | qib_write_kreg(dd, kr_fmask, 0ULL); | |
8536 | qib_dev_err(dd, "HCA unusable until powercycled\n"); | |
8537 | return; /* eventually reset */ | |
8538 | } | |
8539 | ||
8540 | qib_write_kreg(ppd->dd, kr_hwerrclear, | |
8541 | SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1)); | |
8542 | ||
8543 | /* don't do the full clear_freeze(), not needed for this */ | |
8544 | qib_write_kreg(dd, kr_control, dd->control); | |
8545 | qib_read_kreg32(dd, kr_scratch); | |
8546 | /* take IBC out of reset */ | |
8547 | if (ppd->link_speed_supported) { | |
8548 | ppd->cpspec->ibcctrl_a &= | |
8549 | ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | |
8550 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
8551 | ppd->cpspec->ibcctrl_a); | |
8552 | qib_read_kreg32(dd, kr_scratch); | |
8553 | if (ppd->lflags & QIBL_IB_LINK_DISABLED) | |
8554 | qib_set_ib_7322_lstate(ppd, 0, | |
8555 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
8556 | } | |
8557 | } |