/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
				  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
		 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
		  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
	BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
	BOARD_QME7342)

#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))

#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK << \
	QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)


#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units. 0 marks as invalid,
 * and 7 is reserved. We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */

#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers. Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context. Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses. Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS ( \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))

/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
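/*
 * Note: the delay factors above scale roughly inversely with the static
 * rate, so slower rates get proportionally longer inter-packet delays.
 */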

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED 0x00
#define IB_7322_LT_STATE_LINKUP 0x01
#define IB_7322_LT_STATE_POLLACTIVE 0x02
#define IB_7322_LT_STATE_POLLQUIET 0x03
#define IB_7322_LT_STATE_SLEEPDELAY 0x04
#define IB_7322_LT_STATE_SLEEPQUIET 0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
#define IB_7322_LT_STATE_CFGIDLE 0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
#define IB_7322_LT_STATE_TXREVLANES 0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
#define IB_7322_LT_STATE_CFGWAITENH 0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
#define IB_7322_L_STATE_INIT 0x1
#define IB_7322_L_STATE_ARM 0x2
#define IB_7322_L_STATE_ACTIVE 0x3
#define IB_7322_L_STATE_ACT_DEFER 0x4

static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] =
		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
	int rcv;
	void *arg;
	struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask; /* clear bits which have dedicated handlers */
	u64 int_enable_mask; /* for per port interrupts in single port mode */
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];
	int sdma_cpu[2];
	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of entries in "human readable" form Tx Emphasis. */
struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};

struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors. They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * entry zero is unused, to simplify indexing
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
};

static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
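	/*
	 * Per-context user registers live either in the separately mapped
	 * userbase or at uregbase within kregbase; each context's register
	 * block is ureg_align bytes.
	 */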
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;
	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
*/
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	QIB_I_SPIOSENT | \
	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)


/*
 * Per chip (rather than per-port) errors. Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or because they indicate errors with no recovery,
 * but we want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism. This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below is functionally equivalent to legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

/*
 * These are errors that can occur when the link
 * changes state while a packet is being sent or received. This doesn't
 * cover things like EBP or VCRC that can result from the sender seeing
 * the link change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise Neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled). It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0, .sz = 0 }
};

#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(RcvEgrFullErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	{ .mask = 0, .sz = 0 }
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	/*
	 * SDmaHaltErr is not really an error, make it clearer;
	 */
	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
		.sz = 11},
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0, .sz = 0 }
};

/*
 * Below generates "auto-message" for interrupts not specific to any port or
 * context
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
	INTR_AUTO_P(SDmaInt),
	INTR_AUTO_P(SDmaProgressInt),
	INTR_AUTO_P(SDmaIdleInt),
	INTR_AUTO_P(SDmaCleanupDone),
	INTR_AUTO_C(RcvUrg),
	INTR_AUTO_P(ErrInt),
	INTR_AUTO(ErrInt), /* non-port-specific errs */
	INTR_AUTO(AssertGPIOInt),
	INTR_AUTO_P(SendDoneInt),
	INTR_AUTO(SendBufAvailInt),
	INTR_AUTO_C(RcvAvail),
	{ .mask = 0, .sz = 0 }
};

#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0, .sz = 0 }
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
 * because we don't need to force the update of pioavail
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long sbuf[4];

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	any = 0;
	for (i = 0; i < regcnt; ++i) {
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
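			/* write-1-to-clear: writing back the set bits clears them */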
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}

/* No txe_recover yet, if ever */

/* No decode__errors yet */
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
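			/* lmask is the lowest set bit of these; decode one error bit per pass */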
1382 lmask = (these & (these - 1)) ^ these;
1383 if (len) {
1384 if (n++) {
1385 /* separate the strings */
1386 *msg++ = ',';
1387 len--;
1388 }
e67306a3
MM
1389 BUG_ON(!msp->sz);
1390 /* msp->sz counts the nul */
1391 took = min_t(size_t, msp->sz - (size_t)1, len);
1392 memcpy(msg, msp->msg, took);
f931551b
RC
1393 len -= took;
1394 msg += took;
e67306a3
MM
1395 if (len)
1396 *msg = '\0';
1397 }
1398 errs &= ~lmask;
1399 if (len && multi) {
1400 /* More than one bit this mask */
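				/* append the bit's offset within the mask, e.g. the context number */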
1401 int idx = -1;
1402
1403 while (lmask & msp->mask) {
1404 ++idx;
1405 lmask >>= 1;
1406 }
1407 took = scnprintf(msg, len, "_%d", idx);
1408 len -= took;
1409 msg += took;
1410 }
1411 }
1412 ++msp;
1413 }
1414 /* If some bits are left, show in hex. */
1415 if (len && errs)
1416 snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1417 (unsigned long long) errs);
1418}
1419
1420/* only called if r1 set */
1421static void flush_fifo(struct qib_pportdata *ppd)
1422{
1423 struct qib_devdata *dd = ppd->dd;
1424 u32 __iomem *piobuf;
1425 u32 bufn;
1426 u32 *hdr;
1427 u64 pbc;
1428 const unsigned hdrwords = 7;
1429 static struct qib_ib_header ibhdr = {
1430 .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1431 .lrh[1] = IB_LID_PERMISSIVE,
1432 .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1433 .lrh[3] = IB_LID_PERMISSIVE,
1434 .u.oth.bth[0] = cpu_to_be32(
1435 (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1436 .u.oth.bth[1] = cpu_to_be32(0),
1437 .u.oth.bth[2] = cpu_to_be32(0),
1438 .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1439 .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1440 };
1441
1442 /*
1443 * Send a dummy VL15 packet to flush the launch FIFO.
1444 * This will not actually be sent since the TxeBypassIbc bit is set.
1445 */
1446 pbc = PBC_7322_VL15_SEND |
1447 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1448 (hdrwords + SIZE_OF_CRC);
1449 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1450 if (!piobuf)
1451 return;
1452 writeq(pbc, piobuf);
1453 hdr = (u32 *) &ibhdr;
1454 if (dd->flags & QIB_PIO_FLUSH_WC) {
1455 qib_flush_wc();
1456 qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1457 qib_flush_wc();
1458 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1459 qib_flush_wc();
1460 } else
1461 qib_pio_copy(piobuf + 2, hdr, hdrwords);
1462 qib_sendbuf_done(dd, bufn);
1463}
1464
1465/*
1466 * This is called with interrupts disabled and sdma_lock held.
1467 */
1468static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1469{
1470 struct qib_devdata *dd = ppd->dd;
1471 u64 set_sendctrl = 0;
1472 u64 clr_sendctrl = 0;
1473
1474 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1475 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1476 else
1477 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1478
1479 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1480 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1481 else
1482 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1483
1484 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1485 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1486 else
1487 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1488
1489 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1490 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1491 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1492 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1493 else
1494 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1495 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1496 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1497
1498 spin_lock(&dd->sendctrl_lock);
1499
1500 /* If we are draining everything, block sends first */
1501 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1502 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1503 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1504 qib_write_kreg(dd, kr_scratch, 0);
1505 }
1506
1507 ppd->p_sendctrl |= set_sendctrl;
1508 ppd->p_sendctrl &= ~clr_sendctrl;
1509
1510 if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1511 qib_write_kreg_port(ppd, krp_sendctrl,
1512 ppd->p_sendctrl |
1513 SYM_MASK(SendCtrl_0, SDmaCleanup));
1514 else
1515 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1516 qib_write_kreg(dd, kr_scratch, 0);
1517
1518 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1519 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1520 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1521 qib_write_kreg(dd, kr_scratch, 0);
1522 }
1523
1524 spin_unlock(&dd->sendctrl_lock);
1525
1526 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1527 flush_fifo(ppd);
1528}
1529
1530static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1531{
1532 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1533}
1534
1535static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1536{
1537 /*
1538 * Set SendDmaLenGen and clear and set
1539 * the MSB of the generation count to enable generation checking
1540 * and load the internal generation counter.
1541 */
1542 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1543 qib_write_kreg_port(ppd, krp_senddmalengen,
1544 ppd->sdma_descq_cnt |
1545 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
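	/* the second write sets the Generation MSB, arming generation checking */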
1546}
1547
1548/*
1549 * Must be called with sdma_lock held, or before init finished.
1550 */
1551static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1552{
1553 /* Commit writes to memory and advance the tail on the chip */
1554 wmb();
1555 ppd->sdma_descq_tail = tail;
1556 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1557}
1558
1559/*
1560 * This is called with interrupts disabled and sdma_lock held.
1561 */
1562static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1563{
1564 /*
1565 * Drain all FIFOs.
1566 * The hardware doesn't require this but we do it so that verbs
1567 * and user applications don't wait for link active to send stale
1568 * data.
1569 */
1570 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1571
1572 qib_sdma_7322_setlengen(ppd);
1573 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1574 ppd->sdma_head_dma[0] = 0;
1575 qib_7322_sdma_sendctrl(ppd,
1576 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1577}
1578
1579#define DISABLES_SDMA ( \
1580 QIB_E_P_SDMAHALT | \
1581 QIB_E_P_SDMADESCADDRMISALIGN | \
1582 QIB_E_P_SDMAMISSINGDW | \
1583 QIB_E_P_SDMADWEN | \
1584 QIB_E_P_SDMARPYTAG | \
1585 QIB_E_P_SDMA1STDESC | \
1586 QIB_E_P_SDMABASE | \
1587 QIB_E_P_SDMATAILOUTOFBOUND | \
1588 QIB_E_P_SDMAOUTOFBOUND | \
1589 QIB_E_P_SDMAGENMISMATCH)
1590
1591static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1592{
1593 unsigned long flags;
1594 struct qib_devdata *dd = ppd->dd;
1595
1596 errs &= QIB_E_P_SDMAERRS;
1597
1598 if (errs & QIB_E_P_SDMAUNEXPDATA)
1599 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1600 ppd->port);
1601
1602 spin_lock_irqsave(&ppd->sdma_lock, flags);
1603
1604 switch (ppd->sdma_state.current_state) {
1605 case qib_sdma_state_s00_hw_down:
1606 break;
1607
1608 case qib_sdma_state_s10_hw_start_up_wait:
1609 if (errs & QIB_E_P_SDMAHALT)
1610 __qib_sdma_process_event(ppd,
1611 qib_sdma_event_e20_hw_started);
1612 break;
1613
1614 case qib_sdma_state_s20_idle:
1615 break;
1616
1617 case qib_sdma_state_s30_sw_clean_up_wait:
1618 break;
1619
1620 case qib_sdma_state_s40_hw_clean_up_wait:
1621 if (errs & QIB_E_P_SDMAHALT)
1622 __qib_sdma_process_event(ppd,
1623 qib_sdma_event_e50_hw_cleaned);
1624 break;
1625
1626 case qib_sdma_state_s50_hw_halt_wait:
1627 if (errs & QIB_E_P_SDMAHALT)
1628 __qib_sdma_process_event(ppd,
1629 qib_sdma_event_e60_hw_halted);
1630 break;
1631
1632 case qib_sdma_state_s99_running:
1633 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1634 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1635 break;
1636 }
1637
1638 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1639}
1640
1641/*
1642 * handle per-device errors (not per-port errors)
1643 */
1644static noinline void handle_7322_errors(struct qib_devdata *dd)
1645{
1646 char *msg;
1647 u64 iserr = 0;
1648 u64 errs;
1649 u64 mask;
1650 int log_idx;
1651
1652 qib_stats.sps_errints++;
1653 errs = qib_read_kreg64(dd, kr_errstatus);
1654 if (!errs) {
1655 qib_devinfo(dd->pcidev,
1656 "device error interrupt, but no error bits set!\n");
1657 goto done;
1658 }
1659
1660 /* don't report errors that are masked */
1661 errs &= dd->cspec->errormask;
1662 msg = dd->cspec->emsgbuf;
1663
1664 /* do these first, they are most important */
1665 if (errs & QIB_E_HARDWARE) {
1666 *msg = '\0';
1667 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1668 } else
1669 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1670 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1671 qib_inc_eeprom_err(dd, log_idx, 1);
1672
1673 if (errs & QIB_E_SPKTERRS) {
1674 qib_disarm_7322_senderrbufs(dd->pport);
1675 qib_stats.sps_txerrs++;
1676 } else if (errs & QIB_E_INVALIDADDR)
1677 qib_stats.sps_txerrs++;
1678 else if (errs & QIB_E_ARMLAUNCH) {
1679 qib_stats.sps_txerrs++;
1680 qib_disarm_7322_senderrbufs(dd->pport);
1681 }
1682 qib_write_kreg(dd, kr_errclear, errs);
1683
1684 /*
1685 * The ones we mask off are handled specially below
1686 * or above. Also mask SDMADISABLED by default as it
1687 * is too chatty.
1688 */
1689 mask = QIB_E_HARDWARE;
1690 *msg = '\0';
1691
1692 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1693 qib_7322error_msgs);
1694
1695 /*
1696 * Getting reset is a tragedy for all ports. Mark the device
1697 * _and_ the ports as "offline" in a way meaningful to each.
1698 */
1699 if (errs & QIB_E_RESET) {
1700 int pidx;
1701
1702 qib_dev_err(dd,
1703 "Got reset, requires re-init (unload and reload driver)\n");
1704 dd->flags &= ~QIB_INITTED; /* needs re-init */
1705 /* mark as having had error */
1706 *dd->devstatusp |= QIB_STATUS_HWERROR;
1707 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1708 if (dd->pport[pidx].link_speed_supported)
1709 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1710 }
1711
1712 if (*msg && iserr)
1713 qib_dev_err(dd, "%s error\n", msg);
1714
1715 /*
1716 * If there were hdrq or egrfull errors, wake up any processes
1717 * waiting in poll. We used to try to check which contexts had
1718 * the overflow, but given the cost of that and the chip reads
1719 * to support it, it's better to just wake everybody up if we
1720 * get an overflow; waiters can poll again if it's not them.
1721 */
1722 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1723 qib_handle_urcv(dd, ~0U);
1724 if (errs & ERR_MASK(RcvEgrFullErr))
1725 qib_stats.sps_buffull++;
1726 else
1727 qib_stats.sps_hdrfull++;
1728 }
1729
1730done:
1731 return;
1732}
1733
1734static void qib_error_tasklet(unsigned long data)
1735{
1736 struct qib_devdata *dd = (struct qib_devdata *)data;
1737
1738 handle_7322_errors(dd);
1739 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1740}
1741
1742static void reenable_chase(unsigned long opaque)
1743{
1744 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1745
1746 ppd->cpspec->chase_timer.expires = 0;
1747 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1748 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1749}
1750
1751static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1752 u8 ibclt)
1753{
1754 ppd->cpspec->chase_end = 0;
1755
1756 if (!qib_chase)
1757 return;
1758
1759 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1760 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1761 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1762 add_timer(&ppd->cpspec->chase_timer);
1763}
1764
1765static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1766{
1767 u8 ibclt;
1768	unsigned long tnow;
1769
1770 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1771
1772 /*
1773 * Detect and handle the state chase issue, where we can
1774 * get stuck if we are unlucky on timing on both sides of
1775 * the link. If we are, we disable, set a timer, and
1776 * then re-enable.
1777 */
1778 switch (ibclt) {
1779 case IB_7322_LT_STATE_CFGRCVFCFG:
1780 case IB_7322_LT_STATE_CFGWAITRMT:
1781 case IB_7322_LT_STATE_TXREVLANES:
1782 case IB_7322_LT_STATE_CFGENH:
1783		tnow = jiffies;
1784		if (ppd->cpspec->chase_end &&
1785		    time_after(tnow, ppd->cpspec->chase_end))
1786 disable_chase(ppd, tnow, ibclt);
1787 else if (!ppd->cpspec->chase_end)
1788 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1789 break;
1790 default:
1791 ppd->cpspec->chase_end = 0;
1792 break;
1793 }
1794
1795 if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1796 ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1797 ibclt == IB_7322_LT_STATE_LINKUP) &&
1798 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1799 force_h1(ppd);
1800 ppd->cpspec->qdr_reforce = 1;
1801 if (!ppd->dd->cspec->r1)
1802 serdes_7322_los_enable(ppd, 0);
1803 } else if (ppd->cpspec->qdr_reforce &&
1804 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1805 (ibclt == IB_7322_LT_STATE_CFGENH ||
1806 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1807 ibclt == IB_7322_LT_STATE_LINKUP))
1808 force_h1(ppd);
1809
1810 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1811 ppd->link_speed_enabled == QIB_IB_QDR &&
1812 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1813 ibclt == IB_7322_LT_STATE_CFGENH ||
1814 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1815 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1816 adj_tx_serdes(ppd);
1817
1818 if (ibclt != IB_7322_LT_STATE_LINKUP) {
1819 u8 ltstate = qib_7322_phys_portstate(ibcst);
1820 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1821 LinkTrainingState);
1822 if (!ppd->dd->cspec->r1 &&
1823 pibclt == IB_7322_LT_STATE_LINKUP &&
1824 ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1825 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1826 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1827 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1828			/* If the link went down (but not into recovery),
1829			 * turn LOS back on */
1830 serdes_7322_los_enable(ppd, 1);
1831 if (!ppd->cpspec->qdr_dfe_on &&
1832 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1833 ppd->cpspec->qdr_dfe_on = 1;
1834 ppd->cpspec->qdr_dfe_time = 0;
1835 /* On link down, reenable QDR adaptation */
1836 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1837 ppd->dd->cspec->r1 ?
1838 QDR_STATIC_ADAPT_DOWN_R1 :
1839 QDR_STATIC_ADAPT_DOWN);
1840 pr_info(
1841 "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1842 ppd->dd->unit, ppd->port, ibclt);
1843		}
1844 }
1845}
1846
1847static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1848
1849/*
1850 * This is per-pport error handling.
1851 * It will likely get its own MSIx interrupt (one for each port,
1852 * although just a single handler).
1853 */
1854static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1855{
1856 char *msg;
1857 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1858 struct qib_devdata *dd = ppd->dd;
1859
1860 /* do this as soon as possible */
1861 fmask = qib_read_kreg64(dd, kr_act_fmask);
1862 if (!fmask)
1863 check_7322_rxe_status(ppd);
1864
1865 errs = qib_read_kreg_port(ppd, krp_errstatus);
1866 if (!errs)
1867 qib_devinfo(dd->pcidev,
1868 "Port%d error interrupt, but no error bits set!\n",
1869 ppd->port);
1870 if (!fmask)
1871 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1872 if (!errs)
1873 goto done;
1874
1875 msg = ppd->cpspec->epmsgbuf;
1876 *msg = '\0';
1877
1878 if (errs & ~QIB_E_P_BITSEXTANT) {
1879 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1880 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1881 if (!*msg)
1882 snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1883 "no others");
1884 qib_dev_porterr(dd, ppd->port,
1885 "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1886 (errs & ~QIB_E_P_BITSEXTANT), msg);
1887 *msg = '\0';
1888 }
1889
1890 if (errs & QIB_E_P_SHDR) {
1891 u64 symptom;
1892
1893 /* determine cause, then write to clear */
1894 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1895 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1896 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1897 hdrchk_msgs);
1898 *msg = '\0';
1899 /* senderrbuf cleared in SPKTERRS below */
1900 }
1901
1902 if (errs & QIB_E_P_SPKTERRS) {
1903 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1904 !(ppd->lflags & QIBL_LINKACTIVE)) {
1905 /*
1906 * This can happen when trying to bring the link
1907 * up, but the IB link changes state at the "wrong"
1908 * time. The IB logic then complains that the packet
1909 * isn't valid. We don't want to confuse people, so
1910 * we just don't print them, except at debug
1911 */
1912 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1913 (errs & QIB_E_P_LINK_PKTERRS),
1914 qib_7322p_error_msgs);
1915 *msg = '\0';
1916 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1917 }
1918 qib_disarm_7322_senderrbufs(ppd);
1919 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1920 !(ppd->lflags & QIBL_LINKACTIVE)) {
1921 /*
1922 * This can happen when SMA is trying to bring the link
1923 * up, but the IB link changes state at the "wrong" time.
1924 * The IB logic then complains that the packet isn't
1925 * valid. We don't want to confuse people, so we just
1926 * don't print them, except at debug
1927 */
1928 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1929 qib_7322p_error_msgs);
1930 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1931 *msg = '\0';
1932 }
1933
1934 qib_write_kreg_port(ppd, krp_errclear, errs);
1935
1936 errs &= ~ignore_this_time;
1937 if (!errs)
1938 goto done;
1939
1940 if (errs & QIB_E_P_RPKTERRS)
1941 qib_stats.sps_rcverrs++;
1942 if (errs & QIB_E_P_SPKTERRS)
1943 qib_stats.sps_txerrs++;
1944
1945 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1946
1947 if (errs & QIB_E_P_SDMAERRS)
1948 sdma_7322_p_errors(ppd, errs);
1949
1950 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1951 u64 ibcs;
1952 u8 ltstate;
1953
1954 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1955 ltstate = qib_7322_phys_portstate(ibcs);
1956
1957 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1958 handle_serdes_issues(ppd, ibcs);
1959 if (!(ppd->cpspec->ibcctrl_a &
1960 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1961 /*
1962 * We got our interrupt, so init code should be
1963 * happy and not try alternatives. Now squelch
1964 * other "chatter" from link-negotiation (pre Init)
1965 */
1966 ppd->cpspec->ibcctrl_a |=
1967 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1968 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1969 ppd->cpspec->ibcctrl_a);
1970 }
1971
1972 /* Update our picture of width and speed from chip */
1973 ppd->link_width_active =
1974 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1975 IB_WIDTH_4X : IB_WIDTH_1X;
1976 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1977 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1978 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1979 QIB_IB_DDR : QIB_IB_SDR;
1980
1981 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1982 IB_PHYSPORTSTATE_DISABLED)
1983 qib_set_ib_7322_lstate(ppd, 0,
1984 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1985		else
1986 /*
1987 * Since going into a recovery state causes the link
1988 * state to go down and since recovery is transitory,
1989 * it is better if we "miss" ever seeing the link
1990 * training state go into recovery (i.e., ignore this
1991 * transition for link state special handling purposes)
1992 * without updating lastibcstat.
1993 */
1994 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1995 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1996 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1997 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1998 qib_handle_e_ibstatuschanged(ppd, ibcs);
1999 }
2000 if (*msg && iserr)
2001 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2002
2003 if (ppd->state_wanted & ppd->lflags)
2004 wake_up_interruptible(&ppd->state_wait);
2005done:
2006 return;
2007}
2008
2009/* enable/disable chip from delivering interrupts */
2010static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2011{
2012 if (enable) {
2013 if (dd->flags & QIB_BADINTR)
2014 return;
2015 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2016 /* cause any pending enabled interrupts to be re-delivered */
2017 qib_write_kreg(dd, kr_intclear, 0ULL);
2018 if (dd->cspec->num_msix_entries) {
2019 /* and same for MSIx */
2020 u64 val = qib_read_kreg64(dd, kr_intgranted);
2021 if (val)
2022 qib_write_kreg(dd, kr_intgranted, val);
2023 }
2024 } else
2025 qib_write_kreg(dd, kr_intmask, 0ULL);
2026}
2027
2028/*
2029 * Try to cleanup as much as possible for anything that might have gone
2030 * wrong while in freeze mode, such as pio buffers being written by user
2031 * processes (causing armlaunch), send errors due to going into freeze mode,
2032 * etc., and try to avoid causing extra interrupts while doing so.
2033 * Forcibly update the in-memory pioavail register copies after cleanup
2034 * because the chip won't do it while in freeze mode (the register values
2035 * themselves are kept correct).
2036 * Make sure that we don't lose any important interrupts by using the chip
2037 * feature that says that writing 0 to a bit in *clear that is set in
2038 * *status will cause an interrupt to be generated again (if allowed by
2039 * the *mask value).
2040 * This is in chip-specific code because of all of the register accesses,
2041 * even though the details are similar on most chips.
2042 */
2043static void qib_7322_clear_freeze(struct qib_devdata *dd)
2044{
2045 int pidx;
2046
2047 /* disable error interrupts, to avoid confusion */
2048 qib_write_kreg(dd, kr_errmask, 0ULL);
2049
2050 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2051 if (dd->pport[pidx].link_speed_supported)
2052 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2053 0ULL);
2054
2055	/* also disable interrupts; errormask is sometimes overwritten */
2056 qib_7322_set_intr_state(dd, 0);
2057
2058 /* clear the freeze, and be sure chip saw it */
2059 qib_write_kreg(dd, kr_control, dd->control);
2060 qib_read_kreg32(dd, kr_scratch);
2061
2062 /*
2063 * Force new interrupt if any hwerr, error or interrupt bits are
2064 * still set, and clear "safe" send packet errors related to freeze
2065 * and cancelling sends. Re-enable error interrupts before possible
2066 * force of re-interrupt on pending interrupts.
2067 */
2068 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2069 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2070 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2071 /* We need to purge per-port errs and reset mask, too */
2072 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2073 if (!dd->pport[pidx].link_speed_supported)
2074 continue;
2075 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2076 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2077 }
2078 qib_7322_set_intr_state(dd, 1);
2079}
2080
2081/* no error handling to speak of */
2082/**
2083 * qib_7322_handle_hwerrors - display hardware errors.
2084 * @dd: the qlogic_ib device
2085 * @msg: the output buffer
2086 * @msgl: the size of the output buffer
2087 *
2088 * Most hardware errors are catastrophic, but for right now,
2089 * we'll print them and continue. We reuse the same message buffer
2090 * as qib_handle_errors() (the device's emsgbuf) to avoid excessive
2091 * stack usage.
2092 */
2093static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2094 size_t msgl)
2095{
2096 u64 hwerrs;
2097 u32 ctrl;
2098 int isfatal = 0;
2099
2100 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2101 if (!hwerrs)
2102 goto bail;
2103 if (hwerrs == ~0ULL) {
2104 qib_dev_err(dd,
2105 "Read of hardware error status failed (all bits set); ignoring\n");
2106 goto bail;
2107 }
2108 qib_stats.sps_hwerrs++;
2109
2110 /* Always clear the error status register, except BIST fail */
2111 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2112 ~HWE_MASK(PowerOnBISTFailed));
2113
2114 hwerrs &= dd->cspec->hwerrmask;
2115
2116 /* no EEPROM logging, yet */
2117
2118 if (hwerrs)
2119 qib_devinfo(dd->pcidev,
2120 "Hardware error: hwerr=0x%llx (cleared)\n",
2121 (unsigned long long) hwerrs);
2122
2123 ctrl = qib_read_kreg32(dd, kr_control);
2124 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2125 /*
2126 * No recovery yet...
2127 */
2128 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2129 dd->cspec->stay_in_freeze) {
2130 /*
2131 * If any set that we aren't ignoring only make the
2132 * complaint once, in case it's stuck or recurring,
2133 * and we get here multiple times
2134 * Force link down, so switch knows, and
2135 * LEDs are turned off.
2136 */
2137 if (dd->flags & QIB_INITTED)
2138 isfatal = 1;
2139 } else
2140 qib_7322_clear_freeze(dd);
2141 }
2142
2143 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2144 isfatal = 1;
2145 strlcpy(msg,
2146 "[Memory BIST test failed, InfiniPath hardware unusable]",
2147 msgl);
2148 /* ignore from now on, so disable until driver reloaded */
2149 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2150 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2151 }
2152
2153 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2154
2155 /* Ignore esoteric PLL failures et al. */
2156
2157 qib_dev_err(dd, "%s hardware error\n", msg);
2158
2159 if (isfatal && !dd->diag_client) {
2160 qib_dev_err(dd,
2161 "Fatal Hardware Error, no longer usable, SN %.16s\n",
2162 dd->serial);
2163 /*
2164 * for /sys status file and user programs to print; if no
2165 * trailing brace is copied, we'll know it was truncated.
2166 */
2167 if (dd->freezemsg)
2168 snprintf(dd->freezemsg, dd->freezelen,
2169 "{%s}", msg);
2170 qib_disable_after_error(dd);
2171 }
2172bail:;
2173}
2174
2175/**
2176 * qib_7322_init_hwerrors - enable hardware errors
2177 * @dd: the qlogic_ib device
2178 *
2179 * now that we have finished initializing everything that might reasonably
2180 * cause a hardware error, and cleared those error bits as they occur,
2181 * we can enable hardware errors in the mask (potentially enabling
2182 * freeze mode), and enable hardware errors as errors (along with
2183 * everything else) in errormask
2184 */
2185static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2186{
2187 int pidx;
2188 u64 extsval;
2189
2190 extsval = qib_read_kreg64(dd, kr_extstatus);
2191 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2192 QIB_EXTS_MEMBIST_ENDTEST)))
2193 qib_dev_err(dd, "MemBIST did not complete!\n");
2194
2195 /* never clear BIST failure, so reported on each driver load */
2196 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2197 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2198
2199 /* clear all */
2200 qib_write_kreg(dd, kr_errclear, ~0ULL);
2201 /* enable errors that are masked, at least this first time. */
2202 qib_write_kreg(dd, kr_errmask, ~0ULL);
2203 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2204 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2205 if (dd->pport[pidx].link_speed_supported)
2206 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2207 ~0ULL);
2208}
2209
2210/*
2211 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
2212 * on chips that are count-based, rather than trigger-based. There is no
2213 * reference counting, but that's also fine, given the intended use.
2214 * Only chip-specific because it's all register accesses
2215 */
2216static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2217{
2218 if (enable) {
2219 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2220 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2221 } else
2222 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2223 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2224}
2225
2226/*
2227 * Formerly took parameter <which> in pre-shifted,
2228 * pre-merged form with LinkCmd and LinkInitCmd
2229 * together, and assuming the zero was NOP.
2230 */
2231static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2232 u16 linitcmd)
2233{
2234 u64 mod_wd;
2235 struct qib_devdata *dd = ppd->dd;
2236 unsigned long flags;
2237
2238 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2239 /*
2240 * If we are told to disable, note that so link-recovery
2241 * code does not attempt to bring us back up.
2242 * Also reset everything that we can, so we start
2243 * completely clean when re-enabled (before we
2244 * actually issue the disable to the IBC)
2245 */
2246 qib_7322_mini_pcs_reset(ppd);
2247 spin_lock_irqsave(&ppd->lflags_lock, flags);
2248 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2249 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2250 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2251 /*
2252 * Any other linkinitcmd will lead to LINKDOWN and then
2253 * to INIT (if all is well), so clear flag to let
2254 * link-recovery code attempt to bring us back up.
2255 */
2256 spin_lock_irqsave(&ppd->lflags_lock, flags);
2257 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2258 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2259 /*
2260 * Clear status change interrupt reduction so the
2261 * new state is seen.
2262 */
2263 ppd->cpspec->ibcctrl_a &=
2264 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2265 }
2266
2267 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2268 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2269
2270 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2271 mod_wd);
2272 /* write to chip to prevent back-to-back writes of ibc reg */
2273 qib_write_kreg(dd, kr_scratch, 0);
2274
2275}
2276
2277/*
2278 * The total RCV buffer memory is 64KB, used for both ports, and is
2279 * in units of 64 bytes (same as IB flow control credit unit).
2280 * The consumedVL unit in the same registers are in 32 byte units!
2281 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2282 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2283 * in krp_rxcreditvl15, rather than 10.
2284 */
2285#define RCV_BUF_UNITSZ 64
2286#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
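/* e.g. with both ports in use: (64 * 1024) / (64 * 2) = 512 buffer units per port */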
2287
2288static void set_vls(struct qib_pportdata *ppd)
2289{
2290 int i, numvls, totcred, cred_vl, vl0extra;
2291 struct qib_devdata *dd = ppd->dd;
2292 u64 val;
2293
2294 numvls = qib_num_vls(ppd->vls_operational);
2295
2296 /*
2296	 * Set up per-VL credits. Below is a kluge based on these assumptions:
2298 * 1) port is disabled at the time early_init is called.
2299 * 2) give VL15 17 credits, for two max-plausible packets.
2300 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2301 */
2302 /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2303 totcred = NUM_RCV_BUF_UNITS(dd);
2304 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
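	/* (2 * 288 + 63) / 64 = 9 rcv buffer units reserved for VL15 */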
2305 totcred -= cred_vl;
2306 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2307 cred_vl = totcred / numvls;
2308 vl0extra = totcred - cred_vl * numvls;
2309 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2310 for (i = 1; i < numvls; i++)
2311 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2312 for (; i < 8; i++) /* no buffer space for other VLs */
2313 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2314
2315 /* Notify IBC that credits need to be recalculated */
2316 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2317 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2318 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2319 qib_write_kreg(dd, kr_scratch, 0ULL);
2320 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2321 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2322
2323 for (i = 0; i < numvls; i++)
2324 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2325 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2326
2327 /* Change the number of operational VLs */
2328 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2329 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2330 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2331 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2332 qib_write_kreg(dd, kr_scratch, 0ULL);
2333}
2334
2335/*
2336 * The code that deals with actual SerDes is in serdes_7322_init().
2337 * Compared to the code for iba7220, it is minimal.
2338 */
2339static int serdes_7322_init(struct qib_pportdata *ppd);
2340
2341/**
2342 * qib_7322_bringup_serdes - bring up the serdes
2343 * @ppd: physical port on the qlogic_ib device
2344 */
2345static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2346{
2347 struct qib_devdata *dd = ppd->dd;
2348 u64 val, guid, ibc;
2349 unsigned long flags;
2350 int ret = 0;
2351
2352 /*
2353 * SerDes model not in Pd, but still need to
2354 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2355 * eventually.
2356 */
2357 /* Put IBC in reset, sends disabled (should be in reset already) */
2358 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2359 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2360 qib_write_kreg(dd, kr_scratch, 0ULL);
2361
2362 if (qib_compat_ddr_negotiate) {
2363 ppd->cpspec->ibdeltainprog = 1;
2364 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2365 crp_ibsymbolerr);
2366 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2367 crp_iblinkerrrecov);
2368 }
2369
2370 /* flowcontrolwatermark is in units of KBytes */
2371 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2372 /*
2373 * Flow control is sent this often, even if no changes in
2374 * buffer space occur. Units are 128ns for this chip.
2375 * Set to 3usec.
2376 */
2377 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
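	/* 24 * 128ns = 3072ns, i.e. approximately the 3usec mentioned above */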
2378 /* max error tolerance */
2379 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2380 /* IB credit flow control. */
2381 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2382 /*
2383 * set initial max size pkt IBC will send, including ICRC; it's the
2384 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2385 */
2386 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2387 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2388 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2389
2390 /*
2391 * Reset the PCS interface to the serdes (and also ibc, which is still
2392 * in reset from above). Writes new value of ibcctrl_a as last step.
2393 */
2394 qib_7322_mini_pcs_reset(ppd);
2395
2396 if (!ppd->cpspec->ibcctrl_b) {
2397 unsigned lse = ppd->link_speed_enabled;
2398
2399 /*
2400 * Not on re-init after reset, establish shadow
2401 * and force initial config.
2402 */
2403 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2404 krp_ibcctrl_b);
2405 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2406 IBA7322_IBC_SPEED_DDR |
2407 IBA7322_IBC_SPEED_SDR |
2408 IBA7322_IBC_WIDTH_AUTONEG |
2409 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2410		if (lse & (lse - 1)) /* Multiple speeds enabled */
2411 ppd->cpspec->ibcctrl_b |=
2412 (lse << IBA7322_IBC_SPEED_LSB) |
2413 IBA7322_IBC_IBTA_1_2_MASK |
2414 IBA7322_IBC_MAX_SPEED_MASK;
2415 else
2416 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2417 IBA7322_IBC_SPEED_QDR |
2418 IBA7322_IBC_IBTA_1_2_MASK :
2419 (lse == QIB_IB_DDR) ?
2420 IBA7322_IBC_SPEED_DDR :
2421 IBA7322_IBC_SPEED_SDR;
2422 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2423 (IB_WIDTH_1X | IB_WIDTH_4X))
2424 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2425 else
2426 ppd->cpspec->ibcctrl_b |=
2427 ppd->link_width_enabled == IB_WIDTH_4X ?
2428 IBA7322_IBC_WIDTH_4X_ONLY :
2429 IBA7322_IBC_WIDTH_1X_ONLY;
2430
2431 /* always enable these on driver reload, not sticky */
2432 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2433 IBA7322_IBC_HRTBT_MASK);
2434 }
2435 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2436
2437 /* setup so we have more time at CFGTEST to change H1 */
2438 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2439 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2440 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2441 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2442
2443 serdes_7322_init(ppd);
2444
2445 guid = be64_to_cpu(ppd->guid);
2446 if (!guid) {
2447 if (dd->base_guid)
2448 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2449 ppd->guid = cpu_to_be64(guid);
2450 }
2451
2452 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2453 /* write to chip to prevent back-to-back writes of ibc reg */
2454 qib_write_kreg(dd, kr_scratch, 0);
2455
2456 /* Enable port */
2457 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2458 set_vls(ppd);
2459
2460 /* initially come up DISABLED, without sending anything. */
2461 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2462 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2463 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2464 qib_write_kreg(dd, kr_scratch, 0ULL);
2465 /* clear the linkinit cmds */
2466 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2467
2468 /* be paranoid against later code motion, etc. */
2469 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2470 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2471 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2472 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2473
2474 /* Also enable IBSTATUSCHG interrupt. */
2475 val = qib_read_kreg_port(ppd, krp_errmask);
2476 qib_write_kreg_port(ppd, krp_errmask,
2477 val | ERR_MASK_N(IBStatusChanged));
2478
2479 /* Always zero until we start messing with SerDes for real */
2480 return ret;
2481}
2482
2483/**
2484 * qib_7322_mini_quiet_serdes - set serdes to txidle
2485 * @ppd: physical port on the qlogic_ib device
2486 * Called when driver is being unloaded
2487 */
2488static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2489{
2490 u64 val;
2491 unsigned long flags;
2492
2493 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2494
2495 spin_lock_irqsave(&ppd->lflags_lock, flags);
2496 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2497 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2498 wake_up(&ppd->cpspec->autoneg_wait);
2499	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2500	if (ppd->dd->cspec->r1)
2501		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2502
2503 ppd->cpspec->chase_end = 0;
2504 if (ppd->cpspec->chase_timer.data) /* if initted */
2505 del_timer_sync(&ppd->cpspec->chase_timer);
2506
2507 /*
2508 * Despite the name, actually disables IBC as well. Do it when
2509 * we are as sure as possible that no more packets can be
2510 * received, following the down and the PCS reset.
2511	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2512 * along with the PCS being reset.
2513 */
2514 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2515 qib_7322_mini_pcs_reset(ppd);
2516
2517 /*
2518 * Update the adjusted counters so the adjustment persists
2519 * across driver reload.
2520 */
2521 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2522 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2523 struct qib_devdata *dd = ppd->dd;
2524 u64 diagc;
2525
2526 /* enable counter writes */
2527 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2528 qib_write_kreg(dd, kr_hwdiagctrl,
2529 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2530
2531 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2532 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2533 if (ppd->cpspec->ibdeltainprog)
2534 val -= val - ppd->cpspec->ibsymsnap;
2535 val -= ppd->cpspec->ibsymdelta;
2536 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2537 }
2538 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2539 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2540 if (ppd->cpspec->ibdeltainprog)
2541 val -= val - ppd->cpspec->iblnkerrsnap;
2542 val -= ppd->cpspec->iblnkerrdelta;
2543 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2544 }
2545 if (ppd->cpspec->iblnkdowndelta) {
2546 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2547 val += ppd->cpspec->iblnkdowndelta;
2548 write_7322_creg_port(ppd, crp_iblinkdown, val);
2549 }
2550 /*
2551 * No need to save ibmalfdelta since IB perfcounters
2552 * are cleared on driver reload.
2553 */
2554
2555 /* and disable counter writes */
2556 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2557 }
2558}
2559
2560/**
2561 * qib_setup_7322_setextled - set the state of the two external LEDs
2562 * @ppd: physical port on the qlogic_ib device
2563 * @on: whether the link is up or not
2564 *
2565 * The exact combo of LEDs if on is true is determined by looking
2566 * at the ibcstatus.
2567 *
2568 * These LEDs indicate the physical and logical state of IB link.
2569 * For this chip (at least with recommended board pinouts), LED1
2570 * is Yellow (logical state) and LED2 is Green (physical state),
2571 *
2572 * Note: We try to match the Mellanox HCA LED behavior as best
2573 * we can. Green indicates physical link state is OK (something is
2574 * plugged in, and we can train).
2575 * Amber indicates the link is logically up (ACTIVE).
2576 * Mellanox further blinks the amber LED to indicate data packet
2577 * activity, but we have no hardware support for that, so it would
2578 * require waking up every 10-20 msecs and checking the counters
2579 * on the chip, and then turning the LED off if appropriate. That's
2580 * visible overhead, so not something we will do.
2581 */
2582static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2583{
2584 struct qib_devdata *dd = ppd->dd;
2585 u64 extctl, ledblink = 0, val;
2586 unsigned long flags;
2587 int yel, grn;
2588
2589 /*
2590 * The diags use the LED to indicate diag info, so we leave
2591 * the external LED alone when the diags are running.
2592 */
2593 if (dd->diag_client)
2594 return;
2595
2596 /* Allow override of LED display for, e.g. Locating system in rack */
2597 if (ppd->led_override) {
2598 grn = (ppd->led_override & QIB_LED_PHYS);
2599 yel = (ppd->led_override & QIB_LED_LOG);
2600 } else if (on) {
2601 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2602 grn = qib_7322_phys_portstate(val) ==
2603 IB_PHYSPORTSTATE_LINKUP;
2604 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2605 } else {
2606 grn = 0;
2607 yel = 0;
2608 }
2609
2610 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2611 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2612 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2613 if (grn) {
2614 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2615 /*
2616 * Counts are in chip clock (4ns) periods.
2617		 * This is roughly 1/15 sec (66.6 ms) on,
2618 * 3/16 sec (187.5 ms) off, with packets rcvd.
2619 */
2620 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2621 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
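		/* e.g. on-time: 66600us * 1000 / 4ns-per-clock = 16,650,000 chip clocks */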
2622 }
2623 if (yel)
2624 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2625 dd->cspec->extctrl = extctl;
2626 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2627 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2628
2629 if (ledblink) /* blink the LED on packet receive */
2630 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2631}
2632
2633#ifdef CONFIG_INFINIBAND_QIB_DCA
2634
2635static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2636{
2637 switch (event) {
2638 case DCA_PROVIDER_ADD:
2639 if (dd->flags & QIB_DCA_ENABLED)
2640 break;
2641 if (!dca_add_requester(&dd->pcidev->dev)) {
2642 qib_devinfo(dd->pcidev, "DCA enabled\n");
2643 dd->flags |= QIB_DCA_ENABLED;
2644 qib_setup_dca(dd);
2645 }
2646 break;
2647 case DCA_PROVIDER_REMOVE:
2648 if (dd->flags & QIB_DCA_ENABLED) {
2649 dca_remove_requester(&dd->pcidev->dev);
2650 dd->flags &= ~QIB_DCA_ENABLED;
2651 dd->cspec->dca_ctrl = 0;
2652 qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2653 dd->cspec->dca_ctrl);
2654 }
2655 break;
2656 }
2657 return 0;
2658}
2659
2660static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2661{
2662 struct qib_devdata *dd = rcd->dd;
2663 struct qib_chip_specific *cspec = dd->cspec;
2664
2665 if (!(dd->flags & QIB_DCA_ENABLED))
2666 return;
2667 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2668 const struct dca_reg_map *rmp;
2669
2670 cspec->rhdr_cpu[rcd->ctxt] = cpu;
2671 rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2672 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2673 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2674 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2675 qib_devinfo(dd->pcidev,
2676 "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2677 (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2678 qib_write_kreg(dd, rmp->regno,
2679 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2680 cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2681 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2682 }
2683}
2684
2685static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2686{
2687 struct qib_devdata *dd = ppd->dd;
2688 struct qib_chip_specific *cspec = dd->cspec;
2689 unsigned pidx = ppd->port - 1;
2690
2691 if (!(dd->flags & QIB_DCA_ENABLED))
2692 return;
2693 if (cspec->sdma_cpu[pidx] != cpu) {
2694 cspec->sdma_cpu[pidx] = cpu;
2695 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2696 SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2697 SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2698 cspec->dca_rcvhdr_ctrl[4] |=
2699 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2700 (ppd->hw_pidx ?
2701 SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2702 SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2703 qib_devinfo(dd->pcidev,
2704 "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2705 (long long) cspec->dca_rcvhdr_ctrl[4]);
2706 qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2707 cspec->dca_rcvhdr_ctrl[4]);
2708 cspec->dca_ctrl |= ppd->hw_pidx ?
2709 SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2710 SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2711 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2712 }
2713}
2714
2715static void qib_setup_dca(struct qib_devdata *dd)
2716{
2717 struct qib_chip_specific *cspec = dd->cspec;
2718 int i;
2719
2720 for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2721 cspec->rhdr_cpu[i] = -1;
2722 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2723 cspec->sdma_cpu[i] = -1;
2724 cspec->dca_rcvhdr_ctrl[0] =
2725 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2726 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2727 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2728 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2729 cspec->dca_rcvhdr_ctrl[1] =
2730 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2731 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2732 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2733 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2734 cspec->dca_rcvhdr_ctrl[2] =
2735 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2736 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2737 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2738 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2739 cspec->dca_rcvhdr_ctrl[3] =
2740 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2741 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2742 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2743 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2744 cspec->dca_rcvhdr_ctrl[4] =
2745 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2746 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2747 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2748 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2749 cspec->dca_rcvhdr_ctrl[i]);
2750 for (i = 0; i < cspec->num_msix_entries; i++)
2751 setup_dca_notifier(dd, &cspec->msix_entries[i]);
2752}
2753
2754static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2755 const cpumask_t *mask)
2756{
2757 struct qib_irq_notify *n =
2758 container_of(notify, struct qib_irq_notify, notify);
2759 int cpu = cpumask_first(mask);
2760
2761 if (n->rcv) {
2762 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2763 qib_update_rhdrq_dca(rcd, cpu);
2764 } else {
2765 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2766 qib_update_sdma_dca(ppd, cpu);
2767 }
2768}
2769
2770static void qib_irq_notifier_release(struct kref *ref)
2771{
2772 struct qib_irq_notify *n =
2773 container_of(ref, struct qib_irq_notify, notify.kref);
2774 struct qib_devdata *dd;
2775
2776 if (n->rcv) {
2777 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2778 dd = rcd->dd;
2779 } else {
2780 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2781 dd = ppd->dd;
2782 }
2783 qib_devinfo(dd->pcidev,
2784 "release on HCA notify 0x%p n 0x%p\n", ref, n);
2785 kfree(n);
2786}
2787#endif
2788
2789/*
2790 * Disable MSIx interrupt if enabled, call generic MSIx code
2791 * to cleanup, and clear pending MSIx interrupts.
2792 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2793 */
2794static void qib_7322_nomsix(struct qib_devdata *dd)
2795{
2796 u64 intgranted;
2797 int n;
2798
2799 dd->cspec->main_int_mask = ~0ULL;
2800 n = dd->cspec->num_msix_entries;
2801 if (n) {
2802 int i;
2803
2804 dd->cspec->num_msix_entries = 0;
2805		for (i = 0; i < n; i++) {
2806#ifdef CONFIG_INFINIBAND_QIB_DCA
2807			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2808#endif
2809 irq_set_affinity_hint(
2810 dd->cspec->msix_entries[i].msix.vector, NULL);
2811 free_cpumask_var(dd->cspec->msix_entries[i].mask);
2812 free_irq(dd->cspec->msix_entries[i].msix.vector,
2813 dd->cspec->msix_entries[i].arg);
2814 }
2815 qib_nomsix(dd);
2816 }
2817 /* make sure no MSIx interrupts are left pending */
2818 intgranted = qib_read_kreg64(dd, kr_intgranted);
2819 if (intgranted)
2820 qib_write_kreg(dd, kr_intgranted, intgranted);
2821}
2822
2823static void qib_7322_free_irq(struct qib_devdata *dd)
2824{
2825 if (dd->cspec->irq) {
2826 free_irq(dd->cspec->irq, dd);
2827 dd->cspec->irq = 0;
2828 }
2829 qib_7322_nomsix(dd);
2830}
2831
2832static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2833{
2834 int i;
2835
2836#ifdef CONFIG_INFINIBAND_QIB_DCA
2837 if (dd->flags & QIB_DCA_ENABLED) {
2838 dca_remove_requester(&dd->pcidev->dev);
2839 dd->flags &= ~QIB_DCA_ENABLED;
2840 dd->cspec->dca_ctrl = 0;
2841 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2842 }
2843#endif
2844
2845 qib_7322_free_irq(dd);
2846 kfree(dd->cspec->cntrs);
2847 kfree(dd->cspec->sendchkenable);
2848 kfree(dd->cspec->sendgrhchk);
2849 kfree(dd->cspec->sendibchk);
2850 kfree(dd->cspec->msix_entries);
2851 for (i = 0; i < dd->num_pports; i++) {
2852 unsigned long flags;
2853 u32 mask = QSFP_GPIO_MOD_PRS_N |
2854 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2855
2856 kfree(dd->pport[i].cpspec->portcntrs);
2857 if (dd->flags & QIB_HAS_QSFP) {
2858 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2859 dd->cspec->gpio_mask &= ~mask;
2860 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2861 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2862 qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2863 }
2864 if (dd->pport[i].ibport_data.smi_ah)
2865 ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2866 }
2867}
2868
2869/* handle SDMA interrupts */
2870static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2871{
2872 struct qib_pportdata *ppd0 = &dd->pport[0];
2873 struct qib_pportdata *ppd1 = &dd->pport[1];
2874 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2875 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2876 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2877 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2878
2879 if (intr0)
2880 qib_sdma_intr(ppd0);
2881 if (intr1)
2882 qib_sdma_intr(ppd1);
2883
2884 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2885 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2886 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2887 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2888}
2889
2890/*
2891 * Set or clear the Send buffer available interrupt enable bit.
2892 */
2893static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2894{
2895 unsigned long flags;
2896
2897 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2898 if (needint)
2899 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2900 else
2901 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2902 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2903 qib_write_kreg(dd, kr_scratch, 0ULL);
2904 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2905}
2906
2907/*
2908 * Somehow got an interrupt with reserved bits set in interrupt status.
2909 * Print a message so we know it happened, then clear them.
2910 * keep mainline interrupt handler cache-friendly
2911 */
2912static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2913{
2914 u64 kills;
2915
2916
2917 kills = istat & ~QIB_I_BITSEXTANT;
2918 qib_dev_err(dd,
2919 "Clearing reserved interrupt(s) 0x%016llx: %s\n",
2920 (unsigned long long) kills, msg);
2921 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2922}
2923
2924/* keep mainline interrupt handler cache-friendly */
2925static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2926{
2927 u32 gpiostatus;
2928 int handled = 0;
2929 int pidx;
2930
2931 /*
2932 * Boards for this chip currently don't use GPIO interrupts,
2933 * so clear by writing GPIOstatus to GPIOclear, and complain
2934 * to developer. To avoid endless repeats, clear
2935 * the bits in the mask, since there is some kind of
2936 * programming error or chip problem.
2937 */
2938 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2939 /*
2940 * In theory, writing GPIOstatus to GPIOclear could
2941 * have a bad side-effect on some diagnostic that wanted
2942 * to poll for a status-change, but the various shadows
2943 * make that problematic at best. Diags will just suppress
2944 * all GPIO interrupts during such tests.
2945 */
2946 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2947 /*
2948 * Check for QSFP MOD_PRS changes
2949 * only works for single port if IB1 != pidx1
2950 */
2951 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2952 ++pidx) {
2953 struct qib_pportdata *ppd;
2954 struct qib_qsfp_data *qd;
2955 u32 mask;
2956 if (!dd->pport[pidx].link_speed_supported)
2957 continue;
2958 mask = QSFP_GPIO_MOD_PRS_N;
2959 ppd = dd->pport + pidx;
2960 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2961 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2962 u64 pins;
2963 qd = &ppd->cpspec->qsfp_data;
2964 gpiostatus &= ~mask;
2965 pins = qib_read_kreg64(dd, kr_extstatus);
2966 pins >>= SYM_LSB(EXTStatus, GPIOIn);
2967 if (!(pins & mask)) {
2968 ++handled;
2969				qd->t_insert = jiffies;
2970				queue_work(ib_wq, &qd->work);
2971 }
2972 }
2973 }
2974
2975 if (gpiostatus && !handled) {
2976 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2977 u32 gpio_irq = mask & gpiostatus;
2978
2979 /*
2980 * Clear any troublemakers, and update chip from shadow
2981 */
2982 dd->cspec->gpio_mask &= ~gpio_irq;
2983 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2984 }
2985}
2986
2987/*
2988 * Handle errors and unusual events first, separate function
2989 * to improve cache hits for fast path interrupt handling.
2990 */
2991static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2992{
2993 if (istat & ~QIB_I_BITSEXTANT)
2994 unknown_7322_ibits(dd, istat);
2995 if (istat & QIB_I_GPIO)
2996 unknown_7322_gpio_intr(dd);
2997 if (istat & QIB_I_C_ERROR) {
2998 qib_write_kreg(dd, kr_errmask, 0ULL);
2999 tasklet_schedule(&dd->error_tasklet);
3000 }
3001 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3002 handle_7322_p_errors(dd->rcd[0]->ppd);
3003 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3004 handle_7322_p_errors(dd->rcd[1]->ppd);
3005}
3006
3007/*
3008 * Dynamically adjust the rcv int timeout for a context based on incoming
3009 * packet rate.
3010 */
3011static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3012{
3013 struct qib_devdata *dd = rcd->dd;
3014 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3015
3016 /*
3017 * Dynamically adjust idle timeout on chip
3018 * based on number of packets processed.
3019 */
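	/* light traffic: halve the timeout; heavy traffic: double it, capped at rcv_int_timeout */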
3020 if (npkts < rcv_int_count && timeout > 2)
3021 timeout >>= 1;
3022 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3023 timeout = min(timeout << 1, rcv_int_timeout);
3024 else
3025 return;
3026
3027 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3028 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3029}
3030
3031/*
3032 * This is the main interrupt handler.
3033 * It will normally only be used for low frequency interrupts but may
3034 * have to handle all interrupts if INTx is enabled or fewer than normal
3035 * MSIx interrupts were allocated.
3036 * This routine should ignore the interrupt bits for any of the
3037 * dedicated MSIx handlers.
3038 */
3039static irqreturn_t qib_7322intr(int irq, void *data)
3040{
3041 struct qib_devdata *dd = data;
3042 irqreturn_t ret;
3043 u64 istat;
3044 u64 ctxtrbits;
3045 u64 rmask;
3046 unsigned i;
3047 u32 npkts;
3048
3049 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3050 /*
3051 * This return value is not great, but we do not want the
3052 * interrupt core code to remove our interrupt handler
3053 * because we don't appear to be handling an interrupt
3054 * during a chip reset.
3055 */
3056 ret = IRQ_HANDLED;
3057 goto bail;
3058 }
3059
3060 istat = qib_read_kreg64(dd, kr_intstatus);
3061
3062 if (unlikely(istat == ~0ULL)) {
3063 qib_bad_intrstatus(dd);
3064 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3065 /* don't know if it was our interrupt or not */
3066 ret = IRQ_NONE;
3067 goto bail;
3068 }
3069
3070 istat &= dd->cspec->main_int_mask;
3071 if (unlikely(!istat)) {
3072 /* already handled, or shared and not us */
3073 ret = IRQ_NONE;
3074 goto bail;
3075 }
3076
3077 qib_stats.sps_ints++;
3078 if (dd->int_counter != (u32) -1)
3079 dd->int_counter++;
3080
3081 /* handle "errors" of various kinds first, device ahead of port */
3082 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3083 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3084 INT_MASK_P(Err, 1))))
3085 unlikely_7322_intr(dd, istat);
3086
3087 /*
3088 * Clear the interrupt bits we found set, relatively early, so we
3089 * "know" the chip will have seen this by the time we process
3090 * the queue, and will re-interrupt if necessary. The processor
3091 * itself won't take the interrupt again until we return.
3092 */
3093 qib_write_kreg(dd, kr_intclear, istat);
3094
3095 /*
3096 * Handle kernel receive queues before checking for pio buffers
3097 * available since receives can overflow; piobuf waiters can afford
3098 * a few extra cycles, since they were waiting anyway.
3099 */
3100 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3101 if (ctxtrbits) {
3102 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3103 (1ULL << QIB_I_RCVURG_LSB);
3104 for (i = 0; i < dd->first_user_ctxt; i++) {
3105 if (ctxtrbits & rmask) {
3106 ctxtrbits &= ~rmask;
44d75d3d 3107 if (dd->rcd[i])
f931551b 3108 qib_kreceive(dd->rcd[i], NULL, &npkts);
3109 }
3110 rmask <<= 1;
3111 }
3112 if (ctxtrbits) {
3113 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3114 (ctxtrbits >> QIB_I_RCVURG_LSB);
3115 qib_handle_urcv(dd, ctxtrbits);
3116 }
3117 }
3118
3119 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3120 sdma_7322_intr(dd, istat);
3121
3122 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3123 qib_ib_piobufavail(dd);
3124
3125 ret = IRQ_HANDLED;
3126bail:
3127 return ret;
3128}
3129
3130/*
3131 * Dedicated receive packet available interrupt handler.
3132 */
3133static irqreturn_t qib_7322pintr(int irq, void *data)
3134{
3135 struct qib_ctxtdata *rcd = data;
3136 struct qib_devdata *dd = rcd->dd;
3137 u32 npkts;
3138
3139 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3140 /*
3141 * This return value is not great, but we do not want the
3142 * interrupt core code to remove our interrupt handler
3143 * because we don't appear to be handling an interrupt
3144 * during a chip reset.
3145 */
3146 return IRQ_HANDLED;
3147
3148 qib_stats.sps_ints++;
3149 if (dd->int_counter != (u32) -1)
3150 dd->int_counter++;
3151
3152 /* Clear the interrupt bit we expect to be set. */
3153 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3154 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3155
3156 qib_kreceive(rcd, NULL, &npkts);
3157
3158 return IRQ_HANDLED;
3159}
3160
3161/*
3162 * Dedicated Send buffer available interrupt handler.
3163 */
3164static irqreturn_t qib_7322bufavail(int irq, void *data)
3165{
3166 struct qib_devdata *dd = data;
3167
3168 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3169 /*
3170 * This return value is not great, but we do not want the
3171 * interrupt core code to remove our interrupt handler
3172 * because we don't appear to be handling an interrupt
3173 * during a chip reset.
3174 */
3175 return IRQ_HANDLED;
3176
3177 qib_stats.sps_ints++;
3178 if (dd->int_counter != (u32) -1)
3179 dd->int_counter++;
3180
3181 /* Clear the interrupt bit we expect to be set. */
3182 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3183
3184 /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3185 if (dd->flags & QIB_INITTED)
3186 qib_ib_piobufavail(dd);
3187 else
3188 qib_wantpiobuf_7322_intr(dd, 0);
3189
3190 return IRQ_HANDLED;
3191}
3192
3193/*
3194 * Dedicated Send DMA interrupt handler.
3195 */
3196static irqreturn_t sdma_intr(int irq, void *data)
3197{
3198 struct qib_pportdata *ppd = data;
3199 struct qib_devdata *dd = ppd->dd;
3200
3201 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3202 /*
3203 * This return value is not great, but we do not want the
3204 * interrupt core code to remove our interrupt handler
3205 * because we don't appear to be handling an interrupt
3206 * during a chip reset.
3207 */
3208 return IRQ_HANDLED;
3209
3210 qib_stats.sps_ints++;
3211 if (dd->int_counter != (u32) -1)
3212 dd->int_counter++;
3213
3214 /* Clear the interrupt bit we expect to be set. */
3215 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3216 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3217 qib_sdma_intr(ppd);
3218
3219 return IRQ_HANDLED;
3220}
3221
3222/*
3223 * Dedicated Send DMA idle interrupt handler.
3224 */
3225static irqreturn_t sdma_idle_intr(int irq, void *data)
3226{
3227 struct qib_pportdata *ppd = data;
3228 struct qib_devdata *dd = ppd->dd;
3229
3230 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3231 /*
3232 * This return value is not great, but we do not want the
3233 * interrupt core code to remove our interrupt handler
3234 * because we don't appear to be handling an interrupt
3235 * during a chip reset.
3236 */
3237 return IRQ_HANDLED;
3238
3239 qib_stats.sps_ints++;
3240 if (dd->int_counter != (u32) -1)
3241 dd->int_counter++;
3242
3243 /* Clear the interrupt bit we expect to be set. */
3244 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3245 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3246 qib_sdma_intr(ppd);
3247
3248 return IRQ_HANDLED;
3249}
3250
3251/*
3252 * Dedicated Send DMA progress interrupt handler.
3253 */
3254static irqreturn_t sdma_progress_intr(int irq, void *data)
3255{
3256 struct qib_pportdata *ppd = data;
3257 struct qib_devdata *dd = ppd->dd;
3258
3259 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3260 /*
3261 * This return value is not great, but we do not want the
3262 * interrupt core code to remove our interrupt handler
3263 * because we don't appear to be handling an interrupt
3264 * during a chip reset.
3265 */
3266 return IRQ_HANDLED;
3267
3268 qib_stats.sps_ints++;
3269 if (dd->int_counter != (u32) -1)
3270 dd->int_counter++;
3271
3272 /* Clear the interrupt bit we expect to be set. */
3273 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3274 INT_MASK_P(SDmaProgress, 1) :
3275 INT_MASK_P(SDmaProgress, 0));
3276 qib_sdma_intr(ppd);
3277
3278 return IRQ_HANDLED;
3279}
3280
3281/*
3282 * Dedicated Send DMA cleanup interrupt handler.
3283 */
3284static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3285{
3286 struct qib_pportdata *ppd = data;
3287 struct qib_devdata *dd = ppd->dd;
3288
3289 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3290 /*
3291 * This return value is not great, but we do not want the
3292 * interrupt core code to remove our interrupt handler
3293 * because we don't appear to be handling an interrupt
3294 * during a chip reset.
3295 */
3296 return IRQ_HANDLED;
3297
3298 qib_stats.sps_ints++;
3299 if (dd->int_counter != (u32) -1)
3300 dd->int_counter++;
3301
3302 /* Clear the interrupt bit we expect to be set. */
3303 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3304 INT_MASK_PM(SDmaCleanupDone, 1) :
3305 INT_MASK_PM(SDmaCleanupDone, 0));
3306 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3307
3308 return IRQ_HANDLED;
3309}
3310
3311#ifdef CONFIG_INFINIBAND_QIB_DCA
3312
3313static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3314{
3315 if (!m->dca)
3316 return;
3317 qib_devinfo(dd->pcidev,
3318 "Disabling notifier on HCA %d irq %d\n",
3319 dd->unit,
3320 m->msix.vector);
3321 irq_set_affinity_notifier(
3322 m->msix.vector,
3323 NULL);
3324 m->notifier = NULL;
3325}
3326
3327static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3328{
3329 struct qib_irq_notify *n;
3330
3331 if (!m->dca)
3332 return;
3333 n = kzalloc(sizeof(*n), GFP_KERNEL);
3334 if (n) {
3335 int ret;
3336
3337 m->notifier = n;
3338 n->notify.irq = m->msix.vector;
3339 n->notify.notify = qib_irq_notifier_notify;
3340 n->notify.release = qib_irq_notifier_release;
3341 n->arg = m->arg;
3342 n->rcv = m->rcv;
3343 qib_devinfo(dd->pcidev,
3344 "set notifier irq %d rcv %d notify %p\n",
3345 n->notify.irq, n->rcv, &n->notify);
3346 ret = irq_set_affinity_notifier(
3347 n->notify.irq,
3348 &n->notify);
3349 if (ret) {
3350 m->notifier = NULL;
3351 kfree(n);
3352 }
3353 }
3354}
3355
3356#endif
3357
3358/*
3359 * Set up our chip-specific interrupt handler.
3360 * The interrupt type has already been setup, so
3361 * we just need to do the registration and error checking.
3362 * If we are using MSIx interrupts, we may fall back to
3363 * INTx later, if the interrupt handler doesn't get called
3364 * within 1/2 second (see verify_interrupt()).
3365 */
3366static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3367{
3368 int ret, i, msixnum;
3369 u64 redirect[6];
3370 u64 mask;
3371 const struct cpumask *local_mask;
3372 int firstcpu, secondcpu = 0, currrcvcpu = 0;
3373
3374 if (!dd->num_pports)
3375 return;
3376
3377 if (clearpend) {
3378 /*
3379 * if not switching interrupt types, be sure interrupts are
3380 * disabled, and then clear anything pending at this point,
3381 * because we are starting clean.
3382 */
3383 qib_7322_set_intr_state(dd, 0);
3384
3385 /* clear the reset error, init error/hwerror mask */
3386 qib_7322_init_hwerrors(dd);
3387
3388 /* clear any interrupt bits that might be set */
3389 qib_write_kreg(dd, kr_intclear, ~0ULL);
3390
3391 /* make sure no pending MSIx intr, and clear diag reg */
3392 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3393 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3394 }
3395
3396 if (!dd->cspec->num_msix_entries) {
3397 /* Try to get INTx interrupt */
3398try_intx:
3399 if (!dd->pcidev->irq) {
3400 qib_dev_err(dd,
3401 "irq is 0, BIOS error? Interrupts won't work\n");
3402 goto bail;
3403 }
3404 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3405 IRQF_SHARED, QIB_DRV_NAME, dd);
3406 if (ret) {
3407 qib_dev_err(dd,
3408 "Couldn't setup INTx interrupt (irq=%d): %d\n",
3409 dd->pcidev->irq, ret);
3410 goto bail;
3411 }
3412 dd->cspec->irq = dd->pcidev->irq;
3413 dd->cspec->main_int_mask = ~0ULL;
3414 goto bail;
3415 }
3416
3417 /* Try to get MSIx interrupts */
3418 memset(redirect, 0, sizeof redirect);
3419 mask = ~0ULL;
3420 msixnum = 0;
3421 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3422 firstcpu = cpumask_first(local_mask);
3423 if (firstcpu >= nr_cpu_ids ||
3424 cpumask_weight(local_mask) == num_online_cpus()) {
3425 local_mask = topology_core_cpumask(0);
3426 firstcpu = cpumask_first(local_mask);
3427 }
3428 if (firstcpu < nr_cpu_ids) {
3429 secondcpu = cpumask_next(firstcpu, local_mask);
3430 if (secondcpu >= nr_cpu_ids)
3431 secondcpu = firstcpu;
3432 currrcvcpu = secondcpu;
3433 }
3434 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3435 irq_handler_t handler;
3436 void *arg;
3437 u64 val;
3438 int lsb, reg, sh;
3439#ifdef CONFIG_INFINIBAND_QIB_DCA
3440 int dca = 0;
3441#endif
f931551b 3442
3443 dd->cspec->msix_entries[msixnum].
3444 name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3445 = '\0';
3446 if (i < ARRAY_SIZE(irq_table)) {
3447 if (irq_table[i].port) {
3448 /* skip if for a non-configured port */
3449 if (irq_table[i].port > dd->num_pports)
3450 continue;
3451 arg = dd->pport + irq_table[i].port - 1;
3452 } else
3453 arg = dd;
3454#ifdef CONFIG_INFINIBAND_QIB_DCA
3455 dca = irq_table[i].dca;
3456#endif
3457 lsb = irq_table[i].lsb;
3458 handler = irq_table[i].handler;
3459 snprintf(dd->cspec->msix_entries[msixnum].name,
3460 sizeof(dd->cspec->msix_entries[msixnum].name)
3461 - 1,
3462 QIB_DRV_NAME "%d%s", dd->unit,
3463 irq_table[i].name);
3464 } else {
3465 unsigned ctxt;
3466
3467 ctxt = i - ARRAY_SIZE(irq_table);
3468 /* per krcvq context receive interrupt */
3469 arg = dd->rcd[ctxt];
3470 if (!arg)
3471 continue;
3472 if (qib_krcvq01_no_msi && ctxt < 2)
3473 continue;
3474#ifdef CONFIG_INFINIBAND_QIB_DCA
3475 dca = 1;
3476#endif
3477 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3478 handler = qib_7322pintr;
3479 snprintf(dd->cspec->msix_entries[msixnum].name,
3480 sizeof(dd->cspec->msix_entries[msixnum].name)
3481 - 1,
3482 QIB_DRV_NAME "%d (kctx)", dd->unit);
f931551b 3483 }
3484 ret = request_irq(
3485 dd->cspec->msix_entries[msixnum].msix.vector,
3486 handler, 0, dd->cspec->msix_entries[msixnum].name,
3487 arg);
3488 if (ret) {
3489 /*
3490 * Shouldn't happen since the enable said we could
3491 * have as many as we are trying to setup here.
3492 */
3493 qib_dev_err(dd,
3494 "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3495 msixnum,
3496 dd->cspec->msix_entries[msixnum].msix.vector,
3497 ret);
3498 qib_7322_nomsix(dd);
3499 goto try_intx;
3500 }
a778f3fd 3501 dd->cspec->msix_entries[msixnum].arg = arg;
3502#ifdef CONFIG_INFINIBAND_QIB_DCA
3503 dd->cspec->msix_entries[msixnum].dca = dca;
3504 dd->cspec->msix_entries[msixnum].rcv =
3505 handler == qib_7322pintr;
3506#endif
3507 if (lsb >= 0) {
3508 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3509 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3510 SYM_LSB(IntRedirect0, vec1);
3511 mask &= ~(1ULL << lsb);
3512 redirect[reg] |= ((u64) msixnum) << sh;
3513 }
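/*
 * Illustrative packing (assuming IBA7322_REDIRECT_VEC_PER_REG is 12
 * and each vector field is 5 bits wide): a source whose lsb is 17
 * lands in redirect[1] at bit offset (17 % 12) * 5 = 25, and its bit
 * is cleared from the INTx fallback mask since a dedicated MSIx
 * vector now owns it.
 */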
3514 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3515 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3516 if (firstcpu < nr_cpu_ids &&
3517 zalloc_cpumask_var(
3518 &dd->cspec->msix_entries[msixnum].mask,
3519 GFP_KERNEL)) {
3520 if (handler == qib_7322pintr) {
3521 cpumask_set_cpu(currrcvcpu,
3522 dd->cspec->msix_entries[msixnum].mask);
3523 currrcvcpu = cpumask_next(currrcvcpu,
3524 local_mask);
3525 if (currrcvcpu >= nr_cpu_ids)
3526 currrcvcpu = secondcpu;
3527 } else {
3528 cpumask_set_cpu(firstcpu,
3529 dd->cspec->msix_entries[msixnum].mask);
3530 }
3531 irq_set_affinity_hint(
3532 dd->cspec->msix_entries[msixnum].msix.vector,
3533 dd->cspec->msix_entries[msixnum].mask);
3534 }
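/*
 * Net effect of the affinity hints above: non-receive vectors (errors,
 * SDMA, buffer-available) are hinted to the first CPU of the chosen
 * local mask, while kernel receive-context vectors round-robin over
 * the remaining CPUs in that mask starting at the second one, wrapping
 * back to the second when the mask is exhausted.
 */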
3535 msixnum++;
3536 }
3537 /* Initialize the vector mapping */
3538 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3539 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3540 dd->cspec->main_int_mask = mask;
3541 tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3542 (unsigned long)dd);
3543bail:;
3544}
3545
3546/**
3547 * qib_7322_boardname - fill in the board name and note features
3548 * @dd: the qlogic_ib device
3549 *
3550 * info will be based on the board revision register
3551 */
3552static unsigned qib_7322_boardname(struct qib_devdata *dd)
3553{
3554 /* Will need enumeration of board-types here */
3555 char *n;
3556 u32 boardid, namelen;
3557 unsigned features = DUAL_PORT_CAP;
3558
3559 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3560
3561 switch (boardid) {
3562 case 0:
3563 n = "InfiniPath_QLE7342_Emulation";
3564 break;
3565 case 1:
3566 n = "InfiniPath_QLE7340";
3567 dd->flags |= QIB_HAS_QSFP;
3568 features = PORT_SPD_CAP;
3569 break;
3570 case 2:
3571 n = "InfiniPath_QLE7342";
3572 dd->flags |= QIB_HAS_QSFP;
3573 break;
3574 case 3:
3575 n = "InfiniPath_QMI7342";
3576 break;
3577 case 4:
3578 n = "InfiniPath_Unsupported7342";
3579 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3580 features = 0;
3581 break;
3582 case BOARD_QMH7342:
3583 n = "InfiniPath_QMH7342";
3584 features = 0x24;
3585 break;
3586 case BOARD_QME7342:
3587 n = "InfiniPath_QME7342";
3588 break;
3589 case 8:
3590 n = "InfiniPath_QME7362";
3591 dd->flags |= QIB_HAS_QSFP;
3592 break;
3593 case 15:
3594 n = "InfiniPath_QLE7342_TEST";
3595 dd->flags |= QIB_HAS_QSFP;
3596 break;
3597 default:
3598 n = "InfiniPath_QLE73xy_UNKNOWN";
3599 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3600 break;
3601 }
3602 dd->board_atten = 1; /* index into txdds_Xdr */
3603
3604 namelen = strlen(n) + 1;
3605 dd->boardname = kmalloc(namelen, GFP_KERNEL);
3606 if (!dd->boardname)
3607 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3608 else
3609 snprintf(dd->boardname, namelen, "%s", n);
3610
3611 snprintf(dd->boardversion, sizeof(dd->boardversion),
3612 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3613 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3614 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3615 dd->majrev, dd->minrev,
3616 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3617
3618 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3619 qib_devinfo(dd->pcidev,
3620 "IB%u: Forced to single port mode by module parameter\n",
3621 dd->unit);
3622 features &= PORT_SPD_CAP;
3623 }
3624
3625 return features;
3626}
3627
3628/*
3629 * This routine sleeps, so it can only be called from user context, not
3630 * from interrupt context.
3631 */
3632static int qib_do_7322_reset(struct qib_devdata *dd)
3633{
3634 u64 val;
3635 u64 *msix_vecsave;
3636 int i, msix_entries, ret = 1;
3637 u16 cmdval;
3638 u8 int_line, clinesz;
3639 unsigned long flags;
3640
3641 /* Use dev_err so it shows up in logs, etc. */
3642 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3643
3644 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3645
3646 msix_entries = dd->cspec->num_msix_entries;
3647
3648 /* no interrupts till re-initted */
3649 qib_7322_set_intr_state(dd, 0);
3650
3651 if (msix_entries) {
3652 qib_7322_nomsix(dd);
3653 /* can be up to 512 bytes, too big for stack */
3654 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3655 sizeof(u64), GFP_KERNEL);
3656 if (!msix_vecsave)
3657 qib_dev_err(dd, "No mem to save MSIx data\n");
3658 } else
3659 msix_vecsave = NULL;
3660
3661 /*
3662 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3663 * info that is set up by the BIOS, so we have to save and restore
3664 * it ourselves. There is some risk something could change it,
3665 * after we save it, but since we have disabled the MSIx, it
3666 * shouldn't be touched...
3667 */
3668 for (i = 0; i < msix_entries; i++) {
3669 u64 vecaddr, vecdata;
3670 vecaddr = qib_read_kreg64(dd, 2 * i +
3671 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3672 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3673 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3674 if (msix_vecsave) {
3675 msix_vecsave[2 * i] = vecaddr;
3676 /* save it without the masked bit set */
3677 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3678 }
3679 }
3680
3681 dd->pport->cpspec->ibdeltainprog = 0;
3682 dd->pport->cpspec->ibsymdelta = 0;
3683 dd->pport->cpspec->iblnkerrdelta = 0;
3684 dd->pport->cpspec->ibmalfdelta = 0;
3685 dd->int_counter = 0; /* so we check interrupts work again */
3686
3687 /*
3688 * Keep chip from being accessed until we are ready. Use
3689 * writeq() directly, to allow the write even though QIB_PRESENT
e9c54999 3690 * isn't set.
3691 */
3692 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3693 dd->flags |= QIB_DOING_RESET;
3694 val = dd->control | QLOGIC_IB_C_RESET;
3695 writeq(val, &dd->kregbase[kr_control]);
3696
3697 for (i = 1; i <= 5; i++) {
3698 /*
3699 * Allow MBIST, etc. to complete; longer on each retry.
3700 * We sometimes get machine checks from bus timeout if no
3701 * response, so for now, make it *really* long.
3702 */
3703 msleep(1000 + (1 + i) * 3000);
3704
3705 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3706
3707 /*
3708 * Use readq directly, so we don't need to mark it as PRESENT
3709 * until we get a successful indication that all is well.
3710 */
3711 val = readq(&dd->kregbase[kr_revision]);
3712 if (val == dd->revision)
3713 break;
3714 if (i == 5) {
3715 qib_dev_err(dd,
3716 "Failed to initialize after reset, unusable\n");
3717 ret = 0;
3718 goto bail;
3719 }
3720 }
3721
3722 dd->flags |= QIB_PRESENT; /* it's back */
3723
3724 if (msix_entries) {
3725 /* restore the MSIx vector address and data if saved above */
3726 for (i = 0; i < msix_entries; i++) {
a778f3fd 3727 dd->cspec->msix_entries[i].msix.entry = i;
3728 if (!msix_vecsave || !msix_vecsave[2 * i])
3729 continue;
3730 qib_write_kreg(dd, 2 * i +
3731 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3732 msix_vecsave[2 * i]);
3733 qib_write_kreg(dd, 1 + 2 * i +
3734 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3735 msix_vecsave[1 + 2 * i]);
3736 }
3737 }
3738
3739 /* initialize the remaining registers. */
3740 for (i = 0; i < dd->num_pports; ++i)
3741 write_7322_init_portregs(&dd->pport[i]);
3742 write_7322_initregs(dd);
3743
3744 if (qib_pcie_params(dd, dd->lbus_width,
3745 &dd->cspec->num_msix_entries,
3746 dd->cspec->msix_entries))
3747 qib_dev_err(dd,
3748 "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3749
3750 qib_setup_7322_interrupt(dd, 1);
3751
3752 for (i = 0; i < dd->num_pports; ++i) {
3753 struct qib_pportdata *ppd = &dd->pport[i];
3754
3755 spin_lock_irqsave(&ppd->lflags_lock, flags);
3756 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3757 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3758 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3759 }
3760
3761bail:
3762 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3763 kfree(msix_vecsave);
3764 return ret;
3765}
3766
3767/**
3768 * qib_7322_put_tid - write a TID to the chip
3769 * @dd: the qlogic_ib device
3770 * @tidptr: pointer to the expected TID (in chip) to update
3771 * @type: 0 for eager, 1 for expected
3772 * @pa: physical address of in memory buffer; tidinvalid if freeing
3773 */
3774static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3775 u32 type, unsigned long pa)
3776{
3777 if (!(dd->flags & QIB_PRESENT))
3778 return;
3779 if (pa != dd->tidinvalid) {
3780 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3781
3782 /* paranoia checks */
3783 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3784 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3785 pa);
3786 return;
3787 }
3788 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3789 qib_dev_err(dd,
3790 "Physical page address 0x%lx larger than supported\n",
3791 pa);
3792 return;
3793 }
3794
3795 if (type == RCVHQ_RCV_TYPE_EAGER)
3796 chippa |= dd->tidtemplate;
3797 else /* for now, always full 4KB page */
3798 chippa |= IBA7322_TID_SZ_4K;
3799 pa = chippa;
3800 }
3801 writeq(pa, tidptr);
3802 mmiowb();
3803}
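/*
 * Example of the packing above (illustrative, assuming
 * IBA7322_TID_PA_SHIFT is 11, i.e. 2KB alignment): a physical address
 * of 0x1f6000 becomes chippa = 0x1f6000 >> 11 = 0x3ec, which is OR'd
 * with the eager template or the 4KB expected-size bits before being
 * written to the TID entry; an unaligned or out-of-range address is
 * caught by the paranoia checks and never written.
 */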
3804
3805/**
3806 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3807 * @dd: the qlogic_ib device
3808 * @rcd: the ctxt
3809 *
3810 * clear all TID entries for a ctxt, expected and eager.
3811 * Used from qib_close().
3812 */
3813static void qib_7322_clear_tids(struct qib_devdata *dd,
3814 struct qib_ctxtdata *rcd)
3815{
3816 u64 __iomem *tidbase;
3817 unsigned long tidinv;
3818 u32 ctxt;
3819 int i;
3820
3821 if (!dd->kregbase || !rcd)
3822 return;
3823
3824 ctxt = rcd->ctxt;
3825
3826 tidinv = dd->tidinvalid;
3827 tidbase = (u64 __iomem *)
3828 ((char __iomem *) dd->kregbase +
3829 dd->rcvtidbase +
3830 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3831
3832 for (i = 0; i < dd->rcvtidcnt; i++)
3833 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3834 tidinv);
3835
3836 tidbase = (u64 __iomem *)
3837 ((char __iomem *) dd->kregbase +
3838 dd->rcvegrbase +
3839 rcd->rcvegr_tid_base * sizeof(*tidbase));
3840
3841 for (i = 0; i < rcd->rcvegrcnt; i++)
3842 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3843 tidinv);
3844}
3845
3846/**
3847 * qib_7322_tidtemplate - setup constants for TID updates
3848 * @dd: the qlogic_ib device
3849 *
3850 * We setup stuff that we use a lot, to avoid calculating each time
3851 */
3852static void qib_7322_tidtemplate(struct qib_devdata *dd)
3853{
3854 /*
3855 * For now, we always allocate 4KB buffers (at init) so we can
3856 * receive max size packets. We may want a module parameter to
3857 * specify 2KB or 4KB and/or make it per port instead of per device
3858 * for those who want to reduce memory footprint. Note that the
3859 * rcvhdrentsize size must be large enough to hold the largest
3860 * IB header (currently 96 bytes) that we expect to handle (plus of
3861 * course the 2 dwords of RHF).
3862 */
3863 if (dd->rcvegrbufsize == 2048)
3864 dd->tidtemplate = IBA7322_TID_SZ_2K;
3865 else if (dd->rcvegrbufsize == 4096)
3866 dd->tidtemplate = IBA7322_TID_SZ_4K;
3867 dd->tidinvalid = 0;
3868}
3869
3870/**
3871 * qib_7322_get_base_info - set chip-specific flags for user code
3872 * @rcd: the qlogic_ib ctxt
3873 * @kinfo: qib_base_info pointer
3874 *
3875 * We set the PCIE flag because the lower bandwidth on PCIe vs
3876 * HyperTransport can affect some user packet algorithms.
3877 */
3878
3879static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3880 struct qib_base_info *kinfo)
3881{
3882 kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3883 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3884 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3885 if (rcd->dd->cspec->r1)
3886 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3887 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3888 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3889
3890 return 0;
3891}
3892
3893static struct qib_message_header *
3894qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3895{
3896 u32 offset = qib_hdrget_offset(rhf_addr);
3897
3898 return (struct qib_message_header *)
3899 (rhf_addr - dd->rhf_offset + offset);
3900}
3901
3902/*
3903 * Configure number of contexts.
3904 */
3905static void qib_7322_config_ctxts(struct qib_devdata *dd)
3906{
3907 unsigned long flags;
3908 u32 nchipctxts;
3909
3910 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3911 dd->cspec->numctxts = nchipctxts;
3912 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3913 dd->first_user_ctxt = NUM_IB_PORTS +
3914 (qib_n_krcv_queues - 1) * dd->num_pports;
3915 if (dd->first_user_ctxt > nchipctxts)
3916 dd->first_user_ctxt = nchipctxts;
3917 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3918 } else {
3919 dd->first_user_ctxt = NUM_IB_PORTS;
3920 dd->n_krcv_queues = 1;
3921 }
3922
3923 if (!qib_cfgctxts) {
3924 int nctxts = dd->first_user_ctxt + num_online_cpus();
3925
3926 if (nctxts <= 6)
3927 dd->ctxtcnt = 6;
3928 else if (nctxts <= 10)
3929 dd->ctxtcnt = 10;
3930 else if (nctxts <= nchipctxts)
3931 dd->ctxtcnt = nchipctxts;
3932 } else if (qib_cfgctxts < dd->num_pports)
3933 dd->ctxtcnt = dd->num_pports;
3934 else if (qib_cfgctxts <= nchipctxts)
3935 dd->ctxtcnt = qib_cfgctxts;
3936 if (!dd->ctxtcnt) /* none of the above, set to max */
3937 dd->ctxtcnt = nchipctxts;
3938
3939 /*
3940 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3941 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3942 * Lock to be paranoid about later motion, etc.
3943 */
3944 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3945 if (dd->ctxtcnt > 10)
3946 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3947 else if (dd->ctxtcnt > 6)
3948 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3949 /* else configure for default 6 receive ctxts */
3950
3951 /* The XRC opcode is 5. */
3952 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3953
3954 /*
3955 * RcvCtrl *must* be written here so that the
3956 * chip understands how to change rcvegrcnt below.
3957 */
3958 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3959 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3960
3961 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3962 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3963 if (qib_rcvhdrcnt)
3964 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3965 else
8d4548f2 3966 dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
0a43e117 3967 dd->num_pports > 1 ? 1024U : 2048U);
3968}
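/*
 * Worked example (illustrative, assuming NUM_IB_PORTS is 2 and the
 * chip reports 18 contexts): on a dual-port card with
 * qib_n_krcv_queues=2, first_user_ctxt = 2 + (2 - 1) * 2 = 4 kernel
 * contexts.  With qib_cfgctxts unset and 8 online CPUs, nctxts = 4 + 8
 * = 12, so ctxtcnt becomes 18 and ContextCfg is set to 2, which in
 * turn determines the kr_rcvegrcnt value the function reads back.
 */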
3969
3970static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3971{
3972
3973 int lsb, ret = 0;
3974 u64 maskr; /* right-justified mask */
3975
3976 switch (which) {
3977
3978 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3979 ret = ppd->link_width_enabled;
3980 goto done;
3981
3982 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3983 ret = ppd->link_width_active;
3984 goto done;
3985
3986 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3987 ret = ppd->link_speed_enabled;
3988 goto done;
3989
3990 case QIB_IB_CFG_SPD: /* Get current Link spd */
3991 ret = ppd->link_speed_active;
3992 goto done;
3993
3994 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3995 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3996 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3997 break;
3998
3999 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
4000 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4001 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4002 break;
4003
4004 case QIB_IB_CFG_LINKLATENCY:
4005 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4006 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4007 goto done;
4008
4009 case QIB_IB_CFG_OP_VLS:
4010 ret = ppd->vls_operational;
4011 goto done;
4012
4013 case QIB_IB_CFG_VL_HIGH_CAP:
4014 ret = 16;
4015 goto done;
4016
4017 case QIB_IB_CFG_VL_LOW_CAP:
4018 ret = 16;
4019 goto done;
4020
4021 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4022 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4023 OverrunThreshold);
4024 goto done;
4025
4026 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4027 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4028 PhyerrThreshold);
4029 goto done;
4030
4031 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4032 /* will only take effect when the link state changes */
4033 ret = (ppd->cpspec->ibcctrl_a &
4034 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4035 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4036 goto done;
4037
4038 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4039 lsb = IBA7322_IBC_HRTBT_LSB;
4040 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4041 break;
4042
4043 case QIB_IB_CFG_PMA_TICKS:
4044 /*
4045 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4046 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4047 */
4048 if (ppd->link_speed_active == QIB_IB_QDR)
4049 ret = 3;
4050 else if (ppd->link_speed_active == QIB_IB_DDR)
4051 ret = 1;
4052 else
4053 ret = 0;
4054 goto done;
4055
4056 default:
4057 ret = -EINVAL;
4058 goto done;
4059 }
4060 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4061done:
4062 return ret;
4063}
4064
4065/*
4066 * Below again cribbed liberally from older version. Do not lean
4067 * heavily on it.
4068 */
4069#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4070#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4071 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4072
4073static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4074{
4075 struct qib_devdata *dd = ppd->dd;
4076 u64 maskr; /* right-justified mask */
4077 int lsb, ret = 0;
4078 u16 lcmd, licmd;
4079 unsigned long flags;
4080
4081 switch (which) {
4082 case QIB_IB_CFG_LIDLMC:
4083 /*
4084 * Set LID and LMC. Combined to avoid possible hazard
4085 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4086 */
4087 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4088 maskr = IBA7322_IBC_DLIDLMC_MASK;
4089 /*
4090 * For header-checking, the SLID in the packet will
4091 * be masked with SendIBSLMCMask, and compared
4092 * with SendIBSLIDAssignMask. Make sure we do not
4093 * set any bits not covered by the mask, or we get
4094 * false-positives.
4095 */
4096 qib_write_kreg_port(ppd, krp_sendslid,
4097 val & (val >> 16) & SendIBSLIDAssignMask);
4098 qib_write_kreg_port(ppd, krp_sendslidmask,
4099 (val >> 16) & SendIBSLMCMask);
4100 break;
4101
4102 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4103 ppd->link_width_enabled = val;
4104 /* convert IB value to chip register value */
4105 if (val == IB_WIDTH_1X)
4106 val = 0;
4107 else if (val == IB_WIDTH_4X)
4108 val = 1;
4109 else
4110 val = 3;
4111 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4112 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4113 break;
4114
4115 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4116 /*
4117 * As with width, only write the actual register if the
4118 * link is currently down, otherwise takes effect on next
25985edc 4119 * link change. Since setting is being explicitly requested
4120 * (via MAD or sysfs), clear autoneg failure status if speed
4121 * autoneg is enabled.
4122 */
4123 ppd->link_speed_enabled = val;
4124 val <<= IBA7322_IBC_SPEED_LSB;
4125 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4126 IBA7322_IBC_MAX_SPEED_MASK;
4127 if (val & (val - 1)) {
4128 /* Multiple speeds enabled */
4129 val |= IBA7322_IBC_IBTA_1_2_MASK |
4130 IBA7322_IBC_MAX_SPEED_MASK;
4131 spin_lock_irqsave(&ppd->lflags_lock, flags);
4132 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4133 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4134 } else if (val & IBA7322_IBC_SPEED_QDR)
4135 val |= IBA7322_IBC_IBTA_1_2_MASK;
4136 /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4137 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4138 break;
4139
4140 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4141 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4142 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4143 break;
4144
4145 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4146 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4147 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4148 break;
4149
4150 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4151 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4152 OverrunThreshold);
4153 if (maskr != val) {
4154 ppd->cpspec->ibcctrl_a &=
4155 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4156 ppd->cpspec->ibcctrl_a |= (u64) val <<
4157 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4158 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4159 ppd->cpspec->ibcctrl_a);
4160 qib_write_kreg(dd, kr_scratch, 0ULL);
4161 }
4162 goto bail;
4163
4164 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4165 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4166 PhyerrThreshold);
4167 if (maskr != val) {
4168 ppd->cpspec->ibcctrl_a &=
4169 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4170 ppd->cpspec->ibcctrl_a |= (u64) val <<
4171 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4172 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4173 ppd->cpspec->ibcctrl_a);
4174 qib_write_kreg(dd, kr_scratch, 0ULL);
4175 }
4176 goto bail;
4177
4178 case QIB_IB_CFG_PKEYS: /* update pkeys */
4179 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4180 ((u64) ppd->pkeys[2] << 32) |
4181 ((u64) ppd->pkeys[3] << 48);
4182 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4183 goto bail;
4184
4185 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4186 /* will only take effect when the link state changes */
4187 if (val == IB_LINKINITCMD_POLL)
4188 ppd->cpspec->ibcctrl_a &=
4189 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4190 else /* SLEEP */
4191 ppd->cpspec->ibcctrl_a |=
4192 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4193 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4194 qib_write_kreg(dd, kr_scratch, 0ULL);
4195 goto bail;
4196
4197 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4198 /*
4199 * Update our housekeeping variables, and set IBC max
4200 * size, same as init code; max IBC is max we allow in
4201 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4202 * Set even if it's unchanged, print debug message only
4203 * on changes.
4204 */
4205 val = (ppd->ibmaxlen >> 2) + 1;
4206 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4207 ppd->cpspec->ibcctrl_a |= (u64)val <<
4208 SYM_LSB(IBCCtrlA_0, MaxPktLen);
4209 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4210 ppd->cpspec->ibcctrl_a);
4211 qib_write_kreg(dd, kr_scratch, 0ULL);
4212 goto bail;
4213
4214 case QIB_IB_CFG_LSTATE: /* set the IB link state */
4215 switch (val & 0xffff0000) {
4216 case IB_LINKCMD_DOWN:
4217 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4218 ppd->cpspec->ibmalfusesnap = 1;
4219 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4220 crp_errlink);
4221 if (!ppd->cpspec->ibdeltainprog &&
4222 qib_compat_ddr_negotiate) {
4223 ppd->cpspec->ibdeltainprog = 1;
4224 ppd->cpspec->ibsymsnap =
4225 read_7322_creg32_port(ppd,
4226 crp_ibsymbolerr);
4227 ppd->cpspec->iblnkerrsnap =
4228 read_7322_creg32_port(ppd,
4229 crp_iblinkerrrecov);
4230 }
4231 break;
4232
4233 case IB_LINKCMD_ARMED:
4234 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4235 if (ppd->cpspec->ibmalfusesnap) {
4236 ppd->cpspec->ibmalfusesnap = 0;
4237 ppd->cpspec->ibmalfdelta +=
4238 read_7322_creg32_port(ppd,
4239 crp_errlink) -
4240 ppd->cpspec->ibmalfsnap;
4241 }
4242 break;
4243
4244 case IB_LINKCMD_ACTIVE:
4245 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4246 break;
4247
4248 default:
4249 ret = -EINVAL;
4250 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4251 goto bail;
4252 }
4253 switch (val & 0xffff) {
4254 case IB_LINKINITCMD_NOP:
4255 licmd = 0;
4256 break;
4257
4258 case IB_LINKINITCMD_POLL:
4259 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4260 break;
4261
4262 case IB_LINKINITCMD_SLEEP:
4263 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4264 break;
4265
4266 case IB_LINKINITCMD_DISABLE:
4267 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4268 ppd->cpspec->chase_end = 0;
4269 /*
4270 * stop state chase counter and timer, if running.
4271 * wait for pending timer, but don't clear .data (ppd)!
4272 */
4273 if (ppd->cpspec->chase_timer.expires) {
4274 del_timer_sync(&ppd->cpspec->chase_timer);
4275 ppd->cpspec->chase_timer.expires = 0;
4276 }
4277 break;
4278
4279 default:
4280 ret = -EINVAL;
4281 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4282 val & 0xffff);
4283 goto bail;
4284 }
4285 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4286 goto bail;
4287
4288 case QIB_IB_CFG_OP_VLS:
4289 if (ppd->vls_operational != val) {
4290 ppd->vls_operational = val;
4291 set_vls(ppd);
4292 }
4293 goto bail;
4294
4295 case QIB_IB_CFG_VL_HIGH_LIMIT:
4296 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4297 goto bail;
4298
4299 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4300 if (val > 3) {
4301 ret = -EINVAL;
4302 goto bail;
4303 }
4304 lsb = IBA7322_IBC_HRTBT_LSB;
4305 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4306 break;
4307
4308 case QIB_IB_CFG_PORT:
4309 /* val is the port number of the switch we are connected to. */
4310 if (ppd->dd->cspec->r1) {
4311 cancel_delayed_work(&ppd->cpspec->ipg_work);
4312 ppd->cpspec->ipg_tries = 0;
4313 }
4314 goto bail;
4315
4316 default:
4317 ret = -EINVAL;
4318 goto bail;
4319 }
4320 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4321 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4322 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4323 qib_write_kreg(dd, kr_scratch, 0);
4324bail:
4325 return ret;
4326}
4327
4328static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4329{
4330 int ret = 0;
4331 u64 val, ctrlb;
4332
4333 /* only IBC loopback, may add serdes and xgxs loopbacks later */
4334 if (!strncmp(what, "ibc", 3)) {
4335 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4336 Loopback);
4337 val = 0; /* disable heart beat, so link will come up */
4338 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4339 ppd->dd->unit, ppd->port);
4340 } else if (!strncmp(what, "off", 3)) {
4341 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4342 Loopback);
4343 /* enable heart beat again */
4344 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4345 qib_devinfo(ppd->dd->pcidev,
4346 "Disabling IB%u:%u IBC loopback (normal)\n",
4347 ppd->dd->unit, ppd->port);
4348 } else
4349 ret = -EINVAL;
4350 if (!ret) {
4351 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4352 ppd->cpspec->ibcctrl_a);
4353 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4354 << IBA7322_IBC_HRTBT_LSB);
4355 ppd->cpspec->ibcctrl_b = ctrlb | val;
4356 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4357 ppd->cpspec->ibcctrl_b);
4358 qib_write_kreg(ppd->dd, kr_scratch, 0);
4359 }
4360 return ret;
4361}
4362
4363static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4364 struct ib_vl_weight_elem *vl)
4365{
4366 unsigned i;
4367
4368 for (i = 0; i < 16; i++, regno++, vl++) {
4369 u32 val = qib_read_kreg_port(ppd, regno);
4370
4371 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4372 SYM_RMASK(LowPriority0_0, VirtualLane);
4373 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4374 SYM_RMASK(LowPriority0_0, Weight);
4375 }
4376}
4377
4378static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4379 struct ib_vl_weight_elem *vl)
4380{
4381 unsigned i;
4382
4383 for (i = 0; i < 16; i++, regno++, vl++) {
4384 u64 val;
4385
4386 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4387 SYM_LSB(LowPriority0_0, VirtualLane)) |
4388 ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4389 SYM_LSB(LowPriority0_0, Weight));
4390 qib_write_kreg_port(ppd, regno, val);
4391 }
4392 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4393 struct qib_devdata *dd = ppd->dd;
4394 unsigned long flags;
4395
4396 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4397 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4398 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4399 qib_write_kreg(dd, kr_scratch, 0);
4400 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4401 }
4402}
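/*
 * Illustrative packing for the writes above: each of the 16 table
 * registers holds one {VirtualLane, Weight} pair, so an element with
 * vl=3 and weight=64 is written as
 * (3 << SYM_LSB(LowPriority0_0, VirtualLane)) |
 * (64 << SYM_LSB(LowPriority0_0, Weight)); once the table is written,
 * IBVLArbiterEn is turned on in the per-port SendCtrl if it was not
 * already set.
 */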
4403
4404static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4405{
4406 switch (which) {
4407 case QIB_IB_TBL_VL_HIGH_ARB:
4408 get_vl_weights(ppd, krp_highprio_0, t);
4409 break;
4410
4411 case QIB_IB_TBL_VL_LOW_ARB:
4412 get_vl_weights(ppd, krp_lowprio_0, t);
4413 break;
4414
4415 default:
4416 return -EINVAL;
4417 }
4418 return 0;
4419}
4420
4421static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4422{
4423 switch (which) {
4424 case QIB_IB_TBL_VL_HIGH_ARB:
4425 set_vl_weights(ppd, krp_highprio_0, t);
4426 break;
4427
4428 case QIB_IB_TBL_VL_LOW_ARB:
4429 set_vl_weights(ppd, krp_lowprio_0, t);
4430 break;
4431
4432 default:
4433 return -EINVAL;
4434 }
4435 return 0;
4436}
4437
4438static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
19ede2e4 4439 u32 updegr, u32 egrhd, u32 npkts)
f931551b 4440{
4441 /*
4442 * Need to write timeout register before updating rcvhdrhead to ensure
4443 * that the timer is enabled on reception of a packet.
4444 */
4445 if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4446 adjust_rcv_timeout(rcd, npkts);
4447 if (updegr)
4448 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4449 mmiowb();
4450 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4451 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4452 mmiowb();
4453}
4454
4455static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4456{
4457 u32 head, tail;
4458
4459 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4460 if (rcd->rcvhdrtail_kvaddr)
4461 tail = qib_get_rcvhdrtail(rcd);
4462 else
4463 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4464 return head == tail;
4465}
4466
4467#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4468 QIB_RCVCTRL_CTXT_DIS | \
4469 QIB_RCVCTRL_TIDFLOW_ENB | \
4470 QIB_RCVCTRL_TIDFLOW_DIS | \
4471 QIB_RCVCTRL_TAILUPD_ENB | \
4472 QIB_RCVCTRL_TAILUPD_DIS | \
4473 QIB_RCVCTRL_INTRAVAIL_ENB | \
4474 QIB_RCVCTRL_INTRAVAIL_DIS | \
4475 QIB_RCVCTRL_BP_ENB | \
4476 QIB_RCVCTRL_BP_DIS)
4477
4478#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4479 QIB_RCVCTRL_CTXT_DIS | \
4480 QIB_RCVCTRL_PKEY_DIS | \
4481 QIB_RCVCTRL_PKEY_ENB)
4482
4483/*
4484 * Modify the RCVCTRL register in chip-specific way. This
4485 * is a function because bit positions and (future) register
4486 * location are chip-specific, but the needed operations are
4487 * generic. <op> is a bit-mask because we often want to
4488 * do multiple modifications.
4489 */
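/*
 * Typical usage (illustrative, not an exhaustive list of callers):
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			      QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 *
 * enables one kernel receive context and its packet-available
 * interrupt in a single call; both ops are in RCVCTRL_COMMON_MODS so
 * kr_rcvctrl is written, and CTXT_ENB is also in RCVCTRL_PORT_MODS so
 * krp_rcvctrl is written as well.
 */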
4490static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4491 int ctxt)
4492{
4493 struct qib_devdata *dd = ppd->dd;
4494 struct qib_ctxtdata *rcd;
4495 u64 mask, val;
4496 unsigned long flags;
4497
4498 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4499
4500 if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4501 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4502 if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4503 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4504 if (op & QIB_RCVCTRL_TAILUPD_ENB)
4505 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4506 if (op & QIB_RCVCTRL_TAILUPD_DIS)
4507 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4508 if (op & QIB_RCVCTRL_PKEY_ENB)
4509 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4510 if (op & QIB_RCVCTRL_PKEY_DIS)
4511 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4512 if (ctxt < 0) {
4513 mask = (1ULL << dd->ctxtcnt) - 1;
4514 rcd = NULL;
4515 } else {
4516 mask = (1ULL << ctxt);
4517 rcd = dd->rcd[ctxt];
4518 }
4519 if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4520 ppd->p_rcvctrl |=
4521 (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4522 if (!(dd->flags & QIB_NODMA_RTAIL)) {
4523 op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4524 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4525 }
4526 /* Write these registers before the context is enabled. */
4527 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4528 rcd->rcvhdrqtailaddr_phys);
4529 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4530 rcd->rcvhdrq_phys);
4531 rcd->seq_cnt = 1;
4532 }
4533 if (op & QIB_RCVCTRL_CTXT_DIS)
4534 ppd->p_rcvctrl &=
4535 ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4536 if (op & QIB_RCVCTRL_BP_ENB)
4537 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4538 if (op & QIB_RCVCTRL_BP_DIS)
4539 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4540 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4541 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4542 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4543 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4544 /*
4545 * Decide which registers to write depending on the ops enabled.
4546 * Special case is "flush" (no bits set at all)
4547 * which needs to write both.
4548 */
4549 if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4550 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4551 if (op == 0 || (op & RCVCTRL_PORT_MODS))
4552 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4553 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4554 /*
4555 * Init the context registers also; if we were
4556 * disabled, tail and head should both be zero
4557 * already from the enable, but since we don't
25985edc 4558 * know, we have to do it explicitly.
4559 */
4560 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4561 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4562
4563 /* be sure enabling write seen; hd/tl should be 0 */
4564 (void) qib_read_kreg32(dd, kr_scratch);
4565 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4566 dd->rcd[ctxt]->head = val;
4567 /* If kctxt, interrupt on next receive. */
4568 if (ctxt < dd->first_user_ctxt)
4569 val |= dd->rhdrhead_intr_off;
4570 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4571 } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4572 dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4573 /* arm rcv interrupt */
4574 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4575 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4576 }
4577 if (op & QIB_RCVCTRL_CTXT_DIS) {
4578 unsigned f;
4579
4580 /* Now that the context is disabled, clear these registers. */
4581 if (ctxt >= 0) {
4582 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4583 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4584 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4585 qib_write_ureg(dd, ur_rcvflowtable + f,
4586 TIDFLOW_ERRBITS, ctxt);
4587 } else {
4588 unsigned i;
4589
4590 for (i = 0; i < dd->cfgctxts; i++) {
4591 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4592 i, 0);
4593 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4594 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4595 qib_write_ureg(dd, ur_rcvflowtable + f,
4596 TIDFLOW_ERRBITS, i);
4597 }
4598 }
4599 }
4600 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4601}
4602
4603/*
4604 * Modify the SENDCTRL register in chip-specific way. This
4605 * is a function where there are multiple such registers with
4606 * slightly different layouts.
4607 * The chip doesn't allow back-to-back sendctrl writes, so write
4608 * the scratch register after writing sendctrl.
4609 *
4610 * Which register is written depends on the operation.
4611 * Most operate on the common register, while
4612 * SEND_ENB and SEND_DIS operate on the per-port ones.
4613 * SEND_ENB is included in common because it can change SPCL_TRIG
4614 */
4615#define SENDCTRL_COMMON_MODS (\
4616 QIB_SENDCTRL_CLEAR | \
4617 QIB_SENDCTRL_AVAIL_DIS | \
4618 QIB_SENDCTRL_AVAIL_ENB | \
4619 QIB_SENDCTRL_AVAIL_BLIP | \
4620 QIB_SENDCTRL_DISARM | \
4621 QIB_SENDCTRL_DISARM_ALL | \
4622 QIB_SENDCTRL_SEND_ENB)
4623
4624#define SENDCTRL_PORT_MODS (\
4625 QIB_SENDCTRL_CLEAR | \
4626 QIB_SENDCTRL_SEND_ENB | \
4627 QIB_SENDCTRL_SEND_DIS | \
4628 QIB_SENDCTRL_FLUSH)
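/*
 * Illustrative routing of the ops above: QIB_SENDCTRL_AVAIL_BLIP and
 * QIB_SENDCTRL_DISARM appear only in SENDCTRL_COMMON_MODS and so hit
 * the common kr_sendctrl, QIB_SENDCTRL_SEND_DIS and QIB_SENDCTRL_FLUSH
 * only hit the per-port krp_sendctrl, and QIB_SENDCTRL_SEND_ENB is in
 * both lists because enabling send can also change SpecialTriggerEn in
 * the common register.
 */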
4629
4630static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4631{
4632 struct qib_devdata *dd = ppd->dd;
4633 u64 tmp_dd_sendctrl;
4634 unsigned long flags;
4635
4636 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4637
4638 /* First the dd ones that are "sticky", saved in shadow */
4639 if (op & QIB_SENDCTRL_CLEAR)
4640 dd->sendctrl = 0;
4641 if (op & QIB_SENDCTRL_AVAIL_DIS)
4642 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4643 else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4644 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4645 if (dd->flags & QIB_USE_SPCL_TRIG)
4646 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4647 }
4648
4649 /* Then the ppd ones that are "sticky", saved in shadow */
4650 if (op & QIB_SENDCTRL_SEND_DIS)
4651 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4652 else if (op & QIB_SENDCTRL_SEND_ENB)
4653 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4654
4655 if (op & QIB_SENDCTRL_DISARM_ALL) {
4656 u32 i, last;
4657
4658 tmp_dd_sendctrl = dd->sendctrl;
4659 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4660 /*
4661 * Disarm any buffers that are not yet launched,
4662 * disabling updates until done.
4663 */
4664 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4665 for (i = 0; i < last; i++) {
4666 qib_write_kreg(dd, kr_sendctrl,
4667 tmp_dd_sendctrl |
4668 SYM_MASK(SendCtrl, Disarm) | i);
4669 qib_write_kreg(dd, kr_scratch, 0);
4670 }
4671 }
4672
4673 if (op & QIB_SENDCTRL_FLUSH) {
4674 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4675
4676 /*
4677 * Now drain all the fifos. The Abort bit should never be
4678 * needed, so for now, at least, we don't use it.
4679 */
4680 tmp_ppd_sendctrl |=
4681 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4682 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4683 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4684 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4685 qib_write_kreg(dd, kr_scratch, 0);
4686 }
4687
4688 tmp_dd_sendctrl = dd->sendctrl;
4689
4690 if (op & QIB_SENDCTRL_DISARM)
4691 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4692 ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4693 SYM_LSB(SendCtrl, DisarmSendBuf));
4694 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4695 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4696 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4697
4698 if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4699 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4700 qib_write_kreg(dd, kr_scratch, 0);
4701 }
4702
4703 if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4704 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4705 qib_write_kreg(dd, kr_scratch, 0);
4706 }
4707
4708 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4709 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4710 qib_write_kreg(dd, kr_scratch, 0);
4711 }
4712
4713 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4714
4715 if (op & QIB_SENDCTRL_FLUSH) {
4716 u32 v;
4717 /*
4718 * ensure writes have hit chip, then do a few
4719 * more reads, to allow DMA of pioavail registers
4720 * to occur, so in-memory copy is in sync with
4721 * the chip. Not always safe to sleep.
4722 */
4723 v = qib_read_kreg32(dd, kr_scratch);
4724 qib_write_kreg(dd, kr_scratch, v);
4725 v = qib_read_kreg32(dd, kr_scratch);
4726 qib_write_kreg(dd, kr_scratch, v);
4727 qib_read_kreg32(dd, kr_scratch);
4728 }
4729}
4730
4731#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4732#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4733#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4734
4735/**
4736 * qib_portcntr_7322 - read a per-port chip counter
4737 * @ppd: the qlogic_ib pport
4738 * @reg: the counter to read (not a chip offset)
4739 */
4740static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4741{
4742 struct qib_devdata *dd = ppd->dd;
4743 u64 ret = 0ULL;
4744 u16 creg;
4745 /* 0xffff for unimplemented or synthesized counters */
4746 static const u32 xlator[] = {
4747 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4748 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4749 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4750 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4751 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4752 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4753 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4754 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4755 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4756 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4757 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4758 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4759 [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4760 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4761 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4762 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4763 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4764 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4765 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4766 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4767 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4768 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4769 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4770 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4771 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4772 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4773 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4774 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4775 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4776 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4777 /*
4778 * the next 3 aren't really counters, but were implemented
4779 * as counters in older chips, so still get accessed as
4780 * though they were counters from this code.
4781 */
4782 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4783 [QIBPORTCNTR_PSSTART] = krp_psstart,
4784 [QIBPORTCNTR_PSSTAT] = krp_psstat,
4785 /* pseudo-counter, summed for all ports */
4786 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4787 };
4788
4789 if (reg >= ARRAY_SIZE(xlator)) {
4790 qib_devinfo(ppd->dd->pcidev,
4791 "Unimplemented portcounter %u\n", reg);
4792 goto done;
4793 }
4794 creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4795
4796 /* handle non-counters and special cases first */
4797 if (reg == QIBPORTCNTR_KHDROVFL) {
4798 int i;
4799
4800 /* sum over all kernel contexts (skip if mini_init) */
4801 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4802 struct qib_ctxtdata *rcd = dd->rcd[i];
4803
4804 if (!rcd || rcd->ppd != ppd)
4805 continue;
4806 ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4807 }
4808 goto done;
4809 } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4810 /*
4811 * Used as part of the synthesis of port_rcv_errors
4812 * in the verbs code for IBTA counters. Not needed for 7322,
4813 * because all the errors are already counted by other cntrs.
4814 */
4815 goto done;
4816 } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4817 reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4818 /* were counters in older chips, now per-port kernel regs */
4819 ret = qib_read_kreg_port(ppd, creg);
4820 goto done;
4821 }
4822
4823 /*
4824 * Only fast increment counters are 64 bits; use 32 bit reads to
4825 * avoid two independent reads when on Opteron.
4826 */
4827 if (xlator[reg] & _PORT_64BIT_FLAG)
4828 ret = read_7322_creg_port(ppd, creg);
4829 else
4830 ret = read_7322_creg32_port(ppd, creg);
4831 if (creg == crp_ibsymbolerr) {
4832 if (ppd->cpspec->ibdeltainprog)
4833 ret -= ret - ppd->cpspec->ibsymsnap;
4834 ret -= ppd->cpspec->ibsymdelta;
4835 } else if (creg == crp_iblinkerrrecov) {
4836 if (ppd->cpspec->ibdeltainprog)
4837 ret -= ret - ppd->cpspec->iblnkerrsnap;
4838 ret -= ppd->cpspec->iblnkerrdelta;
4839 } else if (creg == crp_errlink)
4840 ret -= ppd->cpspec->ibmalfdelta;
4841 else if (creg == crp_iblinkdown)
4842 ret += ppd->cpspec->iblnkdowndelta;
4843done:
4844 return ret;
4845}
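/*
 * Illustrative use (the call shown is hypothetical): a call such as
 *	qib_portcntr_7322(ppd, QIBPORTCNTR_IBSYMBOLERR)
 * resolves to crp_ibsymbolerr, does a 32-bit per-port read (no
 * _PORT_64BIT_FLAG), and then applies the ibsymsnap/ibsymdelta
 * adjustments; a counter such as QIBPORTCNTR_PKTSEND carries
 * _PORT_64BIT_FLAG and uses the 64-bit read instead.
 */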
4846
4847/*
4848 * Device counter names (not port-specific), one line per stat,
4849 * single string. Used by utilities like ipathstats to print the stats
4850 * in a way which works for different versions of drivers, without changing
4851 * the utility. Names need to be 12 chars or less (w/o newline), for proper
4852 * display by the utility.
4853 * Non-error counters are first.
4854 * Start of "error" counters is indicated by a leading "E " on the first
4855 * "error" counter, and doesn't count in label length.
4856 * The EgrOvfl list needs to be last so we truncate them at the configured
4857 * context count for the device.
4858 * cntr7322indices contains the corresponding register indices.
4859 */
4860static const char cntr7322names[] =
4861 "Interrupts\n"
4862 "HostBusStall\n"
4863 "E RxTIDFull\n"
4864 "RxTIDInvalid\n"
4865 "RxTIDFloDrop\n" /* 7322 only */
4866 "Ctxt0EgrOvfl\n"
4867 "Ctxt1EgrOvfl\n"
4868 "Ctxt2EgrOvfl\n"
4869 "Ctxt3EgrOvfl\n"
4870 "Ctxt4EgrOvfl\n"
4871 "Ctxt5EgrOvfl\n"
4872 "Ctxt6EgrOvfl\n"
4873 "Ctxt7EgrOvfl\n"
4874 "Ctxt8EgrOvfl\n"
4875 "Ctxt9EgrOvfl\n"
4876 "Ctx10EgrOvfl\n"
4877 "Ctx11EgrOvfl\n"
4878 "Ctx12EgrOvfl\n"
4879 "Ctx13EgrOvfl\n"
4880 "Ctx14EgrOvfl\n"
4881 "Ctx15EgrOvfl\n"
4882 "Ctx16EgrOvfl\n"
4883 "Ctx17EgrOvfl\n"
4884 ;
4885
4886static const u32 cntr7322indices[] = {
4887 cr_lbint | _PORT_64BIT_FLAG,
4888 cr_lbstall | _PORT_64BIT_FLAG,
4889 cr_tidfull,
4890 cr_tidinvalid,
4891 cr_rxtidflowdrop,
4892 cr_base_egrovfl + 0,
4893 cr_base_egrovfl + 1,
4894 cr_base_egrovfl + 2,
4895 cr_base_egrovfl + 3,
4896 cr_base_egrovfl + 4,
4897 cr_base_egrovfl + 5,
4898 cr_base_egrovfl + 6,
4899 cr_base_egrovfl + 7,
4900 cr_base_egrovfl + 8,
4901 cr_base_egrovfl + 9,
4902 cr_base_egrovfl + 10,
4903 cr_base_egrovfl + 11,
4904 cr_base_egrovfl + 12,
4905 cr_base_egrovfl + 13,
4906 cr_base_egrovfl + 14,
4907 cr_base_egrovfl + 15,
4908 cr_base_egrovfl + 16,
4909 cr_base_egrovfl + 17,
4910};
4911
4912/*
4913 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4914 * portcntr7322indices is somewhat complicated by some registers needing
4915 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4916 */
4917static const char portcntr7322names[] =
4918 "TxPkt\n"
4919 "TxFlowPkt\n"
4920 "TxWords\n"
4921 "RxPkt\n"
4922 "RxFlowPkt\n"
4923 "RxWords\n"
4924 "TxFlowStall\n"
4925 "TxDmaDesc\n" /* 7220 and 7322-only */
4926 "E RxDlidFltr\n" /* 7220 and 7322-only */
4927 "IBStatusChng\n"
4928 "IBLinkDown\n"
4929 "IBLnkRecov\n"
4930 "IBRxLinkErr\n"
4931 "IBSymbolErr\n"
4932 "RxLLIErr\n"
4933 "RxBadFormat\n"
4934 "RxBadLen\n"
4935 "RxBufOvrfl\n"
4936 "RxEBP\n"
4937 "RxFlowCtlErr\n"
4938 "RxICRCerr\n"
4939 "RxLPCRCerr\n"
4940 "RxVCRCerr\n"
4941 "RxInvalLen\n"
4942 "RxInvalPKey\n"
4943 "RxPktDropped\n"
4944 "TxBadLength\n"
4945 "TxDropped\n"
4946 "TxInvalLen\n"
4947 "TxUnderrun\n"
4948 "TxUnsupVL\n"
4949 "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4950 "RxVL15Drop\n"
4951 "RxVlErr\n"
4952 "XcessBufOvfl\n"
4953 "RxQPBadCtxt\n" /* 7322-only from here down */
4954 "TXBadHeader\n"
4955 ;
4956
4957static const u32 portcntr7322indices[] = {
4958 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4959 crp_pktsendflow,
4960 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4961 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4962 crp_pktrcvflowctrl,
4963 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4964 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4965 crp_txsdmadesc | _PORT_64BIT_FLAG,
4966 crp_rxdlidfltr,
4967 crp_ibstatuschange,
4968 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4969 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4970 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4971 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4972 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4973 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4974 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4975 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4976 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4977 crp_rcvflowctrlviol,
4978 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4979 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4980 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4981 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4982 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4983 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4984 crp_txminmaxlenerr,
4985 crp_txdroppedpkt,
4986 crp_txlenerr,
4987 crp_txunderrun,
4988 crp_txunsupvl,
4989 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4990 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4991 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4992 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4993 crp_rxqpinvalidctxt,
4994 crp_txhdrerr,
4995};
4996
4997/* do all the setup to make the counter reads efficient later */
4998static void init_7322_cntrnames(struct qib_devdata *dd)
4999{
5000 int i, j = 0;
5001 char *s;
5002
5003 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5004 i++) {
5005 /* we always have at least one counter before the egrovfl */
5006 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5007 j = 1;
5008 s = strchr(s + 1, '\n');
5009 if (s && j)
5010 j++;
5011 }
5012 dd->cspec->ncntrs = i;
5013 if (!s)
5014 /* full list; size is without terminating null */
5015 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5016 else
5017 dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5018 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5019 * sizeof(u64), GFP_KERNEL);
5020 if (!dd->cspec->cntrs)
5021 qib_dev_err(dd, "Failed allocation for counters\n");
5022
5023 for (i = 0, s = (char *)portcntr7322names; s; i++)
5024 s = strchr(s + 1, '\n');
5025 dd->cspec->nportcntrs = i - 1;
5026 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5027 for (i = 0; i < dd->num_pports; ++i) {
5028 dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5029 * sizeof(u64), GFP_KERNEL);
5030 if (!dd->pport[i].cpspec->portcntrs)
5031 qib_dev_err(dd,
5032 "Failed allocation for portcounters\n");
5033 }
5034}
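/*
 * Worked example (the context count is hypothetical): if dd->cfgctxts
 * were 6, the scan above stops after "Ctxt5EgrOvfl", so ncntrs becomes
 * 11 (5 device counters plus 6 per-context EgrOvfl counters) and
 * cntrnamelen covers the name string only up to and including that
 * line's newline; the remaining CtxtN lines are never reported.
 */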
5035
5036static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5037 u64 **cntrp)
5038{
5039 u32 ret;
5040
5041 if (namep) {
5042 ret = dd->cspec->cntrnamelen;
5043 if (pos >= ret)
5044 ret = 0; /* final read after getting everything */
5045 else
5046 *namep = (char *) cntr7322names;
5047 } else {
5048 u64 *cntr = dd->cspec->cntrs;
5049 int i;
5050
5051 ret = dd->cspec->ncntrs * sizeof(u64);
5052 if (!cntr || pos >= ret) {
5053 /* everything read, or couldn't get memory */
5054 ret = 0;
5055 goto done;
5056 }
5057 *cntrp = cntr;
5058 for (i = 0; i < dd->cspec->ncntrs; i++)
5059 if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5060 *cntr++ = read_7322_creg(dd,
5061 cntr7322indices[i] &
5062 _PORT_CNTR_IDXMASK);
5063 else
5064 *cntr++ = read_7322_creg32(dd,
5065 cntr7322indices[i]);
5066 }
5067done:
5068 return ret;
5069}
5070
5071static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5072 char **namep, u64 **cntrp)
5073{
5074 u32 ret;
5075
5076 if (namep) {
5077 ret = dd->cspec->portcntrnamelen;
5078 if (pos >= ret)
5079 ret = 0; /* final read after getting everything */
5080 else
5081 *namep = (char *)portcntr7322names;
5082 } else {
5083 struct qib_pportdata *ppd = &dd->pport[port];
5084 u64 *cntr = ppd->cpspec->portcntrs;
5085 int i;
5086
5087 ret = dd->cspec->nportcntrs * sizeof(u64);
5088 if (!cntr || pos >= ret) {
5089 /* everything read, or couldn't get memory */
5090 ret = 0;
5091 goto done;
5092 }
5093 *cntrp = cntr;
5094 for (i = 0; i < dd->cspec->nportcntrs; i++) {
5095 if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5096 *cntr++ = qib_portcntr_7322(ppd,
5097 portcntr7322indices[i] &
5098 _PORT_CNTR_IDXMASK);
5099 else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5100 *cntr++ = read_7322_creg_port(ppd,
5101 portcntr7322indices[i] &
5102 _PORT_CNTR_IDXMASK);
5103 else
5104 *cntr++ = read_7322_creg32_port(ppd,
5105 portcntr7322indices[i]);
5106 }
5107 }
5108done:
5109 return ret;
5110}
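/*
 * Illustrative read sequence (the caller and variable names are
 * hypothetical): a consumer first calls with namep non-NULL to get the
 * name blob and its length, then calls with cntrp non-NULL to snapshot
 * the values:
 *
 *	len = qib_read_7322portcntrs(dd, 0, port, &names, NULL);
 *	nbytes = qib_read_7322portcntrs(dd, 0, port, NULL, &vals);
 *
 * Once pos reaches the previously returned size, the routine returns 0
 * to signal that everything has been read.
 */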
5111
5112/**
5113 * qib_get_7322_faststats - get word counters from chip before they overflow
5114 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
5115 *
5116 * VESTIGIAL: IBA7322 has no "small fast counters", so the only
5117 * real purpose of this function is to maintain the notion of
5118 * "active time", which in turn is only logged into the eeprom,
5119 * which we don't have, yet, for 7322-based boards.
5120 *
5121 * called from add_timer
5122 */
5123static void qib_get_7322_faststats(unsigned long opaque)
5124{
5125 struct qib_devdata *dd = (struct qib_devdata *) opaque;
5126 struct qib_pportdata *ppd;
5127 unsigned long flags;
5128 u64 traffic_wds;
5129 int pidx;
5130
5131 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5132 ppd = dd->pport + pidx;
5133
5134 /*
5135 * If the port isn't enabled, or is not operational, or
5136 * diags is running (which can cause memory diags to fail),
5137 * skip this port this time.
5138 */
5139 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5140 || dd->diag_client)
5141 continue;
5142
5143 /*
5144 * Maintain an activity timer, based on traffic
5145 * exceeding a threshold, so we need to check the word-counts
5146 * even if they are 64-bit.
5147 */
5148 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5149 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5150 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5151 traffic_wds -= ppd->dd->traffic_wds;
5152 ppd->dd->traffic_wds += traffic_wds;
5153 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5154 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5155 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5156 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5157 QIB_IB_QDR) &&
5158 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5159 QIBL_LINKACTIVE)) &&
5160 ppd->cpspec->qdr_dfe_time &&
8482d5d1 5161 time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5162 ppd->cpspec->qdr_dfe_on = 0;
5163
5164 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5165 ppd->dd->cspec->r1 ?
5166 QDR_STATIC_ADAPT_INIT_R1 :
5167 QDR_STATIC_ADAPT_INIT);
5168 force_h1(ppd);
5169 }
5170 }
5171 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5172}
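/*
 * Worked example (the numbers are made up): if the send+receive word
 * counters sum to 1,000,000 and dd->traffic_wds held 900,000 at the
 * previous poll, the delta of 100,000 words is folded back into
 * dd->traffic_wds and, if it is at least QIB_TRAFFIC_ACTIVE_THRESHOLD,
 * active_time is credited with another ACTIVITY_TIMER seconds before
 * the timer re-arms.
 */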
5173
5174/*
5175 * If we were using MSIx, try to fallback to INTx.
5176 */
5177static int qib_7322_intr_fallback(struct qib_devdata *dd)
5178{
5179 if (!dd->cspec->num_msix_entries)
5180 return 0; /* already using INTx */
5181
5182 qib_devinfo(dd->pcidev,
5183 "MSIx interrupt not detected, trying INTx interrupts\n");
5184 qib_7322_nomsix(dd);
5185 qib_enable_intx(dd->pcidev);
5186 qib_setup_7322_interrupt(dd, 0);
5187 return 1;
5188}
5189
5190/*
5191 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
5192 * than resetting the IBC or external link state, and useful in some
5193 * cases to cause some retraining. To do this right, we reset IBC
5194 * as well, then return to previous state (which may be still in reset)
5195 * NOTE: some callers of this "know" this writes the current value
5196 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5197 * check all callers.
5198 */
5199static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5200{
5201 u64 val;
5202 struct qib_devdata *dd = ppd->dd;
5203 const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5204 SYM_MASK(IBPCSConfig_0, xcv_treset) |
5205 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5206
5207 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5208 qib_write_kreg(dd, kr_hwerrmask,
5209 dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5210 qib_write_kreg_port(ppd, krp_ibcctrl_a,
5211 ppd->cpspec->ibcctrl_a &
5212 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5213
5214 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5215 qib_read_kreg32(dd, kr_scratch);
5216 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5217 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5218 qib_write_kreg(dd, kr_scratch, 0ULL);
5219 qib_write_kreg(dd, kr_hwerrclear,
5220 SYM_MASK(HwErrClear, statusValidNoEopClear));
5221 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5222}
5223
5224/*
5225 * This code for non-IBTA-compliant IB speed negotiation is only known to
5226 * work for the SDR to DDR transition, and only between an HCA and a switch
5227 * with recent firmware. It is based on observed heuristics, rather than
5228 * actual knowledge of the non-compliant speed negotiation.
5229 * It has a number of hard-coded fields, since the hope is to rewrite this
5230 * when a spec is available on how the negotiation is intended to work.
5231 */
5232static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5233 u32 dcnt, u32 *data)
5234{
5235 int i;
5236 u64 pbc;
5237 u32 __iomem *piobuf;
5238 u32 pnum, control, len;
5239 struct qib_devdata *dd = ppd->dd;
5240
5241 i = 0;
5242 len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5243 control = qib_7322_setpbc_control(ppd, len, 0, 15);
5244 pbc = ((u64) control << 32) | len;
5245 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5246 if (i++ > 15)
5247 return;
5248 udelay(2);
5249 }
5250 /* disable header check on this packet, since it can't be valid */
5251 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5252 writeq(pbc, piobuf);
5253 qib_flush_wc();
5254 qib_pio_copy(piobuf + 2, hdr, 7);
5255 qib_pio_copy(piobuf + 9, data, dcnt);
5256 if (dd->flags & QIB_USE_SPCL_TRIG) {
5257 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5258
5259 qib_flush_wc();
5260 __raw_writel(0xaebecede, piobuf + spcl_off);
5261 }
5262 qib_flush_wc();
5263 qib_sendbuf_done(dd, pnum);
5264 /* and re-enable hdr check */
5265 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5266}
5267
5268/*
5269 * _start packet gets sent twice at start, _done gets sent twice at end
5270 */
5271static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5272{
5273 struct qib_devdata *dd = ppd->dd;
5274 static u32 swapped;
5275 u32 dw, i, hcnt, dcnt, *data;
5276 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5277 static u32 madpayload_start[0x40] = {
5278 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5279 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5280 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5281 };
5282 static u32 madpayload_done[0x40] = {
5283 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5284 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5285 0x40000001, 0x1388, 0x15e, /* rest 0's */
5286 };
5287
5288 dcnt = ARRAY_SIZE(madpayload_start);
5289 hcnt = ARRAY_SIZE(hdr);
5290 if (!swapped) {
5291 /* for maintainability, do it at runtime */
5292 for (i = 0; i < hcnt; i++) {
5293 dw = (__force u32) cpu_to_be32(hdr[i]);
5294 hdr[i] = dw;
5295 }
5296 for (i = 0; i < dcnt; i++) {
5297 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5298 madpayload_start[i] = dw;
5299 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5300 madpayload_done[i] = dw;
5301 }
5302 swapped = 1;
5303 }
5304
5305 data = which ? madpayload_done : madpayload_start;
5306
5307 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5308 qib_read_kreg64(dd, kr_scratch);
5309 udelay(2);
5310 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5311 qib_read_kreg64(dd, kr_scratch);
5312 udelay(2);
5313}
5314
5315/*
5316 * Do the absolute minimum to cause an IB speed change, and make it
5317 * ready, but don't actually trigger the change. The caller will
5318 * do that when ready (if link is in Polling training state, it will
5319 * happen immediately, otherwise when link next goes down)
5320 *
5321 * This routine should only be used as part of the DDR autonegotiation
5322 * code for devices that are not compliant with IB 1.2 (or code that
5323 * fixes things up for same).
5324 *
5325 * When the link has gone down and autoneg is enabled, or autoneg has
5326 * failed and we give up until next time, we set both speeds, and
5327 * then we want IBTA enabled as well as "use max enabled speed".
5328 */
5329static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5330{
5331 u64 newctrlb;
5332 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5333 IBA7322_IBC_IBTA_1_2_MASK |
5334 IBA7322_IBC_MAX_SPEED_MASK);
5335
5336 if (speed & (speed - 1)) /* multiple speeds */
5337 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5338 IBA7322_IBC_IBTA_1_2_MASK |
5339 IBA7322_IBC_MAX_SPEED_MASK;
5340 else
5341 newctrlb |= speed == QIB_IB_QDR ?
5342 IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5343 ((speed == QIB_IB_DDR ?
5344 IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5345
5346 if (newctrlb == ppd->cpspec->ibcctrl_b)
5347 return;
5348
5349 ppd->cpspec->ibcctrl_b = newctrlb;
5350 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5351 qib_write_kreg(ppd->dd, kr_scratch, 0);
5352}
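/*
 * Illustrative calls (hypothetical): set_7322_ibspeed_fast(ppd,
 * QIB_IB_SDR | QIB_IB_DDR) programs all requested speed bits plus the
 * IBTA-1.2 and max-speed bits, while a single-speed request such as
 * QIB_IB_DDR programs just that speed code (QDR alone also sets the
 * IBTA-1.2 bit); nothing is written when ibcctrl_b is unchanged.
 */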
5353
5354/*
5355 * This routine is only used when we are not talking to another
5356 * IB 1.2-compliant device that we think can do DDR.
5357 * (This includes all existing switch chips as of Oct 2007.)
5358 * 1.2-compliant devices go directly to DDR prior to reaching INIT
5359 */
5360static void try_7322_autoneg(struct qib_pportdata *ppd)
5361{
5362 unsigned long flags;
5363
5364 spin_lock_irqsave(&ppd->lflags_lock, flags);
5365 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5366 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5367 qib_autoneg_7322_send(ppd, 0);
5368 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5369 qib_7322_mini_pcs_reset(ppd);
5370 /* 2 msec is minimum length of a poll cycle */
5371 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5372 msecs_to_jiffies(2));
5373}
5374
5375/*
5376 * Handle the empirically determined mechanism for auto-negotiation
5377 * of DDR speed with switches.
5378 */
5379static void autoneg_7322_work(struct work_struct *work)
5380{
5381 struct qib_pportdata *ppd;
5382 struct qib_devdata *dd;
5383 u64 startms;
5384 u32 i;
5385 unsigned long flags;
5386
5387 ppd = container_of(work, struct qib_chippport_specific,
5388 autoneg_work.work)->ppd;
5389 dd = ppd->dd;
5390
5391 startms = jiffies_to_msecs(jiffies);
5392
5393 /*
5394 * Busy wait for this first part, it should be at most a
5395 * few hundred usec, since we scheduled ourselves for 2msec.
5396 */
5397 for (i = 0; i < 25; i++) {
5398 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5399 == IB_7322_LT_STATE_POLLQUIET) {
5400 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5401 break;
5402 }
5403 udelay(100);
5404 }
5405
5406 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5407 goto done; /* we got there early or told to stop */
5408
5409 /* we expect this to timeout */
5410 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5411 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5412 msecs_to_jiffies(90)))
5413 goto done;
5414 qib_7322_mini_pcs_reset(ppd);
5415
5416 /* we expect this to timeout */
5417 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5418 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5419 msecs_to_jiffies(1700)))
5420 goto done;
5421 qib_7322_mini_pcs_reset(ppd);
5422
5423 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5424
5425 /*
5426 * Wait up to 250 msec for link to train and get to INIT at DDR;
5427 * this should terminate early.
5428 */
5429 wait_event_timeout(ppd->cpspec->autoneg_wait,
5430 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5431 msecs_to_jiffies(250));
5432done:
5433 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5434 spin_lock_irqsave(&ppd->lflags_lock, flags);
5435 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5436 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5437 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5438 ppd->cpspec->autoneg_tries = 0;
5439 }
5440 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5441 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5442 }
5443}
5444
5445/*
5446 * This routine is used to request IPG set in the QLogic switch.
5447 * Only called if r1.
5448 */
5449static void try_7322_ipg(struct qib_pportdata *ppd)
5450{
5451 struct qib_ibport *ibp = &ppd->ibport_data;
5452 struct ib_mad_send_buf *send_buf;
5453 struct ib_mad_agent *agent;
5454 struct ib_smp *smp;
5455 unsigned delay;
5456 int ret;
5457
5458 agent = ibp->send_agent;
5459 if (!agent)
5460 goto retry;
5461
5462 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5463 IB_MGMT_MAD_DATA, GFP_ATOMIC);
5464 if (IS_ERR(send_buf))
5465 goto retry;
5466
5467 if (!ibp->smi_ah) {
5468 struct ib_ah *ah;
5469
1fb9fed6 5470 ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
f931551b 5471 if (IS_ERR(ah))
1fb9fed6 5472 ret = PTR_ERR(ah);
5473 else {
5474 send_buf->ah = ah;
5475 ibp->smi_ah = to_iah(ah);
5476 ret = 0;
5477 }
5478 } else {
5479 send_buf->ah = &ibp->smi_ah->ibah;
5480 ret = 0;
5481 }
5482
5483 smp = send_buf->mad;
5484 smp->base_version = IB_MGMT_BASE_VERSION;
5485 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5486 smp->class_version = 1;
5487 smp->method = IB_MGMT_METHOD_SEND;
5488 smp->hop_cnt = 1;
5489 smp->attr_id = QIB_VENDOR_IPG;
5490 smp->attr_mod = 0;
5491
5492 if (!ret)
5493 ret = ib_post_send_mad(send_buf, NULL);
5494 if (ret)
5495 ib_free_send_mad(send_buf);
5496retry:
5497 delay = 2 << ppd->cpspec->ipg_tries;
5498 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5499 msecs_to_jiffies(delay));
5500}
5501
5502/*
5503 * Timeout handler for setting IPG.
5504 * Only called if r1.
5505 */
5506static void ipg_7322_work(struct work_struct *work)
5507{
5508 struct qib_pportdata *ppd;
5509
5510 ppd = container_of(work, struct qib_chippport_specific,
5511 ipg_work.work)->ppd;
5512 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5513 && ++ppd->cpspec->ipg_tries <= 10)
5514 try_7322_ipg(ppd);
5515}
5516
5517static u32 qib_7322_iblink_state(u64 ibcs)
5518{
5519 u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5520
5521 switch (state) {
5522 case IB_7322_L_STATE_INIT:
5523 state = IB_PORT_INIT;
5524 break;
5525 case IB_7322_L_STATE_ARM:
5526 state = IB_PORT_ARMED;
5527 break;
5528 case IB_7322_L_STATE_ACTIVE:
5529 /* fall through */
5530 case IB_7322_L_STATE_ACT_DEFER:
5531 state = IB_PORT_ACTIVE;
5532 break;
5533 default: /* fall through */
5534 case IB_7322_L_STATE_DOWN:
5535 state = IB_PORT_DOWN;
5536 break;
5537 }
5538 return state;
5539}
5540
5541/* returns the IBTA port state, rather than the IBC link training state */
5542static u8 qib_7322_phys_portstate(u64 ibcs)
5543{
5544 u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5545 return qib_7322_physportstate[state];
5546}
5547
5548static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5549{
5550 int ret = 0, symadj = 0;
5551 unsigned long flags;
5552 int mult;
5553
5554 spin_lock_irqsave(&ppd->lflags_lock, flags);
5555 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5556 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5557
5558 /* Update our picture of width and speed from chip */
5559 if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5560 ppd->link_speed_active = QIB_IB_QDR;
5561 mult = 4;
5562 } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5563 ppd->link_speed_active = QIB_IB_DDR;
5564 mult = 2;
5565 } else {
5566 ppd->link_speed_active = QIB_IB_SDR;
5567 mult = 1;
5568 }
5569 if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5570 ppd->link_width_active = IB_WIDTH_4X;
5571 mult *= 4;
5572 } else
5573 ppd->link_width_active = IB_WIDTH_1X;
5574 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5575
5576 if (!ibup) {
5577 u64 clr;
5578
5579 /* Link went down. */
5580 /* do IPG MAD again after linkdown, even if last time failed */
5581 ppd->cpspec->ipg_tries = 0;
5582 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5583 (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5584 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5585 if (clr)
5586 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5587 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5588 QIBL_IB_AUTONEG_INPROG)))
5589 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5590 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5591 struct qib_qsfp_data *qd =
5592 &ppd->cpspec->qsfp_data;
5593 /* unlock the Tx settings, speed may change */
5594 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5595 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5596 reset_tx_deemphasis_override));
f931551b 5597 qib_cancel_sends(ppd);
5598 /* on link down, ensure sane pcs state */
5599 qib_7322_mini_pcs_reset(ppd);
5600 /* schedule the qsfp refresh which should turn the link
5601 off */
5602 if (ppd->dd->flags & QIB_HAS_QSFP) {
8482d5d1 5603 qd->t_insert = jiffies;
042f36e1 5604 queue_work(ib_wq, &qd->work);
dde05cbd 5605 }
5606 spin_lock_irqsave(&ppd->sdma_lock, flags);
5607 if (__qib_sdma_running(ppd))
5608 __qib_sdma_process_event(ppd,
5609 qib_sdma_event_e70_go_idle);
5610 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5611 }
5612 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5613 if (clr == ppd->cpspec->iblnkdownsnap)
5614 ppd->cpspec->iblnkdowndelta++;
5615 } else {
5616 if (qib_compat_ddr_negotiate &&
5617 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5618 QIBL_IB_AUTONEG_INPROG)) &&
5619 ppd->link_speed_active == QIB_IB_SDR &&
5620 (ppd->link_speed_enabled & QIB_IB_DDR)
5621 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5622 /* we are SDR, and auto-negotiation enabled */
5623 ++ppd->cpspec->autoneg_tries;
5624 if (!ppd->cpspec->ibdeltainprog) {
5625 ppd->cpspec->ibdeltainprog = 1;
5626 ppd->cpspec->ibsymdelta +=
5627 read_7322_creg32_port(ppd,
5628 crp_ibsymbolerr) -
5629 ppd->cpspec->ibsymsnap;
5630 ppd->cpspec->iblnkerrdelta +=
5631 read_7322_creg32_port(ppd,
5632 crp_iblinkerrrecov) -
5633 ppd->cpspec->iblnkerrsnap;
5634 }
5635 try_7322_autoneg(ppd);
5636 ret = 1; /* no other IB status change processing */
5637 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5638 ppd->link_speed_active == QIB_IB_SDR) {
5639 qib_autoneg_7322_send(ppd, 1);
5640 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5641 qib_7322_mini_pcs_reset(ppd);
5642 udelay(2);
5643 ret = 1; /* no other IB status change processing */
5644 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5645 (ppd->link_speed_active & QIB_IB_DDR)) {
5646 spin_lock_irqsave(&ppd->lflags_lock, flags);
5647 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5648 QIBL_IB_AUTONEG_FAILED);
5649 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5650 ppd->cpspec->autoneg_tries = 0;
5651 /* re-enable SDR, for next link down */
5652 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5653 wake_up(&ppd->cpspec->autoneg_wait);
5654 symadj = 1;
5655 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5656 /*
5657 * Clear autoneg failure flag, and do setup
5658 * so we'll try next time link goes down and
5659 * back to INIT (possibly connected to a
5660 * different device).
5661 */
5662 spin_lock_irqsave(&ppd->lflags_lock, flags);
5663 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5664 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5665 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5666 symadj = 1;
5667 }
5668 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5669 symadj = 1;
5670 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5671 try_7322_ipg(ppd);
5672 if (!ppd->cpspec->recovery_init)
5673 setup_7322_link_recovery(ppd, 0);
5674 ppd->cpspec->qdr_dfe_time = jiffies +
5675 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5676 }
5677 ppd->cpspec->ibmalfusesnap = 0;
5678 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5679 crp_errlink);
5680 }
5681 if (symadj) {
5682 ppd->cpspec->iblnkdownsnap =
5683 read_7322_creg32_port(ppd, crp_iblinkdown);
5684 if (ppd->cpspec->ibdeltainprog) {
5685 ppd->cpspec->ibdeltainprog = 0;
5686 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5687 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5688 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5689 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5690 }
5691 } else if (!ibup && qib_compat_ddr_negotiate &&
5692 !ppd->cpspec->ibdeltainprog &&
5693 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5694 ppd->cpspec->ibdeltainprog = 1;
5695 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5696 crp_ibsymbolerr);
5697 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5698 crp_iblinkerrrecov);
5699 }
5700
5701 if (!ret)
5702 qib_setup_7322_setextled(ppd, ibup);
5703 return ret;
5704}
5705
5706/*
5707 * Does read/modify/write to appropriate registers to
5708 * set output and direction bits selected by mask.
5709 * These are in their canonical positions (e.g. lsb of
5710 * dir will end up in D48 of extctrl on existing chips).
5711 * Returns contents of GP Inputs.
5712 */
5713static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5714{
5715 u64 read_val, new_out;
5716 unsigned long flags;
5717
5718 if (mask) {
5719 /* some bits being written, lock access to GPIO */
5720 dir &= mask;
5721 out &= mask;
5722 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5723 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5724 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5725 new_out = (dd->cspec->gpio_out & ~mask) | out;
5726
5727 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5728 qib_write_kreg(dd, kr_gpio_out, new_out);
5729 dd->cspec->gpio_out = new_out;
5730 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5731 }
5732 /*
5733 * It is unlikely that a read at this time would get valid
5734 * data on a pin whose direction line was set in the same
5735 * call to this function. We include the read here because
5736 * that allows us to potentially combine a change on one pin with
5737 * a read on another, and because the old code did something like
5738 * this.
5739 */
5740 read_val = qib_read_kreg64(dd, kr_extstatus);
5741 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5742}
5743
5744/* Enable writes to config EEPROM, if possible. Returns previous state */
5745static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5746{
5747 int prev_wen;
5748 u32 mask;
5749
5750 mask = 1 << QIB_EEPROM_WEN_NUM;
5751 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5752 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5753
5754 return prev_wen & 1;
5755}
5756
5757/*
5758 * Read fundamental info we need to use the chip. These are
5759 * the registers that describe chip capabilities, and are
5760 * saved in shadow registers.
5761 */
5762static void get_7322_chip_params(struct qib_devdata *dd)
5763{
5764 u64 val;
5765 u32 piobufs;
5766 int mtu;
5767
5768 dd->palign = qib_read_kreg32(dd, kr_pagealign);
5769
5770 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5771
5772 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5773 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5774 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5775 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5776 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5777
5778 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5779 dd->piobcnt2k = val & ~0U;
5780 dd->piobcnt4k = val >> 32;
5781 val = qib_read_kreg64(dd, kr_sendpiosize);
5782 dd->piosize2k = val & ~0U;
5783 dd->piosize4k = val >> 32;
5784
5785 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5786 if (mtu == -1)
5787 mtu = QIB_DEFAULT_MTU;
5788 dd->pport[0].ibmtu = (u32)mtu;
5789 dd->pport[1].ibmtu = (u32)mtu;
5790
5791 /* these may be adjusted in init_chip_wc_pat() */
5792 dd->pio2kbase = (u32 __iomem *)
5793 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5794 dd->pio4kbase = (u32 __iomem *)
5795 ((char __iomem *) dd->kregbase +
5796 (dd->piobufbase >> 32));
5797 /*
5798 * 4K buffers take 2 pages; we use roundup just to be
5799 * paranoid; we calculate it once here, rather than on
5800 * every buf allocation
5801 */
5802 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5803
5804 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5805
5806 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5807 (sizeof(u64) * BITS_PER_BYTE / 2);
5808}
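/*
 * Worked example of the pioavregs math (the buffer count is
 * hypothetical): sizeof(u64) * BITS_PER_BYTE / 2 is 32, i.e. each
 * 64-bit PIO-avail register covers 32 buffers at 2 bits apiece, so
 * 160 total buffers would need ALIGN(160, 32) / 32 = 5 registers.
 */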
5809
5810/*
5811 * The chip base addresses in cspec and cpspec have to be set
5812 * after possible init_chip_wc_pat(), rather than in
5813 * get_7322_chip_params(), so split out as separate function
5814 */
5815static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5816{
5817 u32 cregbase;
5818 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5819
5820 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5821 (char __iomem *)dd->kregbase);
5822
5823 dd->egrtidbase = (u64 __iomem *)
5824 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5825
5826 /* port registers are defined as relative to base of chip */
5827 dd->pport[0].cpspec->kpregbase =
5828 (u64 __iomem *)((char __iomem *)dd->kregbase);
5829 dd->pport[1].cpspec->kpregbase =
5830 (u64 __iomem *)(dd->palign +
5831 (char __iomem *)dd->kregbase);
5832 dd->pport[0].cpspec->cpregbase =
5833 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5834 kr_counterregbase) + (char __iomem *)dd->kregbase);
5835 dd->pport[1].cpspec->cpregbase =
5836 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5837 kr_counterregbase) + (char __iomem *)dd->kregbase);
5838}
5839
5840/*
5841 * This is a fairly special-purpose observer, so we only support
5842 * the port-specific parts of SendCtrl
5843 */
5844
5845#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5846 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5847 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5848 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5849 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5850 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5851 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5852
5853static int sendctrl_hook(struct qib_devdata *dd,
5854 const struct diag_observer *op, u32 offs,
5855 u64 *data, u64 mask, int only_32)
5856{
5857 unsigned long flags;
5858 unsigned idx;
5859 unsigned pidx;
5860 struct qib_pportdata *ppd = NULL;
5861 u64 local_data, all_bits;
5862
5863 /*
5864 * The fixed correspondence between Physical ports and pports is
5865 * severed. We need to hunt for the ppd that corresponds
5866 * to the offset we got. And we have to do that without admitting
5867 * we know the stride, apparently.
5868 */
5869 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5870 u64 __iomem *psptr;
5871 u32 psoffs;
5872
5873 ppd = dd->pport + pidx;
5874 if (!ppd->cpspec->kpregbase)
5875 continue;
5876
5877 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5878 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5879 if (psoffs == offs)
5880 break;
5881 }
5882
5883 /* If pport is not being managed by driver, just avoid shadows. */
5884 if (pidx >= dd->num_pports)
5885 ppd = NULL;
5886
5887 /* In any case, "idx" is flat index in kreg space */
5888 idx = offs / sizeof(u64);
5889
5890 all_bits = ~0ULL;
5891 if (only_32)
5892 all_bits >>= 32;
5893
5894 spin_lock_irqsave(&dd->sendctrl_lock, flags);
5895 if (!ppd || (mask & all_bits) != all_bits) {
5896 /*
5897 * At least some mask bits are zero, so we need
5898 * to read. The judgement call is whether from
5899 * reg or shadow. First-cut: read reg, and complain
5900 * if any bits which should be shadowed are different
5901 * from their shadowed value.
5902 */
5903 if (only_32)
5904 local_data = (u64)qib_read_kreg32(dd, idx);
5905 else
5906 local_data = qib_read_kreg64(dd, idx);
5907 *data = (local_data & ~mask) | (*data & mask);
5908 }
5909 if (mask) {
5910 /*
5911 * At least some mask bits are one, so we need
5912 * to write, but only shadow some bits.
5913 */
5914 u64 sval, tval; /* Shadowed, transient */
5915
5916 /*
5917 * New shadow val is bits we don't want to touch,
5918 * ORed with bits we do, that are intended for shadow.
5919 */
5920 if (ppd) {
5921 sval = ppd->p_sendctrl & ~mask;
5922 sval |= *data & SENDCTRL_SHADOWED & mask;
5923 ppd->p_sendctrl = sval;
5924 } else
5925 sval = *data & SENDCTRL_SHADOWED & mask;
5926 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5927 qib_write_kreg(dd, idx, tval);
5928 qib_write_kreg(dd, kr_scratch, 0Ull);
5929 }
5930 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5931 return only_32 ? 4 : 8;
5932}
5933
5934static const struct diag_observer sendctrl_0_observer = {
5935 sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5936 KREG_IDX(SendCtrl_0) * sizeof(u64)
5937};
5938
5939static const struct diag_observer sendctrl_1_observer = {
5940 sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5941 KREG_IDX(SendCtrl_1) * sizeof(u64)
5942};
5943
5944static ushort sdma_fetch_prio = 8;
5945module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5946MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5947
5948/* Besides logging QSFP events, we set appropriate TxDDS values */
5949static void init_txdds_table(struct qib_pportdata *ppd, int override);
5950
5951static void qsfp_7322_event(struct work_struct *work)
5952{
5953 struct qib_qsfp_data *qd;
5954 struct qib_pportdata *ppd;
8482d5d1 5955 unsigned long pwrup;
16d99812 5956 unsigned long flags;
5957 int ret;
5958 u32 le2;
5959
5960 qd = container_of(work, struct qib_qsfp_data, work);
5961 ppd = qd->ppd;
5962 pwrup = qd->t_insert +
5963 msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
f931551b 5964
5965 /* Delay for 20 msecs to allow ModPrs resistor to setup */
5966 mdelay(QSFP_MODPRS_LAG_MSEC);
5967
5968 if (!qib_qsfp_mod_present(ppd)) {
5969 ppd->cpspec->qsfp_data.modpresent = 0;
5970 /* Set the physical link to disabled */
5971 qib_set_ib_7322_lstate(ppd, 0,
5972 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5973 spin_lock_irqsave(&ppd->lflags_lock, flags);
5974 ppd->lflags &= ~QIBL_LINKV;
5975 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5976 } else {
5977 /*
5978 * Some QSFPs not only do not respond until the full power-up
5979 * time, but may behave badly if we try. So hold off responding
5980 * to insertion.
5981 */
5982 while (1) {
8482d5d1 5983 if (time_is_before_jiffies(pwrup))
5984 break;
5985 msleep(20);
5986 }
5987
5988 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5989
5990 /*
5991 * Need to change LE2 back to defaults if we couldn't
5992 * read the cable type (to handle cable swaps), so do this
5993 * even on failure to read cable information. We don't
5994 * get here for QME, so IS_QME check not needed here.
5995 */
5996 if (!ret && !ppd->dd->cspec->r1) {
5997 if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5998 le2 = LE2_QME;
5999 else if (qd->cache.atten[1] >= qib_long_atten &&
6000 QSFP_IS_CU(qd->cache.tech))
6001 le2 = LE2_5m;
6002 else
6003 le2 = LE2_DEFAULT;
6004 } else
4634b794 6005 le2 = LE2_DEFAULT;
6006 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
6007 /*
6008 * We always change parameters, since we can choose
6009 * values for cables without eeproms, and the cable may have
6010 * changed from a cable with full or partial eeprom content
6011 * to one with partial or no content.
6012 */
6013 init_txdds_table(ppd, 0);
6014 /* The physical link is being re-enabled only when the
6015 * previous state was DISABLED and the VALID bit is not
6016 * set. This should only happen when the cable has been
6017 * physically pulled. */
6018 if (!ppd->cpspec->qsfp_data.modpresent &&
6019 (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6020 ppd->cpspec->qsfp_data.modpresent = 1;
6021 qib_set_ib_7322_lstate(ppd, 0,
6022 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6023 spin_lock_irqsave(&ppd->lflags_lock, flags);
6024 ppd->lflags |= QIBL_LINKV;
6025 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6026 }
dde05cbd 6027 }
6028}
6029
6030/*
6031 * There is little we can do but complain to the user if QSFP
6032 * initialization fails.
6033 */
6034static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6035{
6036 unsigned long flags;
6037 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6038 struct qib_devdata *dd = ppd->dd;
6039 u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6040
6041 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6042 qd->ppd = ppd;
6043 qib_qsfp_init(qd, qsfp_7322_event);
6044 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6045 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6046 dd->cspec->gpio_mask |= mod_prs_bit;
6047 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6048 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6049 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6050}
6051
6052/*
a77fcf89 6053 * called at device initialization time, and also if the txselect
6054 * module parameter is changed. This is used for cables that don't
6055 * have valid QSFP EEPROMs (not present, or attenuation is zero).
6056 * We initialize to the default, then if there is a specific
6057 * unit,port match, we use that (and set it immediately, for the
6058 * current speed, if the link is at INIT or better).
f931551b 6059 * String format is "default# unit#,port#=# ... u,p=#", separators must
6060 * be a SPACE character. A newline terminates. The u,p=# tuples may
6061 * optionally have "u,p=#,#", where the final # is the H1 value
6062 * The last specific match is used (actually, all are used, but last
6063 * one is the one that winds up set); if none at all, fall back on default.
6064 */
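/*
 * Hypothetical example of the format described above:
 *	txselect="2 0,1=15 0,2=16,10"
 * sets the default index to 2, uses txdds entry 15 for unit 0 port 1,
 * and entry 16 with an H1 value of 10 for unit 0 port 2; every index
 * must be below the txdds table size checked below.
 */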
6065static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6066{
6067 char *nxt, *str;
a77fcf89 6068 u32 pidx, unit, port, deflt, h1;
f931551b 6069 unsigned long val;
a77fcf89 6070 int any = 0, seth1;
e706203c 6071 int txdds_size;
f931551b 6072
a77fcf89 6073 str = txselect_list;
f931551b 6074
a77fcf89 6075 /* default number is validated in setup_txselect() */
6076 deflt = simple_strtoul(str, &nxt, 0);
6077 for (pidx = 0; pidx < dd->num_pports; ++pidx)
6078 dd->pport[pidx].cpspec->no_eep = deflt;
6079
6080 txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6081 if (IS_QME(dd) || IS_QMH(dd))
6082 txdds_size += TXDDS_MFG_SZ;
6083
6084 while (*nxt && nxt[1]) {
6085 str = ++nxt;
6086 unit = simple_strtoul(str, &nxt, 0);
6087 if (nxt == str || !*nxt || *nxt != ',') {
6088 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6089 ;
6090 continue;
6091 }
6092 str = ++nxt;
6093 port = simple_strtoul(str, &nxt, 0);
6094 if (nxt == str || *nxt != '=') {
6095 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6096 ;
6097 continue;
6098 }
6099 str = ++nxt;
6100 val = simple_strtoul(str, &nxt, 0);
6101 if (nxt == str) {
6102 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6103 ;
6104 continue;
6105 }
e706203c 6106 if (val >= txdds_size)
f931551b 6107 continue;
6108 seth1 = 0;
6109 h1 = 0; /* gcc thinks it might be used uninitialized */
6110 if (*nxt == ',' && nxt[1]) {
6111 str = ++nxt;
6112 h1 = (u32)simple_strtoul(str, &nxt, 0);
6113 if (nxt == str)
6114 while (*nxt && *nxt++ != ' ') /* skip */
6115 ;
6116 else
6117 seth1 = 1;
6118 }
6119 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6120 ++pidx) {
6121 struct qib_pportdata *ppd = &dd->pport[pidx];
6122
6123 if (ppd->port != port || !ppd->link_speed_supported)
f931551b 6124 continue;
a77fcf89 6125 ppd->cpspec->no_eep = val;
6126 if (seth1)
6127 ppd->cpspec->h1_val = h1;
f931551b 6128 /* now change the IBC and serdes, overriding generic */
a77fcf89 6129 init_txdds_table(ppd, 1);
d70585f7 6130 /* Re-enable the physical state machine on mezz boards
6131 * now that the correct settings have been set.
6132 * QSFP boards are handled by the QSFP event handler. */
6133 if (IS_QMH(dd) || IS_QME(dd))
6134 qib_set_ib_7322_lstate(ppd, 0,
6135 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6136 any++;
6137 }
6138 if (*nxt == '\n')
6139 break; /* done */
6140 }
6141 if (change && !any) {
6142 /* no specific setting, use the default.
6143 * Change the IBC and serdes, but since it's
6144 * general, don't override specific settings.
6145 */
6146 for (pidx = 0; pidx < dd->num_pports; ++pidx)
6147 if (dd->pport[pidx].link_speed_supported)
6148 init_txdds_table(&dd->pport[pidx], 0);
6149 }
6150}
6151
6152/* handle the txselect parameter changing */
6153static int setup_txselect(const char *str, struct kernel_param *kp)
6154{
6155 struct qib_devdata *dd;
6156 unsigned long val;
6157 int ret;
6158
f931551b 6159 if (strlen(str) >= MAX_ATTEN_LEN) {
7fac3301 6160 pr_info("txselect_values string too long\n");
6161 return -ENOSPC;
6162 }
6163 ret = kstrtoul(str, 0, &val);
6164 if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
e706203c 6165 TXDDS_MFG_SZ)) {
7fac3301 6166 pr_info("txselect_values must start with a number < %d\n",
e706203c 6167 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
7fac3301 6168 return ret ? ret : -EINVAL;
f931551b 6169 }
f931551b 6170
7fac3301 6171 strcpy(txselect_list, str);
f931551b 6172 list_for_each_entry(dd, &qib_dev_list, list)
6173 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6174 set_no_qsfp_atten(dd, 1);
6175 return 0;
6176}
6177
6178/*
6179 * Write the final few registers that depend on some of the
6180 * init setup. Done late in init, just before bringing up
6181 * the serdes.
6182 */
6183static int qib_late_7322_initreg(struct qib_devdata *dd)
6184{
6185 int ret = 0, n;
6186 u64 val;
6187
6188 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6189 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6190 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6191 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6192 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6193 if (val != dd->pioavailregs_phys) {
6194 qib_dev_err(dd,
6195 "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6196 (unsigned long) dd->pioavailregs_phys,
6197 (unsigned long long) val);
6198 ret = -EINVAL;
6199 }
6200
6201 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6202 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6203 /* driver sends get pkey, lid, etc. checking also, to catch bugs */
6204 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6205
6206 qib_register_observer(dd, &sendctrl_0_observer);
6207 qib_register_observer(dd, &sendctrl_1_observer);
6208
6209 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6210 qib_write_kreg(dd, kr_control, dd->control);
6211 /*
6212 * Set SendDmaFetchPriority and init Tx params, including
6213 * QSFP handler on boards that have QSFP.
6214 * First set our default attenuation entry for cables that
6215 * don't have valid attenuation.
6216 */
6217 set_no_qsfp_atten(dd, 0);
6218 for (n = 0; n < dd->num_pports; ++n) {
6219 struct qib_pportdata *ppd = dd->pport + n;
6220
6221 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6222 sdma_fetch_prio & 0xf);
6223 /* Initialize qsfp if present on board. */
6224 if (dd->flags & QIB_HAS_QSFP)
6225 qib_init_7322_qsfp(ppd);
6226 }
6227 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6228 qib_write_kreg(dd, kr_control, dd->control);
6229
6230 return ret;
6231}
6232
6233/* per IB port errors. */
6234#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6235 MASK_ACROSS(8, 15))
6236#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6237#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6238 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6239 MASK_ACROSS(0, 11))
6240
6241/*
6242 * Write the initialization per-port registers that need to be done at
6243 * driver load and after reset completes (i.e., that aren't done as part
6244 * of other init procedures called from qib_init.c).
6245 * Some of these should be redundant on reset, but play safe.
6246 */
6247static void write_7322_init_portregs(struct qib_pportdata *ppd)
6248{
6249 u64 val;
6250 int i;
6251
6252 if (!ppd->link_speed_supported) {
6253 /* no buffer credits for this port */
6254 for (i = 1; i < 8; i++)
6255 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6256 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6257 qib_write_kreg(ppd->dd, kr_scratch, 0);
6258 return;
6259 }
6260
6261 /*
6262 * Set the number of supported virtual lanes in IBC,
6263 * for flow control packet handling on unsupported VLs
6264 */
6265 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6266 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6267 val |= (u64)(ppd->vls_supported - 1) <<
6268 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6269 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6270
6271 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6272
6273 /* enable tx header checking */
6274 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6275 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6276 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6277
6278 qib_write_kreg_port(ppd, krp_ncmodectrl,
6279 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6280
6281 /*
6282 * Unconditionally clear the bufmask bits. If SDMA is
6283 * enabled, we'll set them appropriately later.
6284 */
6285 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6286 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6287 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6288 if (ppd->dd->cspec->r1)
6289 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6290}
6291
6292/*
6293 * Write the initialization per-device registers that need to be done at
6294 * driver load and after reset completes (i.e., that aren't done as part
6295 * of other init procedures called from qib_init.c). Also write per-port
6296 * registers that are affected by overall device config, such as QP mapping.
6297 * Some of these should be redundant on reset, but play safe.
6298 */
6299static void write_7322_initregs(struct qib_devdata *dd)
6300{
6301 struct qib_pportdata *ppd;
6302 int i, pidx;
6303 u64 val;
6304
6305 /* Set Multicast QPs received by port 2 to map to context one. */
6306 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6307
6308 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6309 unsigned n, regno;
6310 unsigned long flags;
6311
6312 if (dd->n_krcv_queues < 2 ||
6313 !dd->pport[pidx].link_speed_supported)
6314 continue;
6315
6316 ppd = &dd->pport[pidx];
6317
6318 /* be paranoid against later code motion, etc. */
6319 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6320 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6321 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6322
6323 /* Initialize QP to context mapping */
6324 regno = krp_rcvqpmaptable;
6325 val = 0;
6326 if (dd->num_pports > 1)
6327 n = dd->first_user_ctxt / dd->num_pports;
6328 else
6329 n = dd->first_user_ctxt - 1;
6330 for (i = 0; i < 32; ) {
6331 unsigned ctxt;
6332
6333 if (dd->num_pports > 1)
6334 ctxt = (i % n) * dd->num_pports + pidx;
6335 else if (i % n)
6336 ctxt = (i % n) + 1;
6337 else
6338 ctxt = ppd->hw_pidx;
6339 val |= ctxt << (5 * (i % 6));
6340 i++;
6341 if (i % 6 == 0) {
6342 qib_write_kreg_port(ppd, regno, val);
6343 val = 0;
6344 regno++;
6345 }
6346 }
6347 qib_write_kreg_port(ppd, regno, val);
6348 }
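/*
 * Worked example of the mapping above (a first_user_ctxt of 6 is
 * hypothetical): with two ports, n = 3, so the 32 entries for port 0
 * cycle through contexts 0, 2 and 4 while port 1 uses 1, 3 and 5;
 * each krp_rcvqpmaptable register packs six 5-bit context numbers,
 * entry i landing in bits 5 * (i % 6).
 */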
6349
6350 /*
6351 * Set up interrupt mitigation for kernel contexts, but
6352 * not user contexts (user contexts use interrupts when
6353 * stalled waiting for any packet, so want those interrupts
6354 * right away).
6355 */
6356 for (i = 0; i < dd->first_user_ctxt; i++) {
6357 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6358 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6359 }
6360
6361 /*
6362 * Initialize the (disabled) rcvflow tables. Application code
6363 * will set up each flow as it uses it.
6364 * Doesn't clear any of the error bits that might be set.
6365 */
6366 val = TIDFLOW_ERRBITS; /* these are W1C */
0502f94c 6367 for (i = 0; i < dd->cfgctxts; i++) {
6368 int flow;
6369 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6370 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6371 }
6372
6373 /*
6374 * dual cards init to dual port recovery, single port cards to
6375 * the one port. Dual port cards may later adjust to 1 port,
6376 * and then back to dual port if both ports are connected
6377 */
6378 if (dd->num_pports)
6379 setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6380}
6381
6382static int qib_init_7322_variables(struct qib_devdata *dd)
6383{
6384 struct qib_pportdata *ppd;
6385 unsigned features, pidx, sbufcnt;
6386 int ret, mtu;
6387 u32 sbufs, updthresh;
6388
6389 /* pport structs are contiguous, allocated after devdata */
6390 ppd = (struct qib_pportdata *)(dd + 1);
6391 dd->pport = ppd;
6392 ppd[0].dd = dd;
6393 ppd[1].dd = dd;
6394
6395 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6396
6397 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6398 ppd[1].cpspec = &ppd[0].cpspec[1];
6399 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6400 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6401
6402 spin_lock_init(&dd->cspec->rcvmod_lock);
6403 spin_lock_init(&dd->cspec->gpio_lock);
6404
6405 /* we haven't yet set QIB_PRESENT, so use read directly */
6406 dd->revision = readq(&dd->kregbase[kr_revision]);
6407
6408 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6409 qib_dev_err(dd,
6410 "Revision register read failure, giving up initialization\n");
6411 ret = -ENODEV;
6412 goto bail;
6413 }
6414 dd->flags |= QIB_PRESENT; /* now register routines work */
6415
6416 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6417 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6418 dd->cspec->r1 = dd->minrev == 1;
6419
6420 get_7322_chip_params(dd);
6421 features = qib_7322_boardname(dd);
6422
6423 /* now that piobcnt2k and 4k set, we can allocate these */
6424 sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6425 NUM_VL15_BUFS + BITS_PER_LONG - 1;
6426 sbufcnt /= BITS_PER_LONG;
6427 dd->cspec->sendchkenable = kmalloc(sbufcnt *
6428 sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6429 dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6430 sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6431 dd->cspec->sendibchk = kmalloc(sbufcnt *
6432 sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6433 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6434 !dd->cspec->sendibchk) {
6435 qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6436 ret = -ENOMEM;
6437 goto bail;
6438 }
6439
6440 ppd = dd->pport;
6441
6442 /*
6443 * GPIO bits for TWSI data and clock,
6444 * used for serial EEPROM.
6445 */
6446 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6447 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6448 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6449
6450 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6451 QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6452 QIB_HAS_THRESH_UPDATE |
6453 (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6454 dd->flags |= qib_special_trigger ?
6455 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6456
6457 /*
6458 * Set up initial values. These may change when PAT is enabled, but
6459 * we need these to do initial chip register accesses.
6460 */
6461 qib_7322_set_baseaddrs(dd);
6462
6463 mtu = ib_mtu_enum_to_int(qib_ibmtu);
6464 if (mtu == -1)
6465 mtu = QIB_DEFAULT_MTU;
6466
6467 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6468 /* all hwerrors become interrupts, unless special purposed */
6469 dd->cspec->hwerrmask = ~0ULL;
6470 /* link_recovery setup causes these errors, so ignore them,
6471 * other than clearing them when they occur */
6472 dd->cspec->hwerrmask &=
6473 ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6474 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6475 HWE_MASK(LATriggered));
6476
6477 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6478 struct qib_chippport_specific *cp = ppd->cpspec;
6479 ppd->link_speed_supported = features & PORT_SPD_CAP;
6480 features >>= PORT_SPD_CAP_SHIFT;
6481 if (!ppd->link_speed_supported) {
6482 /* single port mode (7340, or configured) */
6483 dd->skip_kctxt_mask |= 1 << pidx;
6484 if (pidx == 0) {
6485 /* Make sure port is disabled. */
6486 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6487 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6488 ppd[0] = ppd[1];
6489 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6490 IBSerdesPClkNotDetectMask_0)
6491 | SYM_MASK(HwErrMask,
6492 SDmaMemReadErrMask_0));
6493 dd->cspec->int_enable_mask &= ~(
6494 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6495 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6496 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6497 SYM_MASK(IntMask, SDmaIntMask_0) |
6498 SYM_MASK(IntMask, ErrIntMask_0) |
6499 SYM_MASK(IntMask, SendDoneIntMask_0));
6500 } else {
6501 /* Make sure port is disabled. */
6502 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6503 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6504 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6505 IBSerdesPClkNotDetectMask_1)
6506 | SYM_MASK(HwErrMask,
6507 SDmaMemReadErrMask_1));
6508 dd->cspec->int_enable_mask &= ~(
6509 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6510 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6511 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6512 SYM_MASK(IntMask, SDmaIntMask_1) |
6513 SYM_MASK(IntMask, ErrIntMask_1) |
6514 SYM_MASK(IntMask, SendDoneIntMask_1));
6515 }
6516 continue;
6517 }
6518
6519 dd->num_pports++;
6520 qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6521
6522 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6523 ppd->link_width_enabled = IB_WIDTH_4X;
6524 ppd->link_speed_enabled = ppd->link_speed_supported;
6525 /*
6526 * Set the initial values to reasonable default, will be set
6527 * for real when link is up.
6528 */
6529 ppd->link_width_active = IB_WIDTH_4X;
6530 ppd->link_speed_active = QIB_IB_SDR;
6531 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6532 switch (qib_num_cfg_vls) {
6533 case 1:
6534 ppd->vls_supported = IB_VL_VL0;
6535 break;
6536 case 2:
6537 ppd->vls_supported = IB_VL_VL0_1;
6538 break;
6539 default:
6540 qib_devinfo(dd->pcidev,
6541 "Invalid num_vls %u, using 4 VLs\n",
6542 qib_num_cfg_vls);
6543 qib_num_cfg_vls = 4;
6544 /* fall through */
6545 case 4:
6546 ppd->vls_supported = IB_VL_VL0_3;
6547 break;
6548 case 8:
6549 if (mtu <= 2048)
6550 ppd->vls_supported = IB_VL_VL0_7;
6551 else {
6552 qib_devinfo(dd->pcidev,
6553 "Invalid num_vls %u for MTU %d "
6554 ", using 4 VLs\n",
6555 qib_num_cfg_vls, mtu);
6556 ppd->vls_supported = IB_VL_VL0_3;
6557 qib_num_cfg_vls = 4;
6558 }
6559 break;
6560 }
6561 ppd->vls_operational = ppd->vls_supported;
6562
6563 init_waitqueue_head(&cp->autoneg_wait);
6564 INIT_DELAYED_WORK(&cp->autoneg_work,
6565 autoneg_7322_work);
6566 if (ppd->dd->cspec->r1)
6567 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6568
6569 /*
6570 * For Mez and similar cards, no qsfp info, so do
6571 * the "cable info" setup here. Can be overridden
6572 * in adapter-specific routines.
6573 */
6574 if (!(dd->flags & QIB_HAS_QSFP)) {
6575 if (!IS_QMH(dd) && !IS_QME(dd))
6576 qib_devinfo(dd->pcidev,
6577 "IB%u:%u: Unknown mezzanine card type\n",
6578 dd->unit, ppd->port);
a77fcf89 6579 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
f931551b 6580 /*
6581 * Choose center value as default tx serdes setting
6582 * until changed through module parameter.
f931551b 6583 */
6584 ppd->cpspec->no_eep = IS_QMH(dd) ?
6585 TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6586 } else
6587 cp->h1_val = H1_FORCE_VAL;
6588
6589 /* Avoid writes to chip for mini_init */
6590 if (!qib_mini_init)
6591 write_7322_init_portregs(ppd);
6592
6593 init_timer(&cp->chase_timer);
6594 cp->chase_timer.function = reenable_chase;
6595 cp->chase_timer.data = (unsigned long)ppd;
6596
6597 ppd++;
6598 }
6599
6600 dd->rcvhdrentsize = qib_rcvhdrentsize ?
6601 qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6602 dd->rcvhdrsize = qib_rcvhdrsize ?
6603 qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
a77fcf89 6604 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6605
6606 /* we always allocate at least 2048 bytes for eager buffers */
6607 dd->rcvegrbufsize = max(mtu, 2048);
6608 BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6609 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
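	/*
	 * For example, with a 4096-byte MTU the eager buffers are 4 KB and
	 * rcvegrbufsize_shift is ilog2(4096) = 12; anything at or below
	 * 2048 uses the 2 KB minimum (shift 11).
	 */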
6610
6611 qib_7322_tidtemplate(dd);
6612
6613 /*
6614 * We can request a receive interrupt for 1 or
6615 * more packets from current offset.
6616 */
6617 dd->rhdrhead_intr_off =
6618 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6619
6620 /* setup the stats timer; the add_timer is done at end of init */
6621 init_timer(&dd->stats_timer);
6622 dd->stats_timer.function = qib_get_7322_faststats;
6623 dd->stats_timer.data = (unsigned long) dd;
6624
6625 dd->ureg_align = 0x10000; /* 64KB alignment */
6626
6627 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6628
6629 qib_7322_config_ctxts(dd);
6630 qib_set_ctxtcnt(dd);
6631
6632 if (qib_wc_pat) {
6633 resource_size_t vl15off;
6634 /*
6635 * We do not set WC on the VL15 buffers to avoid
6636 * a rare problem with unaligned writes from
6637 * interrupt-flushed store buffers, so we need
6638 * to map those separately here. We can't solve
6639 * this for the rarely used mtrr case.
6640 */
6641 ret = init_chip_wc_pat(dd, 0);
6642 if (ret)
6643 goto bail;
6644
6645 /* vl15 buffers start just after the 4k buffers */
6646 vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6647 dd->piobcnt4k * dd->align4k;
6648 dd->piovl15base = ioremap_nocache(vl15off,
6649 NUM_VL15_BUFS * dd->align4k);
6650 if (!dd->piovl15base) {
6651 ret = -ENOMEM;
fce24a9d 6652 goto bail;
51fa3ca3 6653 }
6654 }
6655 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6656
6657 ret = 0;
6658 if (qib_mini_init)
6659 goto bail;
6660 if (!dd->num_pports) {
6661 qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6662 goto bail; /* no error, so can still figure out why err */
6663 }
6664
6665 write_7322_initregs(dd);
6666 ret = qib_create_ctxts(dd);
6667 init_7322_cntrnames(dd);
6668
6669 updthresh = 8U; /* update threshold */
6670
6671 /* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6672 * reserve the update threshold amount for other kernel use, such
6673 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6674 * unless we aren't enabling SDMA, in which case we want to use
6675 * all the 4k bufs for the kernel.
6676 * if this was less than the update threshold, we could wait
6677 * a long time for an update. Coded this way because we
6678 * sometimes change the update threshold for various reasons,
6679 * and we want this to remain robust.
6680 */
6681 if (dd->flags & QIB_HAS_SEND_DMA) {
6682 dd->cspec->sdmabufcnt = dd->piobcnt4k;
6683 sbufs = updthresh > 3 ? updthresh : 3;
6684 } else {
6685 dd->cspec->sdmabufcnt = 0;
6686 sbufs = dd->piobcnt4k;
6687 }
6688 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6689 dd->cspec->sdmabufcnt;
6690 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6691 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
bb77a077 6692 dd->last_pio = dd->cspec->lastbuf_for_pio;
6693 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6694 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6695
6696 /*
6697 * If we have 16 user contexts, we will have 7 sbufs
6698 * per context, so reduce the update threshold to match. We
6699 * want to update before we actually run out, at low pbufs/ctxt
6700 * so give ourselves some margin.
6701 */
6702 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6703 updthresh = dd->pbufsctxt - 2;
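	/*
	 * Example (hypothetical): 112 user PIO buffers spread over 16 user
	 * contexts gives pbufsctxt = 7, so updthresh drops from 8 to
	 * 7 - 2 = 5.
	 */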
6704 dd->cspec->updthresh_dflt = updthresh;
6705 dd->cspec->updthresh = updthresh;
6706
6707 /* before full enable, no interrupts, no locking needed */
6708 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6709 << SYM_LSB(SendCtrl, AvailUpdThld)) |
6710 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6711
6712 dd->psxmitwait_supported = 1;
6713 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6714bail:
6715 if (!dd->ctxtcnt)
6716 dd->ctxtcnt = 1; /* for other initialization code */
6717
6718 return ret;
6719}
6720
6721static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6722 u32 *pbufnum)
6723{
6724 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6725 struct qib_devdata *dd = ppd->dd;
6726
6727 /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6728 if (pbc & PBC_7322_VL15_SEND) {
6729 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6730 last = first;
6731 } else {
6732 if ((plen + 1) > dd->piosize2kmax_dwords)
6733 first = dd->piobcnt2k;
6734 else
6735 first = 0;
6736 last = dd->cspec->lastbuf_for_pio;
6737 }
6738 return qib_getsendbuf_range(dd, pbufnum, first, last);
6739}
6740
6741static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6742 u32 start)
6743{
6744 qib_write_kreg_port(ppd, krp_psinterval, intv);
6745 qib_write_kreg_port(ppd, krp_psstart, start);
6746}
6747
6748/*
6749 * Must be called with sdma_lock held, or before init finished.
6750 */
6751static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6752{
6753 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6754}
6755
6756static struct sdma_set_state_action sdma_7322_action_table[] = {
6757 [qib_sdma_state_s00_hw_down] = {
6758 .go_s99_running_tofalse = 1,
6759 .op_enable = 0,
6760 .op_intenable = 0,
6761 .op_halt = 0,
6762 .op_drain = 0,
6763 },
6764 [qib_sdma_state_s10_hw_start_up_wait] = {
6765 .op_enable = 0,
6766 .op_intenable = 1,
6767 .op_halt = 1,
6768 .op_drain = 0,
6769 },
6770 [qib_sdma_state_s20_idle] = {
6771 .op_enable = 1,
6772 .op_intenable = 1,
6773 .op_halt = 1,
6774 .op_drain = 0,
6775 },
6776 [qib_sdma_state_s30_sw_clean_up_wait] = {
6777 .op_enable = 0,
6778 .op_intenable = 1,
6779 .op_halt = 1,
6780 .op_drain = 0,
6781 },
6782 [qib_sdma_state_s40_hw_clean_up_wait] = {
6783 .op_enable = 1,
6784 .op_intenable = 1,
6785 .op_halt = 1,
6786 .op_drain = 0,
6787 },
6788 [qib_sdma_state_s50_hw_halt_wait] = {
6789 .op_enable = 1,
6790 .op_intenable = 1,
6791 .op_halt = 1,
6792 .op_drain = 1,
6793 },
6794 [qib_sdma_state_s99_running] = {
6795 .op_enable = 1,
6796 .op_intenable = 1,
6797 .op_halt = 0,
6798 .op_drain = 0,
6799 .go_s99_running_totrue = 1,
6800 },
6801};
6802
6803static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6804{
6805 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6806}
6807
6808static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6809{
6810 struct qib_devdata *dd = ppd->dd;
6811 unsigned lastbuf, erstbuf;
6812 u64 senddmabufmask[3] = { 0 };
6813 int n, ret = 0;
6814
6815 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6816 qib_sdma_7322_setlengen(ppd);
6817 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6818 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6819 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6820 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6821
6822 if (dd->num_pports)
6823 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6824 else
6825 n = dd->cspec->sdmabufcnt; /* failsafe for init */
6826 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6827 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6828 dd->cspec->sdmabufcnt);
6829 lastbuf = erstbuf + n;
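	/*
	 * Example (hypothetical counts): with piobcnt2k + piobcnt4k = 192,
	 * sdmabufcnt = 32 and two ports, n = 16, so port 1 is assigned
	 * buffers [160, 176) and port 2 buffers [176, 192).
	 */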
6830
6831 ppd->sdma_state.first_sendbuf = erstbuf;
6832 ppd->sdma_state.last_sendbuf = lastbuf;
6833 for (; erstbuf < lastbuf; ++erstbuf) {
6834 unsigned word = erstbuf / BITS_PER_LONG;
6835 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6836
6837 BUG_ON(word >= 3);
6838 senddmabufmask[word] |= 1ULL << bit;
6839 }
6840 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6841 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6842 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6843 return ret;
6844}
6845
6846/* sdma_lock must be held */
6847static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6848{
6849 struct qib_devdata *dd = ppd->dd;
6850 int sane;
6851 int use_dmahead;
6852 u16 swhead;
6853 u16 swtail;
6854 u16 cnt;
6855 u16 hwhead;
6856
6857 use_dmahead = __qib_sdma_running(ppd) &&
6858 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6859retry:
6860 hwhead = use_dmahead ?
6861 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6862 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6863
6864 swhead = ppd->sdma_descq_head;
6865 swtail = ppd->sdma_descq_tail;
6866 cnt = ppd->sdma_descq_cnt;
6867
6868 if (swhead < swtail)
6869 /* not wrapped */
6870 sane = (hwhead >= swhead) & (hwhead <= swtail);
6871 else if (swhead > swtail)
6872 /* wrapped around */
6873 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6874 (hwhead <= swtail);
6875 else
6876 /* empty */
6877 sane = (hwhead == swhead);
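	/*
	 * Wrap-around example: with cnt = 256, swhead = 250 and swtail = 10,
	 * any hwhead in [250, 255] or [0, 10] is considered sane; anything
	 * else is treated as no progress, after at most one retry directly
	 * from the chip register.
	 */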
6878
6879 if (unlikely(!sane)) {
6880 if (use_dmahead) {
6881 /* try one more time, directly from the register */
6882 use_dmahead = 0;
6883 goto retry;
6884 }
6885 /* proceed as if no progress */
6886 hwhead = swhead;
6887 }
6888
6889 return hwhead;
6890}
6891
6892static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6893{
6894 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6895
6896 return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6897 (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6898 !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6899 !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6900}
6901
6902/*
6903 * Compute the amount of delay before sending the next packet if the
6904 * port's send rate differs from the static rate set for the QP.
6905 * The delay affects the next packet and the amount of the delay is
 6906 * based on the length of this packet.
6907 */
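/*
 * Worked example with hypothetical multipliers: for a plen of 16 dwords,
 * snd_mult (port) of 4 and rcv_mult (QP static rate) of 8, the delay
 * field below is ((16 + 1) >> 1) * 4 = 32; if the static rate is as fast
 * or faster than the port rate, no delay is added.
 */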
6908static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6909 u8 srate, u8 vl)
6910{
6911 u8 snd_mult = ppd->delay_mult;
6912 u8 rcv_mult = ib_rate_to_delay[srate];
6913 u32 ret;
6914
6915 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6916
6917 /* Indicate VL15, else set the VL in the control word */
6918 if (vl == 15)
6919 ret |= PBC_7322_VL15_SEND_CTRL;
6920 else
6921 ret |= vl << PBC_VL_NUM_LSB;
6922 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6923
6924 return ret;
6925}
6926
6927/*
6928 * Enable the per-port VL15 send buffers for use.
6929 * They follow the rest of the buffers, without a config parameter.
6930 * This was in initregs, but that is done before the shadow
6931 * is set up, and this has to be done after the shadow is
6932 * set up.
6933 */
6934static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6935{
6936 unsigned vl15bufs;
6937
6938 vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6939 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6940 TXCHK_CHG_TYPE_KERN, NULL);
6941}
6942
6943static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6944{
6945 if (rcd->ctxt < NUM_IB_PORTS) {
6946 if (rcd->dd->num_pports > 1) {
6947 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6948 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6949 } else {
6950 rcd->rcvegrcnt = KCTXT0_EGRCNT;
6951 rcd->rcvegr_tid_base = 0;
6952 }
6953 } else {
6954 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6955 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6956 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6957 }
6958}
6959
6960#define QTXSLEEPS 5000
6961static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6962 u32 len, u32 which, struct qib_ctxtdata *rcd)
6963{
6964 int i;
6965 const int last = start + len - 1;
6966 const int lastr = last / BITS_PER_LONG;
6967 u32 sleeps = 0;
6968 int wait = rcd != NULL;
6969 unsigned long flags;
6970
6971 while (wait) {
6972 unsigned long shadow;
6973 int cstart, previ = -1;
6974
6975 /*
6976 * when flipping from kernel to user, we can't change
6977 * the checking type if the buffer is allocated to the
6978 * driver. It's OK the other direction, because it's
6979 * from close, and we have just disarm'ed all the
6980 * buffers. All the kernel to kernel changes are also
6981 * OK.
6982 */
6983 for (cstart = start; cstart <= last; cstart++) {
6984 i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6985 / BITS_PER_LONG;
6986 if (i != previ) {
6987 shadow = (unsigned long)
6988 le64_to_cpu(dd->pioavailregs_dma[i]);
6989 previ = i;
6990 }
6991 if (test_bit(((2 * cstart) +
6992 QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6993 % BITS_PER_LONG, &shadow))
6994 break;
6995 }
6996
6997 if (cstart > last)
6998 break;
6999
7000 if (sleeps == QTXSLEEPS)
7001 break;
7002 /* make sure we see an updated copy next time around */
7003 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7004 sleeps++;
a0a234d4 7005 msleep(20);
7006 }
7007
7008 switch (which) {
7009 case TXCHK_CHG_TYPE_DIS1:
7010 /*
7011 * disable checking on a range; used by diags; just
7012 * one buffer, but still written generically
7013 */
7014 for (i = start; i <= last; i++)
7015 clear_bit(i, dd->cspec->sendchkenable);
7016 break;
7017
7018 case TXCHK_CHG_TYPE_ENAB1:
7019 /*
7020 * (re)enable checking on a range; used by diags; just
7021 * one buffer, but still written generically; read
7022 * scratch to be sure buffer actually triggered, not
7023 * just flushed from processor.
7024 */
7025 qib_read_kreg32(dd, kr_scratch);
7026 for (i = start; i <= last; i++)
7027 set_bit(i, dd->cspec->sendchkenable);
7028 break;
7029
7030 case TXCHK_CHG_TYPE_KERN:
7031 /* usable by kernel */
7032 for (i = start; i <= last; i++) {
7033 set_bit(i, dd->cspec->sendibchk);
7034 clear_bit(i, dd->cspec->sendgrhchk);
7035 }
7036 spin_lock_irqsave(&dd->uctxt_lock, flags);
7037 /* see if we need to raise avail update threshold */
7038 for (i = dd->first_user_ctxt;
7039 dd->cspec->updthresh != dd->cspec->updthresh_dflt
7040 && i < dd->cfgctxts; i++)
7041 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7042 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7043 < dd->cspec->updthresh_dflt)
7044 break;
7045 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7046 if (i == dd->cfgctxts) {
7047 spin_lock_irqsave(&dd->sendctrl_lock, flags);
7048 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7049 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7050 dd->sendctrl |= (dd->cspec->updthresh &
7051 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7052 SYM_LSB(SendCtrl, AvailUpdThld);
7053 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7054 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7055 }
7056 break;
7057
7058 case TXCHK_CHG_TYPE_USER:
7059 /* for user process */
7060 for (i = start; i <= last; i++) {
7061 clear_bit(i, dd->cspec->sendibchk);
7062 set_bit(i, dd->cspec->sendgrhchk);
7063 }
7064 spin_lock_irqsave(&dd->sendctrl_lock, flags);
7065 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7066 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7067 dd->cspec->updthresh = (rcd->piocnt /
7068 rcd->subctxt_cnt) - 1;
7069 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7070 dd->sendctrl |= (dd->cspec->updthresh &
7071 SYM_RMASK(SendCtrl, AvailUpdThld))
7072 << SYM_LSB(SendCtrl, AvailUpdThld);
7073 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7074 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7075 } else
7076 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7077 break;
7078
7079 default:
7080 break;
7081 }
7082
7083 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7084 qib_write_kreg(dd, kr_sendcheckmask + i,
7085 dd->cspec->sendchkenable[i]);
7086
7087 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7088 qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7089 dd->cspec->sendgrhchk[i]);
7090 qib_write_kreg(dd, kr_sendibpktmask + i,
7091 dd->cspec->sendibchk[i]);
7092 }
7093
7094 /*
7095 * Be sure whatever we did was seen by the chip and acted upon,
7096 * before we return. Mostly important for which >= 2.
7097 */
7098 qib_read_kreg32(dd, kr_scratch);
7099}
7100
7101
7102/* useful for trigger analyzers, etc. */
7103static void writescratch(struct qib_devdata *dd, u32 val)
7104{
7105 qib_write_kreg(dd, kr_scratch, val);
7106}
7107
7108/* Dummy for now, use chip regs soon */
7109static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7110{
7111 return -ENXIO;
7112}
7113
7114/**
7115 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 7116 * @pdev: the pci_dev for the qlogic_ib device
7117 * @ent: pci_device_id struct for this dev
7118 *
7119 * Also allocates, inits, and returns the devdata struct for this
7120 * device instance
7121 *
7122 * This is global, and is called directly at init to set up the
7123 * chip-specific function pointers for later use.
7124 */
7125struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7126 const struct pci_device_id *ent)
7127{
7128 struct qib_devdata *dd;
7129 int ret, i;
7130 u32 tabsize, actual_cnt = 0;
7131
7132 dd = qib_alloc_devdata(pdev,
7133 NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7134 sizeof(struct qib_chip_specific) +
7135 NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7136 if (IS_ERR(dd))
7137 goto bail;
7138
7139 dd->f_bringup_serdes = qib_7322_bringup_serdes;
7140 dd->f_cleanup = qib_setup_7322_cleanup;
7141 dd->f_clear_tids = qib_7322_clear_tids;
7142 dd->f_free_irq = qib_7322_free_irq;
7143 dd->f_get_base_info = qib_7322_get_base_info;
7144 dd->f_get_msgheader = qib_7322_get_msgheader;
7145 dd->f_getsendbuf = qib_7322_getsendbuf;
7146 dd->f_gpio_mod = gpio_7322_mod;
7147 dd->f_eeprom_wen = qib_7322_eeprom_wen;
7148 dd->f_hdrqempty = qib_7322_hdrqempty;
7149 dd->f_ib_updown = qib_7322_ib_updown;
7150 dd->f_init_ctxt = qib_7322_init_ctxt;
7151 dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
7152 dd->f_intr_fallback = qib_7322_intr_fallback;
7153 dd->f_late_initreg = qib_late_7322_initreg;
7154 dd->f_setpbc_control = qib_7322_setpbc_control;
7155 dd->f_portcntr = qib_portcntr_7322;
7156 dd->f_put_tid = qib_7322_put_tid;
7157 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
7158 dd->f_rcvctrl = rcvctrl_7322_mod;
7159 dd->f_read_cntrs = qib_read_7322cntrs;
7160 dd->f_read_portcntrs = qib_read_7322portcntrs;
7161 dd->f_reset = qib_do_7322_reset;
7162 dd->f_init_sdma_regs = init_sdma_7322_regs;
7163 dd->f_sdma_busy = qib_sdma_7322_busy;
7164 dd->f_sdma_gethead = qib_sdma_7322_gethead;
7165 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
7166 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7167 dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
7168 dd->f_sendctrl = sendctrl_7322_mod;
7169 dd->f_set_armlaunch = qib_set_7322_armlaunch;
7170 dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
7171 dd->f_iblink_state = qib_7322_iblink_state;
7172 dd->f_ibphys_portstate = qib_7322_phys_portstate;
7173 dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
7174 dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
7175 dd->f_set_ib_loopback = qib_7322_set_loopback;
7176 dd->f_get_ib_table = qib_7322_get_ib_table;
7177 dd->f_set_ib_table = qib_7322_set_ib_table;
7178 dd->f_set_intr_state = qib_7322_set_intr_state;
7179 dd->f_setextled = qib_setup_7322_setextled;
7180 dd->f_txchk_change = qib_7322_txchk_change;
7181 dd->f_update_usrhead = qib_update_7322_usrhead;
7182 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
7183 dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
7184 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
7185 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
7186 dd->f_sdma_init_early = qib_7322_sdma_init_early;
7187 dd->f_writescratch = writescratch;
7188 dd->f_tempsense_rd = qib_7322_tempsense_rd;
7189#ifdef CONFIG_INFINIBAND_QIB_DCA
7190 dd->f_notify_dca = qib_7322_notify_dca;
7191#endif
7192 /*
7193 * Do remaining PCIe setup and save PCIe values in dd.
7194 * Any error printing is already done by the init code.
7195 * On return, we have the chip mapped, but chip registers
7196 * are not set up until start of qib_init_7322_variables.
7197 */
7198 ret = qib_pcie_ddinit(dd, pdev, ent);
7199 if (ret < 0)
7200 goto bail_free;
7201
7202 /* initialize chip-specific variables */
7203 ret = qib_init_7322_variables(dd);
7204 if (ret)
7205 goto bail_cleanup;
7206
7207 if (qib_mini_init || !dd->num_pports)
7208 goto bail;
7209
7210 /*
7211 * Determine number of vectors we want; depends on port count
7212 * and number of configured kernel receive queues actually used.
7213 * Should also depend on whether sdma is enabled or not, but
7214 * that's such a rare testing case it's not worth worrying about.
7215 */
7216 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7217 for (i = 0; i < tabsize; i++)
7218 if ((i < ARRAY_SIZE(irq_table) &&
7219 irq_table[i].port <= dd->num_pports) ||
7220 (i >= ARRAY_SIZE(irq_table) &&
7221 dd->rcd[i - ARRAY_SIZE(irq_table)]))
7222 actual_cnt++;
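	/*
	 * actual_cnt now holds one vector per irq_table entry that applies
	 * to the ports present, plus one per allocated kernel receive
	 * context; it may be trimmed below for the krcvq01_no_msi case.
	 */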
7223 /* reduce by ctxt's < 2 */
7224 if (qib_krcvq01_no_msi)
7225 actual_cnt -= dd->num_pports;
7226
f931551b 7227 tabsize = actual_cnt;
8469ba39 7228 dd->cspec->msix_entries = kzalloc(tabsize *
7229 sizeof(struct qib_msix_entry), GFP_KERNEL);
7230 if (!dd->cspec->msix_entries) {
7231 qib_dev_err(dd, "No memory for MSIx table\n");
7232 tabsize = 0;
7233 }
7234 for (i = 0; i < tabsize; i++)
a778f3fd 7235 dd->cspec->msix_entries[i].msix.entry = i;
7236
7237 if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
7238 qib_dev_err(dd,
7239 "Failed to setup PCIe or interrupts; continuing anyway\n");
7240 /* may be less than we wanted, if not enough available */
7241 dd->cspec->num_msix_entries = tabsize;
7242
7243 /* setup interrupt handler */
7244 qib_setup_7322_interrupt(dd, 1);
7245
7246 /* clear diagctrl register, in case diags were running and crashed */
7247 qib_write_kreg(dd, kr_hwdiagctrl, 0);
7248#ifdef CONFIG_INFINIBAND_QIB_DCA
7249 if (!dca_add_requester(&pdev->dev)) {
7250 qib_devinfo(dd->pcidev, "DCA enabled\n");
7251 dd->flags |= QIB_DCA_ENABLED;
7252 qib_setup_dca(dd);
7253 }
7254#endif
7255 goto bail;
7256
7257bail_cleanup:
7258 qib_pcie_ddcleanup(dd);
7259bail_free:
7260 qib_free_devdata(dd);
7261 dd = ERR_PTR(ret);
7262bail:
7263 return dd;
7264}
7265
7266/*
 7267 * Set the table entry at the specified index from the table specified.
7268 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7269 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7270 * 'idx' below addresses the correct entry, while its 4 LSBs select the
7271 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7272 */
7273#define DDS_ENT_AMP_LSB 14
7274#define DDS_ENT_MAIN_LSB 9
7275#define DDS_ENT_POST_LSB 5
7276#define DDS_ENT_PRE_XTRA_LSB 3
7277#define DDS_ENT_PRE_LSB 0
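/*
 * Sketch of the packed entry produced by set_txdds() below, with field
 * widths inferred from the adjacent LSB values: pre in bits [2:0],
 * pre_xtra in [4:3], post in [8:5], main in [13:9] and amp starting at
 * bit 14.
 */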
7278
7279/*
7280 * Set one entry in the TxDDS table for spec'd port
7281 * ridx picks one of the entries, while tp points
7282 * to the appropriate table entry.
7283 */
7284static void set_txdds(struct qib_pportdata *ppd, int ridx,
7285 const struct txdds_ent *tp)
7286{
7287 struct qib_devdata *dd = ppd->dd;
7288 u32 pack_ent;
7289 int regidx;
7290
7291 /* Get correct offset in chip-space, and in source table */
7292 regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7293 /*
7294 * We do not use qib_write_kreg_port() because it was intended
7295 * only for registers in the lower "port specific" pages.
7296 * So do index calculation by hand.
7297 */
7298 if (ppd->hw_pidx)
7299 regidx += (dd->palign / sizeof(u64));
7300
7301 pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7302 pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7303 pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7304 pack_ent |= tp->post << DDS_ENT_POST_LSB;
7305 qib_write_kreg(dd, regidx, pack_ent);
7306 /* Prevent back-to-back writes by hitting scratch */
7307 qib_write_kreg(ppd->dd, kr_scratch, 0);
7308}
7309
7310static const struct vendor_txdds_ent vendor_txdds[] = {
7311 { /* Amphenol 1m 30awg NoEq */
7312 { 0x41, 0x50, 0x48 }, "584470002 ",
7313 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
7314 },
7315 { /* Amphenol 3m 28awg NoEq */
7316 { 0x41, 0x50, 0x48 }, "584470004 ",
7317 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
7318 },
7319 { /* Finisar 3m OM2 Optical */
7320 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7321 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
7322 },
7323 { /* Finisar 30m OM2 Optical */
7324 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7325 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
7326 },
7327 { /* Finisar Default OM2 Optical */
7328 { 0x00, 0x90, 0x65 }, NULL,
7329 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
7330 },
7331 { /* Gore 1m 30awg NoEq */
7332 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
7333 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
7334 },
7335 { /* Gore 2m 30awg NoEq */
7336 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
7337 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
7338 },
7339 { /* Gore 1m 28awg NoEq */
7340 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
7341 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
7342 },
7343 { /* Gore 3m 28awg NoEq */
7344 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
7345 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
7346 },
7347 { /* Gore 5m 24awg Eq */
7348 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
7349 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
7350 },
7351 { /* Gore 7m 24awg Eq */
7352 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
7353 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
7354 },
7355 { /* Gore 5m 26awg Eq */
7356 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
7357 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
7358 },
7359 { /* Gore 7m 26awg Eq */
7360 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
7361 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
7362 },
7363 { /* Intersil 12m 24awg Active */
7364 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7365 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
7366 },
7367 { /* Intersil 10m 28awg Active */
7368 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7369 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
7370 },
7371 { /* Intersil 7m 30awg Active */
7372 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7373 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
7374 },
7375 { /* Intersil 5m 32awg Active */
7376 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7377 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
7378 },
7379 { /* Intersil Default Active */
7380 { 0x00, 0x30, 0xB4 }, NULL,
7381 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
7382 },
7383 { /* Luxtera 20m Active Optical */
7384 { 0x00, 0x25, 0x63 }, NULL,
7385 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
7386 },
7387 { /* Molex 1M Cu loopback */
7388 { 0x00, 0x09, 0x3A }, "74763-0025 ",
7389 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
7390 },
7391 { /* Molex 2m 28awg NoEq */
7392 { 0x00, 0x09, 0x3A }, "74757-2201 ",
7393 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
7394 },
7395};
7396
7397static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7398 /* amp, pre, main, post */
7399 { 2, 2, 15, 6 }, /* Loopback */
7400 { 0, 0, 0, 1 }, /* 2 dB */
7401 { 0, 0, 0, 2 }, /* 3 dB */
7402 { 0, 0, 0, 3 }, /* 4 dB */
7403 { 0, 0, 0, 4 }, /* 5 dB */
7404 { 0, 0, 0, 5 }, /* 6 dB */
7405 { 0, 0, 0, 6 }, /* 7 dB */
7406 { 0, 0, 0, 7 }, /* 8 dB */
7407 { 0, 0, 0, 8 }, /* 9 dB */
7408 { 0, 0, 0, 9 }, /* 10 dB */
7409 { 0, 0, 0, 10 }, /* 11 dB */
7410 { 0, 0, 0, 11 }, /* 12 dB */
7411 { 0, 0, 0, 12 }, /* 13 dB */
7412 { 0, 0, 0, 13 }, /* 14 dB */
7413 { 0, 0, 0, 14 }, /* 15 dB */
7414 { 0, 0, 0, 15 }, /* 16 dB */
7415};
7416
7417static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7418 /* amp, pre, main, post */
7419 { 2, 2, 15, 6 }, /* Loopback */
7420 { 0, 0, 0, 8 }, /* 2 dB */
7421 { 0, 0, 0, 8 }, /* 3 dB */
7422 { 0, 0, 0, 9 }, /* 4 dB */
7423 { 0, 0, 0, 9 }, /* 5 dB */
7424 { 0, 0, 0, 10 }, /* 6 dB */
7425 { 0, 0, 0, 10 }, /* 7 dB */
7426 { 0, 0, 0, 11 }, /* 8 dB */
7427 { 0, 0, 0, 11 }, /* 9 dB */
7428 { 0, 0, 0, 12 }, /* 10 dB */
7429 { 0, 0, 0, 12 }, /* 11 dB */
7430 { 0, 0, 0, 13 }, /* 12 dB */
7431 { 0, 0, 0, 13 }, /* 13 dB */
7432 { 0, 0, 0, 14 }, /* 14 dB */
7433 { 0, 0, 0, 14 }, /* 15 dB */
7434 { 0, 0, 0, 15 }, /* 16 dB */
7435};
7436
7437static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7438 /* amp, pre, main, post */
7439 { 2, 2, 15, 6 }, /* Loopback */
7440 { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
7441 { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
7442 { 0, 1, 0, 11 }, /* 4 dB */
7443 { 0, 1, 0, 13 }, /* 5 dB */
7444 { 0, 1, 0, 15 }, /* 6 dB */
7445 { 0, 1, 3, 15 }, /* 7 dB */
7446 { 0, 1, 7, 15 }, /* 8 dB */
7447 { 0, 1, 7, 15 }, /* 9 dB */
7448 { 0, 1, 8, 15 }, /* 10 dB */
7449 { 0, 1, 9, 15 }, /* 11 dB */
7450 { 0, 1, 10, 15 }, /* 12 dB */
7451 { 0, 2, 6, 15 }, /* 13 dB */
7452 { 0, 2, 7, 15 }, /* 14 dB */
7453 { 0, 2, 8, 15 }, /* 15 dB */
7454 { 0, 2, 9, 15 }, /* 16 dB */
7455};
7456
7457/*
7458 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7459 * These are mostly used for mez cards going through connectors
7460 * and backplane traces, but can be used to add other "unusual"
7461 * table values as well.
7462 */
7463static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7464 /* amp, pre, main, post */
7465 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
7466 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
7467 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
7468 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
7469 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7470 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7471 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7472 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7473 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7474 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7475 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7476 { 0, 0, 0, 3 }, /* QMH7342 backplane settings */
7477 { 0, 0, 0, 4 }, /* QMH7342 backplane settings */
7478};
7479
7480static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7481 /* amp, pre, main, post */
7482 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
7483 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
7484 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
7485 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
7486 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7487 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7488 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7489 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7490 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7491 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7492 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7493 { 0, 0, 0, 9 }, /* QMH7342 backplane settings */
7494 { 0, 0, 0, 10 }, /* QMH7342 backplane settings */
7495};
7496
7497static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7498 /* amp, pre, main, post */
7499 { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
7500 { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
7501 { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
7502 { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
7503 { 0, 1, 12, 10 }, /* QME7342 backplane setting */
7504 { 0, 1, 12, 11 }, /* QME7342 backplane setting */
7505 { 0, 1, 12, 12 }, /* QME7342 backplane setting */
7506 { 0, 1, 12, 14 }, /* QME7342 backplane setting */
7507 { 0, 1, 12, 6 }, /* QME7342 backplane setting */
7508 { 0, 1, 12, 7 }, /* QME7342 backplane setting */
7509 { 0, 1, 12, 8 }, /* QME7342 backplane setting */
7510 { 0, 1, 0, 10 }, /* QMH7342 backplane settings */
7511 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
7512};
7513
7514static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7515 /* amp, pre, main, post */
7516 { 0, 0, 0, 0 }, /* QME7342 mfg settings */
7517 { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
7518};
7519
7520static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7521 unsigned atten)
7522{
7523 /*
7524 * The attenuation table starts at 2dB for entry 1,
7525 * with entry 0 being the loopback entry.
7526 */
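	/*
	 * Mapping example: attenuations of 0-2 dB all use entry 1 (the
	 * 2 dB entry), 3 dB uses entry 2, and so on; values beyond the
	 * table are clamped to the last entry (TXDDS_TABLE_SZ - 1).
	 */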
7527 if (atten <= 2)
7528 atten = 1;
7529 else if (atten > TXDDS_TABLE_SZ)
7530 atten = TXDDS_TABLE_SZ - 1;
7531 else
7532 atten--;
7533 return txdds + atten;
7534}
7535
7536/*
a77fcf89 7537 * if override is set, the module parameter txselect has a value
7538 * for this specific port, so use it, rather than our normal mechanism.
7539 */
7540static void find_best_ent(struct qib_pportdata *ppd,
7541 const struct txdds_ent **sdr_dds,
7542 const struct txdds_ent **ddr_dds,
7543 const struct txdds_ent **qdr_dds, int override)
7544{
7545 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7546 int idx;
7547
7548 /* Search table of known cables */
7549 for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7550 const struct vendor_txdds_ent *v = vendor_txdds + idx;
7551
7552 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7553 (!v->partnum ||
7554 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7555 *sdr_dds = &v->sdr;
7556 *ddr_dds = &v->ddr;
7557 *qdr_dds = &v->qdr;
7558 return;
7559 }
7560 }
7561
7562 /* Active cables don't have attenuation so we only set SERDES
7563 * settings to account for the attenuation of the board traces. */
7564 if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7565 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7566 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7567 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7568 return;
7569 }
7570
7571 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7572 qd->atten[1])) {
7573 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7574 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7575 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7576 return;
a77fcf89 7577 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7578 /*
7579 * If we have no (or incomplete) data from the cable
7580 * EEPROM, or no QSFP, or override is set, use the
 7581 * module parameter value to index into the attenuation
7582 * table.
f931551b 7583 */
7584 idx = ppd->cpspec->no_eep;
7585 *sdr_dds = &txdds_sdr[idx];
7586 *ddr_dds = &txdds_ddr[idx];
7587 *qdr_dds = &txdds_qdr[idx];
7588 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7589 /* similar to above, but index into the "extra" table. */
7590 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7591 *sdr_dds = &txdds_extra_sdr[idx];
7592 *ddr_dds = &txdds_extra_ddr[idx];
7593 *qdr_dds = &txdds_extra_qdr[idx];
7594 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7595 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7596 TXDDS_MFG_SZ)) {
7597 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7fac3301 7598 pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7599 ppd->dd->unit, ppd->port, idx);
7600 *sdr_dds = &txdds_extra_mfg[idx];
7601 *ddr_dds = &txdds_extra_mfg[idx];
7602 *qdr_dds = &txdds_extra_mfg[idx];
7603 } else {
7604 /* this shouldn't happen, it's range checked */
7605 *sdr_dds = txdds_sdr + qib_long_atten;
7606 *ddr_dds = txdds_ddr + qib_long_atten;
7607 *qdr_dds = txdds_qdr + qib_long_atten;
7608 }
7609}
7610
7611static void init_txdds_table(struct qib_pportdata *ppd, int override)
7612{
7613 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7614 struct txdds_ent *dds;
7615 int idx;
7616 int single_ent = 0;
7617
7618 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7619
7620 /* for mez cards or override, use the selected value for all entries */
7621 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
f931551b 7622 single_ent = 1;
7623
7624 /* Fill in the first entry with the best entry found. */
7625 set_txdds(ppd, 0, sdr_dds);
7626 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7627 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7628 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7629 QIBL_LINKACTIVE)) {
7630 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7631 QIB_IB_QDR ? qdr_dds :
7632 (ppd->link_speed_active ==
7633 QIB_IB_DDR ? ddr_dds : sdr_dds));
7634 write_tx_serdes_param(ppd, dds);
7635 }
7636
7637 /* Fill in the remaining entries with the default table values. */
7638 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7639 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7640 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7641 single_ent ? ddr_dds : txdds_ddr + idx);
7642 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7643 single_ent ? qdr_dds : txdds_qdr + idx);
7644 }
7645}
7646
7647#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7648#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7649#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7650#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7651#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7652#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7653#define AHB_TRANS_TRIES 10
7654
7655/*
7656 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
 7657 * 5=subsystem, which is why most calls use "(chan + (chan >> 1))"
7658 * for the channel argument.
7659 */
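/*
 * For example, iterating chan = 0..3 with (chan + (chan >> 1)) visits
 * AHB channels 0, 1, 3 and 4, skipping channel 2 (the PLL) and
 * channel 5 (the subsystem registers).
 */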
7660static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7661 u32 data, u32 mask)
7662{
7663 u32 rd_data, wr_data, sz_mask;
7664 u64 trans, acc, prev_acc;
7665 u32 ret = 0xBAD0BAD;
7666 int tries;
7667
7668 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7669 /* From this point on, make sure we return access */
7670 acc = (quad << 1) | 1;
7671 qib_write_kreg(dd, KR_AHB_ACC, acc);
7672
7673 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7674 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7675 if (trans & AHB_TRANS_RDY)
7676 break;
7677 }
7678 if (tries >= AHB_TRANS_TRIES) {
7679 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7680 goto bail;
7681 }
7682
7683 /* If mask is not all 1s, we need to read, but different SerDes
7684 * entities have different sizes
7685 */
7686 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7687 wr_data = data & mask & sz_mask;
7688 if ((~mask & sz_mask) != 0) {
7689 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7690 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7691
7692 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7693 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7694 if (trans & AHB_TRANS_RDY)
7695 break;
7696 }
7697 if (tries >= AHB_TRANS_TRIES) {
7698 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7699 AHB_TRANS_TRIES);
7700 goto bail;
7701 }
7702 /* Re-read in case host split reads and read data first */
7703 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7704 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7705 wr_data |= (rd_data & ~mask & sz_mask);
7706 }
7707
7708 /* If mask is not zero, we need to write. */
7709 if (mask & sz_mask) {
7710 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7711 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7712 trans |= AHB_WR;
7713 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7714
7715 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7716 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7717 if (trans & AHB_TRANS_RDY)
7718 break;
7719 }
7720 if (tries >= AHB_TRANS_TRIES) {
7721 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7722 AHB_TRANS_TRIES);
7723 goto bail;
7724 }
7725 }
7726 ret = wr_data;
7727bail:
7728 qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7729 return ret;
7730}
7731
7732static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7733 unsigned mask)
7734{
7735 struct qib_devdata *dd = ppd->dd;
7736 int chan;
7737 u32 rbc;
7738
7739 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7740 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7741 data, mask);
7742 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7743 addr, 0, 0);
7744 }
7745}
7746
7747static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7748{
7749 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7750 u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7751
7752 if (enable && !state) {
7fac3301 7753 pr_info("IB%u:%u Turning LOS on\n",
31264484 7754 ppd->dd->unit, ppd->port);
a0a234d4 7755 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
31264484 7756 } else if (!enable && state) {
7fac3301 7757 pr_info("IB%u:%u Turning LOS off\n",
31264484 7758 ppd->dd->unit, ppd->port);
a0a234d4 7759 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
31264484 7760 }
7761 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7762}
7763
7764static int serdes_7322_init(struct qib_pportdata *ppd)
7765{
7766 int ret = 0;
7767 if (ppd->dd->cspec->r1)
7768 ret = serdes_7322_init_old(ppd);
7769 else
7770 ret = serdes_7322_init_new(ppd);
7771 return ret;
7772}
7773
7774static int serdes_7322_init_old(struct qib_pportdata *ppd)
7775{
7776 u32 le_val;
7777
7778 /*
7779 * Initialize the Tx DDS tables. Also done every QSFP event,
7780 * for adapters with QSFP
7781 */
7782 init_txdds_table(ppd, 0);
7783
7784 /* ensure no tx overrides from earlier driver loads */
7785 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7786 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7787 reset_tx_deemphasis_override));
7788
7789 /* Patch some SerDes defaults to "Better for IB" */
7790 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7791 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7792
7793 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7794 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7795 /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7796 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7797
7798 /* May be overridden in qsfp_7322_event */
7799 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7800 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7801
7802 /* enable LE1 adaptation for all but QME, which is disabled */
7803 le_val = IS_QME(ppd->dd) ? 0 : 1;
7804 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7805
7806 /* Clear cmode-override, may be set from older driver */
7807 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7808
7809 /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7810 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7811
7812 /* setup LoS params; these are subsystem, so chan == 5 */
7813 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7814 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7815 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7816 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7817 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7818
7819 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7820 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7821 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7822 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7823 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7824
7825 /* LoS filter select enabled */
7826 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7827
7828 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7829 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7830 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7831 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7832
a0a234d4 7833 serdes_7322_los_enable(ppd, 1);
7834
 7835 /* rxbistena; set to 0 to avoid effects of it switching later */
7836 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7837
7838 /* Configure 4 DFE taps, and only they adapt */
7839 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7840
7841 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7842 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7843 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7844
7845 /*
7846 * Set receive adaptation mode. SDR and DDR adaptation are
7847 * always on, and QDR is initially enabled; later disabled.
7848 */
7849 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7850 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7851 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7852 ppd->dd->cspec->r1 ?
7853 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7854 ppd->cpspec->qdr_dfe_on = 1;
7855
a77fcf89 7856 /* FLoop LOS gate: PPM filter enabled */
7857 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7858
7859 /* rx offset center enabled */
7860 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7861
7862 if (!ppd->dd->cspec->r1) {
7863 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7864 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7865 }
7866
7867 /* Set the frequency loop bandwidth to 15 */
7868 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7869
7870 return 0;
7871}
7872
7873static int serdes_7322_init_new(struct qib_pportdata *ppd)
7874{
8482d5d1 7875 unsigned long tend;
7876 u32 le_val, rxcaldone;
7877 int chan, chan_done = (1 << SERDES_CHANS) - 1;
7878
7879 /* Clear cmode-override, may be set from older driver */
7880 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7881
7882 /* ensure no tx overrides from earlier driver loads */
7883 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7884 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7885 reset_tx_deemphasis_override));
7886
7887 /* START OF LSI SUGGESTED SERDES BRINGUP */
7888 /* Reset - Calibration Setup */
 7889 /* Stop DFE adaptation */
7890 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7891 /* Disable LE1 */
7892 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7893 /* Disable autoadapt for LE1 */
7894 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7895 /* Disable LE2 */
7896 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7897 /* Disable VGA */
7898 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7899 /* Disable AFE Offset Cancel */
7900 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7901 /* Disable Timing Loop */
7902 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7903 /* Disable Frequency Loop */
7904 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7905 /* Disable Baseline Wander Correction */
7906 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7907 /* Disable RX Calibration */
7908 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7909 /* Disable RX Offset Calibration */
7910 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7911 /* Select BB CDR */
7912 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7913 /* CDR Step Size */
7914 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7915 /* Enable phase Calibration */
7916 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7917 /* DFE Bandwidth [2:14-12] */
7918 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7919 /* DFE Config (4 taps only) */
7920 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7921 /* Gain Loop Bandwidth */
7922 if (!ppd->dd->cspec->r1) {
7923 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7924 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7925 } else {
7926 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7927 }
7928 /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
7929 /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
7930 /* Data Rate Select [5:7-6] (leave as default) */
25985edc 7931 /* RX Parallel Word Width [3:10-8] (leave as default) */
7932
 7933 /* RX RESET */
7934 /* Single- or Multi-channel reset */
7935 /* RX Analog reset */
7936 /* RX Digital reset */
7937 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7938 msleep(20);
7939 /* RX Analog reset */
7940 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7941 msleep(20);
7942 /* RX Digital reset */
7943 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7944 msleep(20);
7945
7946 /* setup LoS params; these are subsystem, so chan == 5 */
7947 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7948 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7949 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7950 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7951 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7952
7953 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7954 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7955 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7956 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7957 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7958
7959 /* LoS filter select enabled */
7960 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7961
7962 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7963 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7964 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7965 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7966
7967 /* Turn on LOS on initial SERDES init */
7968 serdes_7322_los_enable(ppd, 1);
7969 /* FLoop LOS gate: PPM filter enabled */
7970 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7971
7972 /* RX LATCH CALIBRATION */
7973 /* Enable Eyefinder Phase Calibration latch */
7974 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7975 /* Enable RX Offset Calibration latch */
7976 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7977 msleep(20);
7978 /* Start Calibration */
7979 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7980 tend = jiffies + msecs_to_jiffies(500);
7981 while (chan_done && !time_is_before_jiffies(tend)) {
7982 msleep(20);
7983 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7984 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7985 (chan + (chan >> 1)),
7986 25, 0, 0);
7987 if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7988 (~chan_done & (1 << chan)) == 0)
7989 chan_done &= ~(1 << chan);
7990 }
7991 }
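	/*
	 * chan_done starts as a full mask of (1 << SERDES_CHANS) - 1 and a
	 * channel's bit is cleared once it reports calibration done (bit 9
	 * of register 25); any bit still set after the 500 ms window is
	 * reported below.
	 */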
7992 if (chan_done) {
7fac3301 7993 pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
7994 IBSD(ppd->hw_pidx), chan_done);
7995 } else {
7996 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7997 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7998 (chan + (chan >> 1)),
7999 25, 0, 0);
8000 if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8001 pr_info("Serdes %d chan %d calibration failed\n",
8002 IBSD(ppd->hw_pidx), chan);
8003 }
8004 }
8005
8006 /* Turn off Calibration */
8007 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8008 msleep(20);
8009
8010 /* BRING RX UP */
8011 /* Set LE2 value (May be overridden in qsfp_7322_event) */
8012 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8013 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8014 /* Set LE2 Loop bandwidth */
8015 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8016 /* Enable LE2 */
8017 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8018 msleep(20);
8019 /* Enable H0 only */
8020 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8021 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8022 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8023 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8024 /* Enable VGA */
8025 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8026 msleep(20);
8027 /* Set Frequency Loop Bandwidth */
f665acb3 8028 ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8029 /* Enable Frequency Loop */
8030 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8031 /* Set Timing Loop Bandwidth */
8032 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8033 /* Enable Timing Loop */
8034 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8035 msleep(50);
8036 /* Enable DFE
8037 * Set receive adaptation mode. SDR and DDR adaptation are
8038 * always on, and QDR is initially enabled; later disabled.
8039 */
8040 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8041 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8042 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8043 ppd->dd->cspec->r1 ?
8044 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8045 ppd->cpspec->qdr_dfe_on = 1;
8046 /* Disable LE1 */
8047 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8048 /* Disable auto adapt for LE1 */
8049 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8050 msleep(20);
8051 /* Enable AFE Offset Cancel */
8052 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8053 /* Enable Baseline Wander Correction */
8054 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8055 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8056 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8057 /* VGA output common mode */
8058 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8059
8060 /*
8061 * Initialize the Tx DDS tables. Also done every QSFP event,
8062 * for adapters with QSFP
8063 */
8064 init_txdds_table(ppd, 0);
8065
8066 return 0;
8067}
8068
8069/* start adjust QMH serdes parameters */
8070
8071static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8072{
8073 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8074 9, code << 9, 0x3f << 9);
8075}
8076
8077static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8078 int enable, u32 tapenable)
8079{
8080 if (enable)
8081 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8082 1, 3 << 10, 0x1f << 10);
8083 else
8084 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8085 1, 0, 0x1f << 10);
8086}
8087
8088/* Set clock to 1, 0, 1, 0 */
8089static void clock_man(struct qib_pportdata *ppd, int chan)
8090{
8091 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8092 4, 0x4000, 0x4000);
8093 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8094 4, 0, 0x4000);
8095 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8096 4, 0x4000, 0x4000);
8097 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8098 4, 0, 0x4000);
8099}
8100
8101/*
8102 * Write the current Tx serdes pre, post, main, and amp settings into the serdes.
8103 * The caller must pass the settings appropriate for the current speed,
8104 * or not care if they are correct for the current speed.
8105 */
8106static void write_tx_serdes_param(struct qib_pportdata *ppd,
8107 struct txdds_ent *txdds)
8108{
8109 u64 deemph;
8110
8111 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8112 /* field names for amp, main, post, pre, respectively */
8113 deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8114 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8115 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8116 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8117
8118 deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8119 tx_override_deemphasis_select);
8120 deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8121 txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8122 txampcntl_d2a);
8123 deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8124 txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8125 txc0_ena);
8126 deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8127 txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8128 txcp1_ena);
8129 deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8130 txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8131 txcn1_ena);
8132 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8133}
8134
8135/*
8136 * Set the parameters for mez cards on link bounce, so they are
8137 * always exactly what was requested. Similar logic to init_txdds
8138 * but does just the serdes.
8139 */
8140static void adj_tx_serdes(struct qib_pportdata *ppd)
8141{
8142 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8143 struct txdds_ent *dds;
8144
8145 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8146 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8147 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8148 ddr_dds : sdr_dds));
8149 write_tx_serdes_param(ppd, dds);
8150}
8151
8152/* set QDR forced value for H1, if needed */
8153static void force_h1(struct qib_pportdata *ppd)
8154{
8155 int chan;
8156
8157 ppd->cpspec->qdr_reforce = 0;
8158 if (!ppd->dd->cspec->r1)
8159 return;
8160
8161 for (chan = 0; chan < SERDES_CHANS; chan++) {
8162 set_man_mode_h1(ppd, chan, 1, 0);
8163 set_man_code(ppd, chan, ppd->cpspec->h1_val);
8164 clock_man(ppd, chan);
8165 set_man_mode_h1(ppd, chan, 0, 0);
8166 }
8167}
8168
8169#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8170#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8171
8172#define R_OPCODE_LSB 3
8173#define R_OP_NOP 0
8174#define R_OP_SHIFT 2
8175#define R_OP_UPDATE 3
8176#define R_TDI_LSB 2
8177#define R_TDO_LSB 1
8178#define R_RDY 1
8179
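/*
 * The qib_r_* helpers below drive a JTAG-style shift interface through
 * kr_r_access: qib_r_grab() enables access (SJA_EN), qib_r_shift()
 * clocks bits through the scan chain selected by "bisten" (driving TDI
 * from inp and capturing TDO into outp when those are non-NULL), and
 * qib_r_update() latches the shifted contents with an UPDATE opcode.
 * Each step polls the R_RDY bit via qib_r_wait_for_rdy().
 */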
8180static int qib_r_grab(struct qib_devdata *dd)
8181{
8182 u64 val;
8183 val = SJA_EN;
8184 qib_write_kreg(dd, kr_r_access, val);
8185 qib_read_kreg32(dd, kr_scratch);
8186 return 0;
8187}
8188
8189/* qib_r_wait_for_rdy() not only waits for the ready bit, it
8190 * returns the current state of R_TDO
8191 */
8192static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8193{
8194 u64 val;
8195 int timeout;
8196	for (timeout = 0; timeout < 100; ++timeout) {
8197 val = qib_read_kreg32(dd, kr_r_access);
8198 if (val & R_RDY)
8199 return (val >> R_TDO_LSB) & 1;
8200 }
8201 return -1;
8202}
8203
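/*
 * Shift "len" bits through the selected chain, then return the chain to
 * the NOP opcode.  The TDO bit recorded in outp[] for each position is
 * the one sampled by the preceding wait.  Returns the number of bits
 * shifted (len on success), or -1 if the interface never reported ready.
 */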
8204static int qib_r_shift(struct qib_devdata *dd, int bisten,
8205 int len, u8 *inp, u8 *outp)
8206{
8207 u64 valbase, val;
8208 int ret, pos;
8209
8210 valbase = SJA_EN | (bisten << BISTEN_LSB) |
8211 (R_OP_SHIFT << R_OPCODE_LSB);
8212 ret = qib_r_wait_for_rdy(dd);
8213 if (ret < 0)
8214 goto bail;
8215 for (pos = 0; pos < len; ++pos) {
8216 val = valbase;
8217 if (outp) {
8218 outp[pos >> 3] &= ~(1 << (pos & 7));
8219 outp[pos >> 3] |= (ret << (pos & 7));
8220 }
8221 if (inp) {
8222 int tdi = inp[pos >> 3] >> (pos & 7);
8223 val |= ((tdi & 1) << R_TDI_LSB);
8224 }
8225 qib_write_kreg(dd, kr_r_access, val);
8226 qib_read_kreg32(dd, kr_scratch);
8227 ret = qib_r_wait_for_rdy(dd);
8228 if (ret < 0)
8229 break;
8230 }
8231 /* Restore to NOP between operations. */
8232 val = SJA_EN | (bisten << BISTEN_LSB);
8233 qib_write_kreg(dd, kr_r_access, val);
8234 qib_read_kreg32(dd, kr_scratch);
8235 ret = qib_r_wait_for_rdy(dd);
8236
8237 if (ret >= 0)
8238 ret = pos;
8239bail:
8240 return ret;
8241}
8242
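/* Latch the previously shifted chain contents with an UPDATE opcode. */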
8243static int qib_r_update(struct qib_devdata *dd, int bisten)
8244{
8245 u64 val;
8246 int ret;
8247
8248 val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8249 ret = qib_r_wait_for_rdy(dd);
8250 if (ret >= 0) {
8251 qib_write_kreg(dd, kr_r_access, val);
8252 qib_read_kreg32(dd, kr_scratch);
8253 }
8254 return ret;
8255}
8256
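/*
 * Scan-chain selectors and chain lengths (in bits) for the port-select,
 * AT, and ETM chains programmed by setup_7322_link_recovery() below.
 */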
8257#define BISTEN_PORT_SEL 15
8258#define LEN_PORT_SEL 625
8259#define BISTEN_AT 17
8260#define LEN_AT 156
8261#define BISTEN_ETM 16
8262#define LEN_ETM 632
8263
8264#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8265
8266/* these are common for all IB port use cases. */
8267static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8270};
8271static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8274 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8275 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8276 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8277 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8278 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8279 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8280};
8281static u8 at[BIT2BYTE(LEN_AT)] = {
8282 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8284};
8285
8286/* used for IB1 or IB2, only one in use */
8287static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8291 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8293 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8294 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8295 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8296};
8297
8298/* used when both IB1 and IB2 are in use */
8299static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8302 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8303 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8304 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8305 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8306 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8307 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8308};
8309
8310/* used when only IB1 is in use */
8311static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8312 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8313 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8314 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8315 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8316 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8317 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8318 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8319 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8320};
8321
8322/* used when only IB2 is in use */
8323static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8324 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8325 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8326 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8327 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8328 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8329 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8330 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8331 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8332};
8333
8334/* used when both IB1 and IB2 are in use */
8335static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8336 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8337 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8338 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8339 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8340 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8341 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8342 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8343 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8344};
8345
8346/*
8347 * Do setup to properly handle IB link recovery; if port is zero, we
8348 * are initializing to cover both ports; otherwise we are initializing
8349 * to cover a single port card, or the port has reached INIT and we may
8350 * need to switch coverage types.
8351 */
8352static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8353{
8354 u8 *portsel, *etm;
8355 struct qib_devdata *dd = ppd->dd;
8356
8357 if (!ppd->dd->cspec->r1)
8358 return;
8359 if (!both) {
8360 dd->cspec->recovery_ports_initted++;
8361 ppd->cpspec->recovery_init = 1;
8362 }
8363 if (!both && dd->cspec->recovery_ports_initted == 1) {
8364 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8365 etm = atetm_1port;
8366 } else {
8367 portsel = portsel_2port;
8368 etm = atetm_2port;
8369 }
8370
8371 if (qib_r_grab(dd) < 0 ||
8372 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8373 qib_r_update(dd, BISTEN_ETM) < 0 ||
8374 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8375 qib_r_update(dd, BISTEN_AT) < 0 ||
8376 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8377 portsel, NULL) < 0 ||
8378 qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8379 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8380 qib_r_update(dd, BISTEN_AT) < 0 ||
8381 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8382 qib_r_update(dd, BISTEN_ETM) < 0)
8383 qib_dev_err(dd, "Failed IB link recovery setup\n");
8384}
8385
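/*
 * Check receive-side state when exactly one port's recovery has been
 * initialized (r1 chips): briefly freeze the chip and read kr_act_fmask.
 * A zero fmask means the part is unusable until a power cycle, so stay
 * frozen and disable interrupts; otherwise clear the serdes
 * PClkNotDetect error, unfreeze, and take the IBC back out of reset.
 */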
8386static void check_7322_rxe_status(struct qib_pportdata *ppd)
8387{
8388 struct qib_devdata *dd = ppd->dd;
8389 u64 fmask;
8390
8391 if (dd->cspec->recovery_ports_initted != 1)
8392 return; /* rest doesn't apply to dualport */
8393 qib_write_kreg(dd, kr_control, dd->control |
8394 SYM_MASK(Control, FreezeMode));
8395 (void)qib_read_kreg64(dd, kr_scratch);
8396 udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8397 fmask = qib_read_kreg64(dd, kr_act_fmask);
8398 if (!fmask) {
8399 /*
8400 * require a powercycle before we'll work again, and make
8401 * sure we get no more interrupts, and don't turn off
8402 * freeze.
8403 */
8404 ppd->dd->cspec->stay_in_freeze = 1;
8405 qib_7322_set_intr_state(ppd->dd, 0);
8406 qib_write_kreg(dd, kr_fmask, 0ULL);
8407 qib_dev_err(dd, "HCA unusable until powercycled\n");
8408 return; /* eventually reset */
8409 }
8410
8411 qib_write_kreg(ppd->dd, kr_hwerrclear,
8412 SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8413
8414 /* don't do the full clear_freeze(), not needed for this */
8415 qib_write_kreg(dd, kr_control, dd->control);
8416 qib_read_kreg32(dd, kr_scratch);
8417 /* take IBC out of reset */
8418 if (ppd->link_speed_supported) {
8419 ppd->cpspec->ibcctrl_a &=
8420 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8421 qib_write_kreg_port(ppd, krp_ibcctrl_a,
8422 ppd->cpspec->ibcctrl_a);
8423 qib_read_kreg32(dd, kr_scratch);
8424 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8425 qib_set_ib_7322_lstate(ppd, 0,
8426 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8427 }
8428}