IB/hfi1: Add device FW version string
drivers/infiniband/hw/hfi1/verbs.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "common.h"
#include "device.h"
#include "trace.h"
#include "qp.h"
#include "verbs_txreq.h"

static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 16384;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");

#define COPY_CACHELESS 1
#define COPY_ADAPTIVE  2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");

static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24

static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");

/* memory working set size */
struct hfi1_wss {
	unsigned long *entries;
	atomic_t total_count;
	atomic_t clean_counter;
	atomic_t clean_entry;

	int threshold;
	int num_entries;
	long pages_mask;
};

static struct hfi1_wss wss;
int hfi1_wss_init(void)
{
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;
	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;
	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss.pages_mask = table_bits - 1;
	wss.num_entries = table_bits / BITS_PER_LONG;

	wss.threshold = (llc_bits * wss_threshold) / 100;
	if (wss.threshold == 0)
		wss.threshold = 1;

	atomic_set(&wss.clean_counter, wss_clean_period);

	wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
			      GFP_KERNEL);
	if (!wss.entries) {
		hfi1_wss_exit();
		return -ENOMEM;
	}

	return 0;
}
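
/*
 * Illustrative sizing (hypothetical numbers, not tied to any part): for
 * a 30 MiB LLC, llc_size = 30720 * 1024 and table_size rounds up to
 * 32 MiB.  With 4 KiB pages that is table_bits = 8192 one-bit entries
 * (pages_mask = 0x1fff) packed into 8192 / 64 = 128 unsigned longs, and
 * the default 80% threshold works out to (7680 * 80) / 100 = 6144
 * distinct hot pages before copies go cacheless.
 */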

void hfi1_wss_exit(void)
{
	/* coded to handle partially initialized and repeat callers */
	kfree(wss.entries);
	wss.entries = NULL;
}

/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(void)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss.clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0.  However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss.clean_counter, wss_clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss.clean_entry) - 1)
			& (wss.num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss.entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss.total_count);
	}
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss.entries[entry]))
		atomic_inc(&wss.total_count);

	wss_advance_clean_counter();
}
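
/*
 * Illustrative trace of the indexing above (hypothetical address, 4 KiB
 * pages, 64-bit longs, pages_mask = 0x1fff): address 0x7f12345ab000
 * >> PAGE_SHIFT gives page frame 0x7f12345ab; masking keeps the low 13
 * bits, so page = 0x5ab = 1451, entry = 1451 / 64 = 22 and nr = 1451 %
 * 64 = 43, i.e. bit 43 of wss.entries[22] marks the page as recently
 * copied.
 */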

/*
 * Is the working set larger than the threshold?
 */
static inline int wss_exceeds_threshold(void)
{
	return atomic_read(&wss.total_count) >= wss.threshold;
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
};
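
/*
 * Reading the table above: every entry starts from LRH (8 bytes) + BTH
 * (12 bytes), plus whatever extension headers the opcode carries - e.g.
 * 16 for the RETH on RDMA writes and read requests, 4 for an immediate
 * or an AETH, 8 for the DETH on UD, 28 for the AtomicETH on atomics.
 * So RDMA_WRITE_ONLY_WITH_IMMEDIATE = 12 + 8 + 20 is BTH + LRH +
 * (RETH 16 + immediate 4).  Breakdown inferred from the IBTA/OPA header
 * formats, not stated in this file.
 */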

static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP] = &hfi1_cnp_rcv
};

/*
 * System image GUID.
 */
__be64 ib_hfi1_sys_image_guid;

/**
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @copy_last: do a separate copy of the last 8 bytes
 */
void hfi1_copy_sge(
	struct rvt_sge_state *ss,
	void *data, u32 length,
	int release,
	int copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int in_last = 0;
	int i;
	int cacheless_copy = 0;

	if (sge_copy_mode == COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(sge->vaddr + PAGE_SIZE);

			cacheless_copy = wss_exceeds_threshold();
		} else {
			wss_advance_clean_counter();
		}
	}
	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = 0;
			in_last = 1;
		}
	}

again:
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = 0;
		in_last = 1;
		length = 8;
		goto again;
	}
}
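
/*
 * Sketch of the copy_last path above, with made-up numbers: for a
 * 4104-byte copy with copy_last set, the main loop first moves 4096
 * bytes using the selected memcpy variant, then the "again" pass
 * re-enters with length = 8 and in_last = 1 so the final 8 bytes are
 * stored one byte at a time.  The intent (per the "enforce byte
 * transfer ordering" comment) is that a consumer polling the tail of
 * the buffer cannot see it complete before the preceding data.
 */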

/**
 * hfi1_skip_sge - skip over SGE memory
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;

	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
		goto dropit;
	if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
	    (opcode == IB_OPCODE_CNP))
		return 1;
dropit:
	ibp = &packet->rcd->ppd->ibport_data;
	ibp->rvp.n_pkt_drops++;
	return 0;
}

/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 *
 * Tlen is the length of the header + data + CRC in bytes.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 tlen = packet->tlen;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	unsigned long flags;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
		packet->rcv_flags |= HFI1_HAS_GRH;
	} else {
		goto drop;
	}

	trace_input_ibhdr(rcd->dd, hdr);

	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
	inc_opstats(tlen, &rcd->opstats->stats[opcode]);

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
	lid = be16_to_cpu(hdr->lrh[1]);
	if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (lnh != HFI1_LRH_GRH)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
		if (!mcast)
			goto drop;
		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
			packet->qp = p->qp;
			spin_lock_irqsave(&packet->qp->r_lock, flags);
			if (likely((qp_ok(opcode, packet))))
				opcode_handler_tbl[opcode](packet);
			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		}
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		rcu_read_lock();
		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!packet->qp) {
			rcu_read_unlock();
			goto drop;
		}
		spin_lock_irqsave(&packet->qp->r_lock, flags);
		if (likely((qp_ok(opcode, packet))))
			opcode_handler_tbl[opcode](packet);
		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		rcu_read_unlock();
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;
	struct hfi1_qp_priv *priv;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}

void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

/*
 * This is called with progress side lock held.
 */
/* New API */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status)
{
	struct verbs_txreq *tx =
		container_of(cookie, struct verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe) {
		hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct hfi1_ib_header *hdr;

		hdr = &tx->phdr.hdr;
		hfi1_rc_send_complete(qp, hdr);
	}
	spin_unlock(&qp->s_lock);

	hfi1_put_txreq(tx);
}

static int wait_kmem(struct hfi1_ibdev *dev,
		     struct rvt_qp *qp,
		     struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &priv->s_iowait.tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->s_iowait.list, &dev->memwait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static noinline int build_verbs_ulp_payload(
	struct sdma_engine *sde,
	struct rvt_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 len;
	int ret = 0;

	while (length) {
		len = ss->sge.length;
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		WARN_ON_ONCE(len == 0);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			ss->sge.vaddr,
			len);
		if (ret)
			goto bail_txadd;
		update_sge(ss, len);
		length -= len;
	}
	return ret;
bail_txadd:
	/* unwind cursor */
	ss->sge = sge;
	ss->num_sge = num_sge;
	ss->sg_list = sg_list;
	return ret;
}

/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine ensures all the helper routine calls succeed.
 */
/* New API */
static int build_verbs_tx_desc(
	struct sdma_engine *sde,
	struct rvt_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx,
	struct ahg_ib_header *ahdr,
	u64 pbc)
{
	int ret = 0;
	struct hfi1_pio_header *phdr = &tx->phdr;
	u16 hdrbytes = tx->hdr_dwords << 2;

	if (!ahdr->ahgcount) {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahdr->tx_flags,
			hdrbytes + length,
			ahdr->ahgidx,
			0,
			NULL,
			0,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
		phdr->pbc = cpu_to_le64(pbc);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			phdr,
			hdrbytes);
		if (ret)
			goto bail_txadd;
	} else {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahdr->tx_flags,
			length,
			ahdr->ahgidx,
			ahdr->ahgcount,
			ahdr->ahgdesc,
			hdrbytes,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
	}

	/* add the ulp payload - if any.  ss can be NULL for acks */
	if (ss)
		ret = build_verbs_ulp_payload(sde, ss, length, tx);
bail_txadd:
	return ret;
}

int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ahg_ib_header *ahdr = priv->s_hdr;
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
	struct hfi1_ibdev *dev = ps->dev;
	struct hfi1_pportdata *ppd = ps->ppd;
	struct verbs_txreq *tx;
	u64 pbc_flags = 0;
	u8 sc5 = priv->s_sc;

	int ret;

	tx = ps->s_txreq;
	if (!sdma_txreq_built(&tx->txreq)) {
		if (likely(pbc == 0)) {
			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
			/* No vl15 here */
			/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
			pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;

			pbc = create_pbc(ppd,
					 pbc_flags,
					 qp->srate_mbps,
					 vl,
					 plen);
		}
		tx->wqe = qp->s_wqe;
		ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
		if (unlikely(ret))
			goto bail_build;
	}
	ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
	if (unlikely(ret < 0)) {
		if (ret == -ECOMM)
			goto bail_ecomm;
		return ret;
	}
	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
				&ps->s_txreq->phdr.hdr);
	return ret;

bail_ecomm:
	/* The current one got "sent" */
	return 0;
bail_build:
	ret = wait_kmem(dev, qp, ps);
	if (!ret) {
		/* free txreq - bad state */
		hfi1_put_txreq(ps->s_txreq);
		ps->s_txreq = NULL;
	}
	return ret;
}
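
/*
 * A note on plen above: it counts 32-bit words on the wire - hdrwords
 * for the packet header, (len + 3) >> 2 rounding the payload up to
 * whole dwords, plus 2 dwords for the 64-bit PBC that create_pbc()
 * builds ahead of the packet.
 */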

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called.  Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &priv->s_iowait.tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
			dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
			qp->s_flags |= flag;
			was_empty = list_empty(&sc->piowait);
			list_add_tail(&priv->s_iowait.list, &sc->piowait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
			atomic_inc(&qp->refcount);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static void verbs_pio_complete(void *arg, int code)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_pio_dec(&priv->s_iowait))
		iowait_drain_wakeup(&priv->s_iowait);
}

int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 dwords = (len + 3) >> 2;
	u32 plen = hdrwords + dwords + 2; /* includes pbc */
	struct hfi1_pportdata *ppd = ps->ppd;
	u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
	u64 pbc_flags = 0;
	u8 sc5;
	unsigned long flags = 0;
	struct send_context *sc;
	struct pio_buf *pbuf;
	int wc_status = IB_WC_SUCCESS;
	int ret = 0;
	pio_release_cb cb = NULL;

	/* only RC/UC use complete */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		cb = verbs_pio_complete;
		break;
	default:
		break;
	}

	/* vl15 special case taken care of in ud.c */
	sc5 = priv->s_sc;
	sc = ps->s_txreq->psc;

	if (likely(pbc == 0)) {
		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
		/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
		pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
		pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	}
	if (cb)
		iowait_pio_inc(&priv->s_iowait);
	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
	if (unlikely(!pbuf)) {
		if (cb)
			verbs_pio_complete(qp, 0);
		if (ppd->host_link_state != HLS_UP_ACTIVE) {
			/*
			 * If we have filled the PIO buffers to capacity and are
			 * not in an active state this request is not going to
			 * go out, so just complete it with an error or else a
			 * ULP or the core may be stuck waiting.
			 */
			hfi1_cdbg(
				PIO,
				"alloc failed. state not active, completing");
			wc_status = IB_WC_GENERAL_ERR;
			goto pio_bail;
		} else {
			/*
			 * This is a normal occurrence.  The PIO buffs are full
			 * up but we are still happily sending, so let's
			 * continue to queue the request.
			 */
			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
			if (!ret)
				/* txreq not queued - free */
				goto bail;
			/* tx consumed in wait */
			return ret;
		}
	}

	if (len == 0) {
		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
	} else {
		if (ss) {
			seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
			while (len) {
				void *addr = ss->sge.vaddr;
				u32 slen = ss->sge.length;

				if (slen > len)
					slen = len;
				update_sge(ss, slen);
				seg_pio_copy_mid(pbuf, addr, slen);
				len -= slen;
			}
			seg_pio_copy_end(pbuf);
		}
	}

	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &ps->s_txreq->phdr.hdr);

pio_bail:
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_send_complete(qp, qp->s_wqe, wc_status);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}

	ret = 0;

bail:
	hfi1_put_txreq(ps->s_txreq);
	return ret;
}

/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise.  Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 mentry = ent & PKEY_LOW_15_MASK;

	if (mkey == mentry) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
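
/*
 * Worked example (hypothetical values): pkey 0x8001 (full member of
 * partition 0x0001) matches table entry 0x8001 but not 0x0001, since a
 * full member may not send via a limited-member entry; pkey 0x0001
 * (limited member) matches either 0x8001 or 0x0001 because only the
 * low 15 bits need to agree.
 */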

/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @lrh: Local route header
 * @bth: Base transport header
 * @sc5: SC for packet
 * @s_pkey_index: Used as a lookup optimization for kernel contexts
 * only.  If it is a negative value, a user context is calling this
 * function.
 *
 * It checks if hdr's pkey is valid.
 *
 * Return: 0 on success, otherwise, 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
		      u8 sc5, int8_t s_pkey_index)
{
	struct hfi1_devdata *dd;
	int i;
	u16 pkey;
	int is_user_ctxt_mechanism = (s_pkey_index < 0);

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	pkey = (u16)be32_to_cpu(bth[0]);

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/*
	 * For the kernel contexts only, if a qp is passed into the function,
	 * the most likely matching pkey has index qp->s_pkey_index
	 */
	if (!is_user_ctxt_mechanism &&
	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
		return 0;
	}

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
bad:
	/*
	 * For the user-context mechanism, the P_KEY check would only happen
	 * once per SDMA request, not once per packet.  Therefore, there's no
	 * need to increment the counter for the user-context mechanism.
	 */
	if (!is_user_ctxt_mechanism) {
		incr_cntr64(&ppd->port_xmit_constraint_errors);
		dd = ppd->dd;
		if (!(dd->err_info_xmit_constraint.status &
		      OPA_EI_STATUS_SMASK)) {
			u16 slid = be16_to_cpu(lrh[3]);

			dd->err_info_xmit_constraint.status |=
				OPA_EI_STATUS_SMASK;
			dd->err_info_xmit_constraint.slid = slid;
			dd->err_info_xmit_constraint.pkey = pkey;
		}
	}
	return 1;
}

/**
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
					    struct verbs_txreq *tx)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ib_header *h = &tx->phdr.hdr;

	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
		return dd->process_pio_send;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return dd->process_pio_send;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		break;
	case IB_QPT_RC:
		if (piothreshold &&
		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
		    !sdma_txreq_built(&tx->txreq))
			return dd->process_pio_send;
		break;
	case IB_QPT_UC:
		if (piothreshold &&
		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
		    !sdma_txreq_built(&tx->txreq))
			return dd->process_pio_send;
		break;
	default:
		break;
	}
	return dd->process_dma_send;
}
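
/*
 * Summarizing the selection above: SMI always goes PIO; RC/UC fall back
 * to PIO only for small messages - at most min(piothreshold, pmtu)
 * bytes, 256 by default - whose opcode is in the rc/uc "only" masks and
 * which have no SDMA work pending or already built, preserving ordering
 * with in-flight SDMA descriptors.  Everything else uses SDMA.
 */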

/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_other_headers *ohdr;
	struct hfi1_ib_header *hdr;
	send_routine sr;
	int ret;
	u8 lnh;

	hdr = &ps->s_txreq->phdr.hdr;
	/* locate the pkey within the headers */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else
		ohdr = &hdr->u.oth;

	sr = get_send_routine(qp, ps->s_txreq);
	ret = egress_pkey_check(dd->pport,
				hdr->lrh,
				ohdr->bth,
				priv->s_sc,
				qp->s_pkey_index);
	if (unlikely(ret)) {
		/*
		 * The value we are returning here does not get propagated to
		 * the verbs caller.  Thus we need to complete the request with
		 * error otherwise the caller could be sitting waiting on the
		 * completion event.  Only do this for PIO.  SDMA has its own
		 * mechanism for handling the errors.  So for SDMA we can just
		 * return.
		 */
		if (sr == dd->process_pio_send) {
			unsigned long flags;

			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
				  __func__);
			spin_lock_irqsave(&qp->s_lock, flags);
			hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		return -EINVAL;
	}
	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
		return pio_wait(qp,
				ps->s_txreq->psc,
				ps,
				RVT_S_WAIT_PIO_DRAIN);
	return sr(qp, ps, 0);
}

/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	u16 ver = dd->dc8051_ver;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 16) |
		(u64)dc8051_ver_min(ver);
	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = hfi1_max_qps;
	rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
	rdi->dparms.props.max_sge = hfi1_max_sges;
	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
	rdi->dparms.props.max_cq = hfi1_max_cqs;
	rdi->dparms.props.max_ah = hfi1_max_ahs;
	rdi->dparms.props.max_cqe = hfi1_max_cqes;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_pd = hfi1_max_pds;
	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = hfi1_max_srqs;
	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
}

static inline u16 opa_speed_to_ib(u16 in)
{
	u16 out = 0;

	if (in & OPA_LINK_SPEED_25G)
		out |= IB_SPEED_EDR;
	if (in & OPA_LINK_SPEED_12_5G)
		out |= IB_SPEED_FDR;

	return out;
}

/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
	switch (in) {
	case OPA_LINK_WIDTH_1X:
	/* map 2x and 3x to 1x as they don't exist in IB */
	case OPA_LINK_WIDTH_2X:
	case OPA_LINK_WIDTH_3X:
		return IB_WIDTH_1X;
	default: /* link down or unknown, return our largest width */
	case OPA_LINK_WIDTH_4X:
		return IB_WIDTH_4X;
	}
}

static int query_port(struct rvt_dev_info *rdi, u8 port_num,
		      struct ib_port_attr *props)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	u16 lid = ppd->lid;

	props->lid = lid ? lid : 0;
	props->lmc = ppd->lmc;
	/* OPA logical states match IB logical states */
	props->state = driver_lstate(ppd);
	props->phys_state = hfi1_ibphys_portstate(ppd);
	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
	/* see rate_show() in ib core/sysfs.c */
	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
	props->max_vl_num = ppd->vls_supported;

	/* Once we are a "first class" citizen and have added the OPA MTUs to
	 * the core we can advertise the larger MTU enum to the ULPs, for now
	 * advertise only 4K.
	 *
	 * Those applications which are either OPA aware or pass the MTU enum
	 * from the Path Records to us will get the new 8k MTU.  Those that
	 * attempt to process the MTU enum may fail in various ways.
	 */
	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
				      4096 : hfi1_max_mtu), IB_MTU_4096);
	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
		mtu_to_enum(ppd->ibmtu, IB_MTU_2048);

	return 0;
}

static int modify_device(struct ib_device *device,
			 int device_modify_mask,
			 struct ib_device_modify *device_modify)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_hfi1_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;

	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
			     OPA_LINKDOWN_REASON_UNKNOWN);
	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
	return ret;
}

static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			    int guid_index, __be64 *guid)
{
	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (guid_index == 0)
		*guid = cpu_to_be64(ppd->guid);
	else if (guid_index < HFI1_GUIDS_PER_PORT)
		*guid = ibp->guids[guid_index - 1];
	else
		return -EINVAL;

	return 0;
}

/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);

	return ibp->sl_to_sc[ah->sl];
}

static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/* test the mapping for validity */
	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah_attr->sl];
	dd = dd_from_ppd(ppd);
	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
		return -EINVAL;
	return 0;
}

static void hfi1_notify_new_ah(struct ib_device *ibdev,
			       struct ib_ah_attr *ah_attr,
			       struct rvt_ah *ah)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah->attr.sl];
	dd = dd_from_ppd(ppd);
	ah->vl = sc_to_vlt(dd, sc5);
	if (ah->vl < num_vls || ah->vl == 15)
		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}

struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}

/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
	return ARRAY_SIZE(dd->pport[0].pkeys);
}

static void init_ibport(struct hfi1_pportdata *ppd)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
	int i;

	for (i = 0; i < sz; i++) {
		ibp->sl_to_sc[i] = i;
		ibp->sc_to_sl[i] = i;
	}

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = 0;
	/* Below should only set bits defined in OPA PortInfo.CapabilityMask */
	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_CAP_MASK_NOTICE_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
				size_t str_len)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct hfi1_ibdev *dev = dev_from_rdi(rdi);
	u16 ver = dd_from_dev(dev)->dc8051_ver;

	snprintf(str, str_len, "%u.%u", dc8051_ver_maj(ver),
		 dc8051_ver_min(ver));
}
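
/*
 * This callback is the subject of the patch title: the IB core calls it
 * through ib_device->get_dev_fw_str to report the firmware version
 * (e.g. in sysfs, picked up by tools such as ibv_devinfo).  Assuming
 * dc8051_ver keeps the major version in the high byte and the minor in
 * the low byte, a value of 0x0814 would be rendered as "8.20".
 */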

/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned i;
	int ret;
	size_t lcpysz = IB_DEVICE_NAME_MAX;

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */

	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

	seqlock_init(&dev->iowait_lock);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);

	ret = verbs_txreq_init(dev);
	if (ret)
		goto err_verbs_txreq;

	/*
	 * The system image GUID is supposed to be the same for all
	 * HFIs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_hfi1_sys_image_guid)
		ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
	lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
	strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = cpu_to_be64(ppd->guid);
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->modify_device = modify_device;

	/* keep process mad in the driver */
	ibdev->process_mad = hfi1_process_mad;
	ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;

	strncpy(ibdev->node_desc, init_utsname()->nodename,
		sizeof(ibdev->node_desc));

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
	/*
	 * Fill in rvt info device attributes.
	 */
	hfi1_fill_device_attr(dd);

	/* queue pair */
	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 0;
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
	dd->verbs_dev.rdi.dparms.qpn_res_end =
		dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
	dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;

	/* completion queue */
	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
		 "hfi1_cq%d", dd->unit);
	dd->verbs_dev.rdi.dparms.node = dd->node;

	/* misc settings */
	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++)
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      ppd->pkeys);

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_verbs_txreq;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
	verbs_txreq_exit(dev);
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}

void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;

	hfi1_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->txwait))
		dd_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		dd_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	verbs_txreq_exit(dev);
}

void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ib_header *hdr = packet->hdr;
	struct rvt_qp *qp = packet->qp;
	u32 lqpn, rqpn = 0;
	u16 rlid = 0;
	u8 sl, sc5, sc4_bit, svc_type;
	bool sc4_set = has_sc4_bit(packet);

	switch (packet->qp->ibqp.qp_type) {
	case IB_QPT_UC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	default:
		ibp->rvp.n_pkt_drops++;
		return;
	}

	sc4_bit = sc4_set << 4;
	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	sc5 |= sc4_bit;
	sl = ibp->sc_to_sl[sc5];
	lqpn = qp->ibqp.qp_num;

	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}