/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "common.h"
#include "device.h"
#include "trace.h"
#include "qp.h"
#include "verbs_txreq.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 16384;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");

#define COPY_CACHELESS 1
#define COPY_ADAPTIVE  2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold,
		 "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period,
		 "Count of verbs copies before an entry in the page copy table is cleaned");
/* memory working set size */
struct hfi1_wss {
	unsigned long *entries;
	atomic_t total_count;
	atomic_t clean_counter;
	atomic_t clean_entry;

	int threshold;
	int num_entries;
	long pages_mask;
};

static struct hfi1_wss wss;
int hfi1_wss_init(void)
{
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;
	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;
	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss.pages_mask = table_bits - 1;
	wss.num_entries = table_bits / BITS_PER_LONG;

	wss.threshold = (llc_bits * wss_threshold) / 100;
	if (wss.threshold == 0)
		wss.threshold = 1;

	atomic_set(&wss.clean_counter, wss_clean_period);

	wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
			      GFP_KERNEL);
	if (!wss.entries) {
		hfi1_wss_exit();
		return -ENOMEM;
	}

	return 0;
}
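/*
 * Worked sizing example (illustrative, assumes 4 KiB pages and a 64-bit
 * BITS_PER_LONG): for a 32 MiB LLC, llc_size = 32 MiB and table_size =
 * 32 MiB (already a power of 2), so llc_bits = table_bits = 8192 one-bit
 * page slots.  That yields pages_mask = 8191, num_entries = 8192 / 64 =
 * 128 longs, and with wss_threshold = 80 a cacheless-copy threshold of
 * (8192 * 80) / 100 = 6553 hot pages.
 */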
void hfi1_wss_exit(void)
{
	/* coded to handle partially initialized and repeat callers */
	kfree(wss.entries);
	wss.entries = NULL;
}
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(void)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss.clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss.clean_counter, wss_clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss.clean_entry) - 1)
			& (wss.num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss.entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss.total_count);
	}
}
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss.entries[entry]))
		atomic_inc(&wss.total_count);

	wss_advance_clean_counter();
}
/*
 * Is the working set larger than the threshold?
 */
static inline int wss_exceeds_threshold(void)
{
	return atomic_read(&wss.total_count) >= wss.threshold;
}
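/*
 * Taken together: hfi1_copy_sge() below feeds the page addresses of
 * large copies into wss_insert() and consults wss_exceeds_threshold()
 * to estimate whether the destination is still cache-resident.  Once
 * the tracked working set outgrows the threshold, plain memcpy()
 * (which would thrash the LLC) is traded for a cacheless copy.
 */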
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST]                     = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE]                    = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST]                      = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY]                      = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE]                    = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = 12 + 8 + 4,
	[IB_OPCODE_RC_COMPARE_SWAP]                   = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD]                      = 12 + 8 + 28,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = 12 + 8 + 4,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST]                     = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE]                    = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST]                      = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY]                      = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY]                      = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 12
};
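/*
 * Reading the table: "12 + 8" is BTH (12 bytes) plus LRH (8 bytes); the
 * remaining term is the opcode-specific extension header, e.g. RETH (16)
 * for RDMA write first/only, AETH (4) for acknowledges, AtomicETH (28)
 * for compare-swap and fetch-add, DETH (8) for UD, and RETH plus
 * immediate (16 + 4 = 20) for RDMA_WRITE_ONLY_WITH_IMMEDIATE.
 */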
static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST]                     = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE]                    = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE]                    = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP]                   = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD]                      = &hfi1_rc_rcv,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST]                     = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE]                    = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST]                      = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY]                      = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY]                      = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP]                               = &hfi1_cnp_rcv
};
__be64 ib_hfi1_sys_image_guid;
/**
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @copy_last: do a separate copy of the last 8 bytes
 */
void hfi1_copy_sge(
	struct rvt_sge_state *ss,
	void *data, u32 length,
	int release,
	int copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int in_last = 0;
	int i;
	int cacheless_copy = 0;

	if (sge_copy_mode == COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(sge->vaddr + PAGE_SIZE);

			cacheless_copy = wss_exceeds_threshold();
		} else {
			wss_advance_clean_counter();
		}
	}
	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = 0;
			in_last = 1;
		}
	}

again:
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			/* this SGE is exhausted; move to the caller's next one */
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* end of an MR segment: step through the segment map */
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = 0;
		in_last = 1;
		length = 8;
		goto again;
	}
}
/**
 * hfi1_skip_sge - skip over SGE memory
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;

	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
		goto dropit;
	if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
	    (opcode == IB_OPCODE_CNP))
		return 1;
dropit:
	ibp = &packet->rcd->ppd->ibport_data;
	ibp->rvp.n_pkt_drops++;
	return 0;
}
/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 *
 * Tlen is the length of the header + data + CRC in bytes.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 tlen = packet->tlen;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	unsigned long flags;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
		packet->rcv_flags |= HFI1_HAS_GRH;
	} else {
		goto drop;
	}

	trace_input_ibhdr(rcd->dd, hdr);

	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
	inc_opstats(tlen, &rcd->opstats->stats[opcode]);

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
	lid = be16_to_cpu(hdr->lrh[1]);
	if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (lnh != HFI1_LRH_GRH)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
		if (!mcast)
			goto drop;
		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
			packet->qp = p->qp;
			spin_lock_irqsave(&packet->qp->r_lock, flags);
			if (likely((qp_ok(opcode, packet))))
				opcode_handler_tbl[opcode](packet);
			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		}
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		rcu_read_lock();
		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!packet->qp) {
			rcu_read_unlock();
			goto drop;
		}
		spin_lock_irqsave(&packet->qp->r_lock, flags);
		if (likely((qp_ok(opcode, packet))))
			opcode_handler_tbl[opcode](packet);
		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		rcu_read_unlock();
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;
	struct hfi1_qp_priv *priv;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
/*
 * This is called with progress side lock held.
 */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status)
{
	struct verbs_txreq *tx =
		container_of(cookie, struct verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe) {
		hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct hfi1_ib_header *hdr;

		hdr = &tx->phdr.hdr;
		hfi1_rc_send_complete(qp, hdr);
	}
	spin_unlock(&qp->s_lock);

	hfi1_put_txreq(tx);
}
static int wait_kmem(struct hfi1_ibdev *dev,
		     struct rvt_qp *qp,
		     struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &priv->s_iowait.tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->s_iowait.list, &dev->memwait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static noinline int build_verbs_ulp_payload(
	struct sdma_engine *sde,
	struct rvt_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 len;
	int ret = 0;

	while (length) {
		len = ss->sge.length;
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		WARN_ON_ONCE(len == 0);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			ss->sge.vaddr,
			len);
		if (ret)
			goto bail_txadd;
		update_sge(ss, len);
		length -= len;
	}
	return ret;

bail_txadd:
	/* unwind cursor */
	ss->sge = sge;
	ss->num_sge = num_sge;
	ss->sg_list = sg_list;
	return ret;
}
/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine ensures all the helper routine calls succeed.
 */
static int build_verbs_tx_desc(
	struct sdma_engine *sde,
	struct rvt_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx,
	struct ahg_ib_header *ahdr,
	u64 pbc)
{
	int ret = 0;
	struct hfi1_pio_header *phdr = &tx->phdr;
	u16 hdrbytes = tx->hdr_dwords << 2;

	if (!ahdr->ahgcount) {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahdr->tx_flags,
			hdrbytes + length,
			ahdr->ahgidx,
			0,
			NULL,
			0,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
		phdr->pbc = cpu_to_le64(pbc);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			phdr,
			hdrbytes);
		if (ret)
			goto bail_txadd;
	} else {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahdr->tx_flags,
			length,
			ahdr->ahgidx,
			ahdr->ahgcount,
			ahdr->ahgdesc,
			hdrbytes,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
	}

	/* add the ulp payload - if any.  ss can be NULL for acks */
	if (ss)
		ret = build_verbs_ulp_payload(sde, ss, length, tx);
bail_txadd:
	return ret;
}
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ahg_ib_header *ahdr = priv->s_hdr;
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
	struct hfi1_ibdev *dev = ps->dev;
	struct hfi1_pportdata *ppd = ps->ppd;
	struct verbs_txreq *tx;
	u64 pbc_flags = 0;
	u8 sc5 = priv->s_sc;
	int ret;

	tx = ps->s_txreq;
	if (!sdma_txreq_built(&tx->txreq)) {
		if (likely(pbc == 0)) {
			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
			/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
			pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;

			pbc = create_pbc(ppd,
					 pbc_flags,
					 qp->srate_mbps,
					 vl,
					 plen);
		}
		tx->wqe = qp->s_wqe;
		ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
		if (unlikely(ret))
			goto bail_build;
	}
	ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
	if (unlikely(ret < 0)) {
		if (ret == -ECOMM)
			goto bail_ecomm;
		return ret;
	}
	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
				&ps->s_txreq->phdr.hdr);
	return ret;

bail_ecomm:
	/* The current one got "sent" */
	return 0;
bail_build:
	ret = wait_kmem(dev, qp, ps);
	if (!ret) {
		/* free txreq - bad state */
		hfi1_put_txreq(ps->s_txreq);
		ps->s_txreq = NULL;
	}
	return ret;
}
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &priv->s_iowait.tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
			dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
			qp->s_flags |= flag;
			was_empty = list_empty(&sc->piowait);
			list_add_tail(&priv->s_iowait.list, &sc->piowait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
			atomic_inc(&qp->refcount);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
static void verbs_pio_complete(void *arg, int code)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_pio_dec(&priv->s_iowait))
		iowait_drain_wakeup(&priv->s_iowait);
}
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 dwords = (len + 3) >> 2;
	u32 plen = hdrwords + dwords + 2; /* includes pbc */
	struct hfi1_pportdata *ppd = ps->ppd;
	u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
	u64 pbc_flags = 0;
	u8 sc5;
	unsigned long flags = 0;
	struct send_context *sc;
	struct pio_buf *pbuf;
	int wc_status = IB_WC_SUCCESS;
	int ret = 0;
	pio_release_cb cb = NULL;

	/* only RC/UC use complete */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		cb = verbs_pio_complete;
		break;
	default:
		break;
	}

	/* vl15 special case taken care of in ud.c */
	sc5 = priv->s_sc;
	sc = ps->s_txreq->psc;

	if (likely(pbc == 0)) {
		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
		/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
		pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
		pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	}
	if (cb)
		iowait_pio_inc(&priv->s_iowait);
	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
	if (unlikely(!pbuf)) {
		if (cb)
			verbs_pio_complete(qp, 0);
		if (ppd->host_link_state != HLS_UP_ACTIVE) {
			/*
			 * If we have filled the PIO buffers to capacity and are
			 * not in an active state this request is not going to
			 * go out, so just complete it with an error or else a
			 * ULP or the core may be stuck waiting.
			 */
			hfi1_cdbg(
				PIO,
				"alloc failed. state not active, completing");
			wc_status = IB_WC_GENERAL_ERR;
			goto pio_bail;
		} else {
			/*
			 * This is a normal occurrence. The PIO buffs are full
			 * up but we are still happily sending, well we could be
			 * so lets continue to queue the request.
			 */
			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
			if (!ret)
				/* txreq not queued - free */
				goto bail;
			/* tx consumed in wait */
			return ret;
		}
	}

	if (len == 0) {
		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
	} else {
		if (ss) {
			seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
			while (len) {
				void *addr = ss->sge.vaddr;
				u32 slen = ss->sge.length;

				if (slen > len)
					slen = len;
				update_sge(ss, slen);
				seg_pio_copy_mid(pbuf, addr, slen);
				len -= slen;
			}
			seg_pio_copy_end(pbuf);
		}
	}

	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &ps->s_txreq->phdr.hdr);

pio_bail:
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_send_complete(qp, qp->s_wqe, wc_status);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}

	ret = 0;

bail:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return ret;
}
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 mentry = ent & PKEY_LOW_15_MASK;

	if (mkey == mentry) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
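/*
 * Worked example: an egress pkey of 0x8001 (full member of partition
 * 0x0001) matches table entry 0x8001 but not 0x0001, since bit 15 of
 * the limited-member entry is clear.  A limited-member pkey such as
 * 0x0001 matches either 0x0001 or 0x8001 - any entry with the same
 * low 15 bits.
 */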
/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @lrh: Local route header
 * @bth: Base transport header
 * @sc5: SC for packet
 * @s_pkey_index: It will be used for look up optimization for kernel contexts
 * only. If it is a negative value, then it means a user context is calling
 * this function.
 *
 * It checks if hdr's pkey is valid.
 *
 * Return: 0 on success, otherwise, 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
		      u8 sc5, int8_t s_pkey_index)
{
	struct hfi1_devdata *dd;
	int i;
	u16 pkey;
	int is_user_ctxt_mechanism = (s_pkey_index < 0);

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	pkey = (u16)be32_to_cpu(bth[0]);

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/*
	 * For the kernel contexts only, if a qp is passed into the function,
	 * the most likely matching pkey has index qp->s_pkey_index
	 */
	if (!is_user_ctxt_mechanism &&
	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
		return 0;
	}

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
bad:
	/*
	 * For the user-context mechanism, the P_KEY check would only happen
	 * once per SDMA request, not once per packet. Therefore, there's no
	 * need to increment the counter for the user-context mechanism.
	 */
	if (!is_user_ctxt_mechanism) {
		incr_cntr64(&ppd->port_xmit_constraint_errors);
		dd = ppd->dd;
		if (!(dd->err_info_xmit_constraint.status &
		      OPA_EI_STATUS_SMASK)) {
			u16 slid = be16_to_cpu(lrh[3]);

			dd->err_info_xmit_constraint.status |=
				OPA_EI_STATUS_SMASK;
			dd->err_info_xmit_constraint.slid = slid;
			dd->err_info_xmit_constraint.pkey = pkey;
		}
	}
	return 1;
}
/**
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
					    struct verbs_txreq *tx)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ib_header *h = &tx->phdr.hdr;

	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
		return dd->process_pio_send;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return dd->process_pio_send;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		break;
	case IB_QPT_RC:
		if (piothreshold &&
		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
		    !sdma_txreq_built(&tx->txreq))
			return dd->process_pio_send;
		break;
	case IB_QPT_UC:
		if (piothreshold &&
		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
		    !sdma_txreq_built(&tx->txreq))
			return dd->process_pio_send;
		break;
	default:
		break;
	}
	return dd->process_dma_send;
}
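/*
 * Illustration: with the default piothreshold of 256, a 128-byte RC
 * send with no SDMA descriptors pending is written by the CPU straight
 * into a PIO send buffer (lower latency), while an MTU-sized RDMA
 * write segment is handed to an SDMA engine instead (no CPU copy,
 * better bandwidth).
 */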
/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_other_headers *ohdr;
	struct hfi1_ib_header *hdr;
	send_routine sr;
	int ret;
	u8 lnh;

	hdr = &ps->s_txreq->phdr.hdr;
	/* locate the pkey within the headers */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else
		ohdr = &hdr->u.oth;

	sr = get_send_routine(qp, ps->s_txreq);
	ret = egress_pkey_check(dd->pport,
				hdr->lrh,
				ohdr->bth,
				priv->s_sc,
				qp->s_pkey_index);
	if (unlikely(ret)) {
		/*
		 * The value we are returning here does not get propagated to
		 * the verbs caller. Thus we need to complete the request with
		 * error otherwise the caller could be sitting waiting on the
		 * completion event. Only do this for PIO. SDMA has its own
		 * mechanism for handling the errors. So for SDMA we can just
		 * return.
		 */
		if (sr == dd->process_pio_send) {
			unsigned long flags;

			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
				  __func__);
			spin_lock_irqsave(&qp->s_lock, flags);
			hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		return -EINVAL;
	}
	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
		return pio_wait(qp,
				ps->s_txreq->psc,
				ps,
				RVT_S_WAIT_PIO_DRAIN);
	return sr(qp, ps, 0);
}
/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
			IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
			IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
			IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = hfi1_max_qps;
	rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
	rdi->dparms.props.max_sge = hfi1_max_sges;
	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
	rdi->dparms.props.max_cq = hfi1_max_cqs;
	rdi->dparms.props.max_ah = hfi1_max_ahs;
	rdi->dparms.props.max_cqe = hfi1_max_cqes;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_pd = hfi1_max_pds;
	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = hfi1_max_srqs;
	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
					rdi->dparms.props.max_mcast_qp_attach *
					rdi->dparms.props.max_mcast_grp;
}
static inline u16 opa_speed_to_ib(u16 in)
{
	u16 out = 0;

	if (in & OPA_LINK_SPEED_25G)
		out |= IB_SPEED_EDR;
	if (in & OPA_LINK_SPEED_12_5G)
		out |= IB_SPEED_FDR;

	return out;
}
/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
	switch (in) {
	case OPA_LINK_WIDTH_1X:
	/* map 2x and 3x to 1x as they don't exist in IB */
	case OPA_LINK_WIDTH_2X:
	case OPA_LINK_WIDTH_3X:
		return IB_WIDTH_1X;
	default: /* link down or unknown, return our largest width */
	case OPA_LINK_WIDTH_4X:
		return IB_WIDTH_4X;
	}
}
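/*
 * Note on the two conversions above: OPA 25G lanes signal at the same
 * 25.78125 Gb/s rate as IB EDR, so that mapping is exact, while 12.5G
 * has no IB equivalent and is reported as FDR, the closest defined IB
 * speed.  Likewise, 2X and 3X widths do not exist in IB and are
 * understated as 1X rather than overstated.
 */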
static int query_port(struct rvt_dev_info *rdi, u8 port_num,
		      struct ib_port_attr *props)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	u16 lid = ppd->lid;

	props->lid = lid ? lid : 0;
	props->lmc = ppd->lmc;
	/* OPA logical states match IB logical states */
	props->state = driver_lstate(ppd);
	props->phys_state = hfi1_ibphys_portstate(ppd);
	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
	/* see rate_show() in ib core/sysfs.c */
	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
	props->max_vl_num = ppd->vls_supported;

	/* Once we are a "first class" citizen and have added the OPA MTUs to
	 * the core we can advertise the larger MTU enum to the ULPs, for now
	 * advertise only 4K.
	 *
	 * Those applications which are either OPA aware or pass the MTU enum
	 * from the Path Records to us will get the new 8k MTU.  Those that
	 * attempt to process the MTU enum may fail in various ways.
	 */
	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
				      4096 : hfi1_max_mtu), IB_MTU_4096);
	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
		mtu_to_enum(ppd->ibmtu, IB_MTU_2048);

	return 0;
}
static int modify_device(struct ib_device *device,
			 int device_modify_mask,
			 struct ib_device_modify *device_modify)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_hfi1_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}
static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;

	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
			     OPA_LINKDOWN_REASON_UNKNOWN);
	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
	return ret;
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			    int guid_index, __be64 *guid)
{
	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (guid_index == 0)
		*guid = cpu_to_be64(ppd->guid);
	else if (guid_index < HFI1_GUIDS_PER_PORT)
		*guid = ibp->guids[guid_index - 1];
	else
		return -EINVAL;

	return 0;
}
/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);

	return ibp->sl_to_sc[ah->sl];
}
static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/* test the mapping for validity */
	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah_attr->sl];
	dd = dd_from_ppd(ppd);
	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
		return -EINVAL;
	return 0;
}
static void hfi1_notify_new_ah(struct ib_device *ibdev,
			       struct ib_ah_attr *ah_attr,
			       struct rvt_ah *ah)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah->attr.sl];
	dd = dd_from_ppd(ppd);
	ah->vl = sc_to_vlt(dd, sc5);
	if (ah->vl < num_vls || ah->vl == 15)
		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}
/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
	return ARRAY_SIZE(dd->pport[0].pkeys);
}
static void init_ibport(struct hfi1_pportdata *ppd)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
	int i;

	for (i = 0; i < sz; i++) {
		ibp->sl_to_sc[i] = i;
		ibp->sc_to_sl[i] = i;
	}

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = 0;
	/* Below should only set bits defined in OPA PortInfo.CapabilityMask */
	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_CAP_MASK_NOTICE_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned i;
	int ret;
	size_t lcpysz = IB_DEVICE_NAME_MAX;

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */

	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

	seqlock_init(&dev->iowait_lock);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);

	ret = verbs_txreq_init(dev);
	if (ret)
		goto err_verbs_txreq;

	/*
	 * The system image GUID is supposed to be the same for all
	 * HFIs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_hfi1_sys_image_guid)
		ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
	lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
	strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = cpu_to_be64(ppd->guid);
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->modify_device = modify_device;

	/* keep process mad in the driver */
	ibdev->process_mad = hfi1_process_mad;

	strncpy(ibdev->node_desc, init_utsname()->nodename,
		sizeof(ibdev->node_desc));

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
	/*
	 * Fill in rvt info device attributes.
	 */
	hfi1_fill_device_attr(dd);

	/* queue pair */
	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 0;
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
	dd->verbs_dev.rdi.dparms.qpn_res_end =
		dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
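	/*
	 * Illustration (assuming the driver's default kdeth_qp prefix of
	 * 0x80, a module parameter): the reserved window is then QPNs
	 * 0x800000 through 0x80FFFF, which keeps rdmavt from handing out
	 * verbs QPNs that would alias the KDETH (PSM) ranges.
	 */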
	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
	dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;

	/* completion queue */
	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
		 "hfi1_cq%d", dd->unit);
	dd->verbs_dev.rdi.dparms.node = dd->node;

	/* misc settings */
	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);

	for (i = 0; i < dd->num_pports; i++, ppd++)
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      ppd->pkeys);

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_verbs_txreq;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
	verbs_txreq_exit(dev);
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;

	hfi1_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->txwait))
		dd_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		dd_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	verbs_txreq_exit(dev);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ib_header *hdr = packet->hdr;
	struct rvt_qp *qp = packet->qp;
	u32 lqpn, rqpn = 0;
	u16 rlid = 0;
	u8 sl, sc5, sc4_bit, svc_type;
	bool sc4_set = has_sc4_bit(packet);

	switch (packet->qp->ibqp.qp_type) {
	case IB_QPT_UC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	default:
		ibp->rvp.n_pkt_drops++;
		return;
	}

	sc4_bit = sc4_set << 4;
	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	sc5 |= sc4_bit;
	sl = ibp->sc_to_sl[sc5];
	lqpn = qp->ibqp.qp_num;

	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}