/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/slab.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate data.
	 */
	MLX4_IB_UD_HEADER_SIZE		= 72,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

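/*
 * The 72-byte figure above is the worst-case IB UD header: LRH (8
 * bytes) + GRH (40) + BTH (12) + DETH (8) + 4 bytes of immediate
 * data = 72.
 */
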
struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};

static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}

static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
		qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
}

static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
		qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
}

static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}

/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}

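/*
 * Worked example of the stamping above: with wqe_shift == 6 (64-byte
 * basic blocks) and a WR that consumed 192 bytes, the multi-WQE
 * branch writes a stamp dword at offsets 0, 64 and 128, choosing
 * 0x7fffffff or 0xffffffff so that the top bit is the opposite of
 * the valid ownership value for that pass around the queue.
 */
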
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;

		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}

/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));

	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}

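/*
 * Example: with sq.wqe_cnt == 256 and sq_max_wqes_per_wr == 4, a WR
 * arriving at ind == 254 leaves only two basic blocks before the
 * wrap, so pad_wraparound() posts a two-block NOP and advances ind
 * to the top of the queue.
 */
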
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
			       "on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
		       ALIGN(4 +
			     sizeof (struct mlx4_wqe_inline_seg),
			     sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}

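/*
 * For the MLX (SMI/GSI) case above, the overhead reserves room for
 * the UD header built by build_mlx_header() -- up to
 * MLX4_IB_UD_HEADER_SIZE bytes, possibly split across inline
 * segments at 64-byte boundaries -- plus a separate inline segment
 * holding the 4-byte ICRC placeholder.
 */
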
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_srq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
		return -EINVAL;

	if (has_srq) {
		/* QPs attached to an SRQ should have no RQ */
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
	cap->max_recv_sge = qp->rq.max_gs;

	return 0;
}

static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr	 > dev->dev->caps.max_wqes  ||
	    cap->max_send_sge	 > dev->dev->caps.max_sq_sg ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != IB_QPT_SMI && type != IB_QPT_GSI)
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

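/*
 * Example of the sizing loop above: for a descriptor size s == 200
 * with WQE shrinking enabled, wqe_shift starts at 6 (64 bytes), so
 * sq_max_wqes_per_wr = DIV_ROUND_UP(200, 64) = 4 and sq_spare_wqes =
 * (2048 >> 6) + 4 = 36.  If the resulting wqe_cnt exceeds
 * caps.max_wqes, wqe_shift is bumped and the division redone with
 * larger basic blocks.
 */
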
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}

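/*
 * MLX4_IB_MIN_SQ_STRIDE == 6 means userspace may not request a send
 * WQE stride smaller than 1 << 6 = 64 bytes, the same 64-byte basic
 * block granularity used for stamping kernel SQs.
 */
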
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
{
	int qpn;
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	qp->state	 = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (!init_attr->srq) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
		if (err)
			goto err;

		if (!init_attr->srq) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
		if (err)
			goto err_wrid;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		mlx4_qp_release_range(dev->dev, qpn, 1);

err_wrid:
	if (pd->uobject) {
		if (!init_attr->srq)
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
					      &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && !init_attr->srq)
		mlx4_db_free(dev->dev, &qp->db);

err:
	return err;
}

static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

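/*
 * The two helpers above always take the CQ locks in increasing cqn
 * order (with spin_lock_nested() to keep lockdep happy), so two QPs
 * sharing the same pair of CQs can never deadlock against each
 * other.
 */
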
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
			       qp->mqp.qpn);

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp))
		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (!qp->ibqp.srq)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (!qp->ibqp.srq)
			mlx4_db_free(dev->dev, &qp->db);
	}
}

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	int err;

	/*
	 * We only support LSO and multicast loopback blocking, and
	 * only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags &&
	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kzalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		qp->ibqp.qp_num = qp->mqp.qpn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
		if (!sqp)
			return ERR_PTR(-ENOMEM);

		qp = &sqp->qp;

		err = create_qp_common(dev, pd, init_attr, udata,
				       dev->dev->caps.sqp_start +
				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
				       init_attr->port_num - 1,
				       qp);
		if (err) {
			kfree(sqp);
			return ERR_PTR(err);
		}

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	destroy_qp_common(dev, mqp, !!qp->pd->uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

static int to_mlx4_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:		return MLX4_QP_ST_RC;
	case IB_QPT_UC:		return MLX4_QP_ST_UC;
	case IB_QPT_UD:		return MLX4_QP_ST_UD;
	case IB_QPT_SMI:
	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
	default:		return -1;
	}
}

static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

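/*
 * Note the masking above: if the destination accepts no RDMA reads
 * or atomics (dest_rd_atomic == 0), there are no responder
 * resources, so only IB_ACCESS_REMOTE_WRITE can remain set.
 */
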
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}

static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_qp_path *path, u8 port)
{
	path->grh_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid	    = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;
	path->counter_index = 0xff;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
		((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}

static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(ibqp->qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			printk(KERN_ERR "path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
				  attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto = attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				  attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	context->pd	    = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
	context->params1    = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);

	if (attr_mask & IB_QP_QKEY) {
		context->qkey = cpu_to_be32(attr->qkey);
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (is_qp0(dev, qp))
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
		else
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
				       qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq): NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq_next_wqe = 0;
		if (!ibqp->srq)
			*qp->db.db = 0;
	}

out:
	kfree(context);
	return err;
}

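/*
 * The fls() encoding used for params1/params2 above rounds the
 * requested depth up to a power of 2: e.g. max_rd_atomic = 5 is
 * stored as fls(4) = 3 and read back by mlx4_ib_query_qp() as
 * 1 << 3 = 8.
 */
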
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	ib_ud_header_init(send_size, 1, 0, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);

	sqp->ud_header.lrh.service_level   =
		be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
	sqp->ud_header.lrh.destination_lid = ah->av.dlid;
	sqp->ud_header.lrh.source_lid      = cpu_to_be16(ah->av.g_slid & 0x7f);
	if (mlx4_ib_ah_grh_present(ah)) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label    =
			ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit     = ah->av.hop_limit;
		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
				  ah->av.gid_index, &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid   = sqp->ud_header.lrh.destination_lid;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		printk(KERN_ERR "built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				printk("  [%02x] ", i * 4);
			printk(" %08x",
			       be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				printk("\n");
		}
		printk("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

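/*
 * Example of the inline split above: the MLX segment occupies the
 * first 16 bytes of the (64-byte-aligned) WQE, so the first inline
 * segment's payload starts at offset 20 and spc == 44.  A worst-case
 * 72-byte UD header therefore goes out as a 44-byte inline segment
 * followed by a 28-byte one in the next 64-byte chunk.
 */
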
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}

static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}

static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	iseg->flags	= 0;
	iseg->mem_key	= cpu_to_be32(rkey);
	iseg->guest_id	= 0;
	iseg->pa	= 0;
}

static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}

static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but stale
	 * data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}

static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but stale
	 * data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}

static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				   wr->wr.ud.hlen);
	*lso_seg_len = halign;

	return 0;
}

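/*
 * The blh bit set above (1 << 6) is OR'ed into owner_opcode by
 * mlx4_ib_post_send() to flag LSO headers whose padded size spills
 * past one 64-byte cache line.
 */
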
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_RC:
		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
			     qp->ibqp.qp_type == IB_QPT_GSI)) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

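/*
 * Minimal caller's-eye sketch of this verb (illustrative only;
 * dma_addr, len, mr and cookie below are hypothetical placeholders,
 * not symbols from this file):
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr swr = {
 *		.wr_id		= cookie,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	err = ib_post_send(qp, &swr, &bad_wr);
 */
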
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

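/*
 * Note how the first unused scatter entry above is terminated with a
 * zero byte count and MLX4_INVALID_LKEY, so the HCA stops walking
 * the scatter list early instead of reading stale entries.
 */
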
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->sl		  = (path->sched_queue >> 2) & 0xf;
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

out:
	mutex_unlock(&qp->mutex);
	return err;
}