/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
enum {
        MLX4_IB_ACK_REQ_FREQ    = 8,
};

enum {
        MLX4_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX4_IB_LINK_TYPE_IB            = 0,
        MLX4_IB_LINK_TYPE_ETH           = 1
};

enum {
        /*
         * Largest possible UD header: send with GRH and immediate
         * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
         * tag.  (LRH would only use 8 bytes, so Ethernet is the
         * biggest case)
         */
        MLX4_IB_UD_HEADER_SIZE          = 82,
        MLX4_IB_LSO_HEADER_SPARE        = 128,
};

enum {
        MLX4_IB_IBOE_ETHERTYPE          = 0x8915
};
struct mlx4_ib_sqp {
        struct mlx4_ib_qp       qp;
        int                     pkey_index;
        u32                     qkey;
        u32                     send_psn;
        struct ib_ud_header     ud_header;
        u8                      header_buf[MLX4_IB_UD_HEADER_SIZE];
};
enum {
        MLX4_IB_MIN_SQ_STRIDE   = 6,
        MLX4_IB_CACHE_LINE_SIZE = 64,
};

enum {
        MLX4_RAW_QP_MTU         = 7,
        MLX4_RAW_QP_MSGMAX      = 31,
};
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac <<= 8;
                mac |= addr[i];
        }
        return mac;
}
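/*
 * Illustration (hypothetical values): for addr = 00:11:22:33:44:55 the
 * helper above returns 0x0000001122334455ULL, i.e. the MAC bytes packed
 * MSB-first into a u64, which is the form later passed to
 * mlx4_register_mac().
 */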
static const __be32 mlx4_ib_opcode[] = {
        [IB_WR_SEND]                            = cpu_to_be32(MLX4_OPCODE_SEND),
        [IB_WR_LSO]                             = cpu_to_be32(MLX4_OPCODE_LSO),
        [IB_WR_SEND_WITH_IMM]                   = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
        [IB_WR_RDMA_WRITE]                      = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
        [IB_WR_RDMA_WRITE_WITH_IMM]             = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
        [IB_WR_RDMA_READ]                       = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
        [IB_WR_ATOMIC_CMP_AND_SWP]              = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
        [IB_WR_ATOMIC_FETCH_AND_ADD]            = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
        [IB_WR_SEND_WITH_INV]                   = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
        [IB_WR_LOCAL_INV]                       = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
        [IB_WR_FAST_REG_MR]                     = cpu_to_be32(MLX4_OPCODE_FMR),
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
        [IB_WR_BIND_MW]                         = cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
        return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
        if (!mlx4_is_master(dev->dev))
                return 0;

        return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
               qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
               8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
        int real_sqp;
        int i;

        /* PPF or Native -- real SQP */
        real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
                    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
                    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
        if (real_sqp)
                return 1;
        /* VF or PF -- proxy SQP */
        if (mlx4_is_mfunc(dev->dev)) {
                for (i = 0; i < dev->dev->caps.num_ports; i++) {
                        if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
                            qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
                                real_sqp = 1;
                                break;
                        }
                }
        }
        return real_sqp;
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
        int real_qp0;
        int i;

        /* PPF or Native -- real QP0 */
        real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
                    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
                    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
        if (real_qp0)
                return 1;
        /* VF or PF -- proxy QP0 */
        if (mlx4_is_mfunc(dev->dev)) {
                for (i = 0; i < dev->dev->caps.num_ports; i++) {
                        if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
                                real_qp0 = 1;
                                break;
                        }
                }
        }
        return real_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
        return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
        __be32 *wqe;
        int i;
        int s;
        int ind;
        void *buf;
        __be32 stamp;
        struct mlx4_wqe_ctrl_seg *ctrl;

        if (qp->sq_max_wqes_per_wr > 1) {
                s = roundup(size, 1U << qp->sq.wqe_shift);
                for (i = 0; i < s; i += 64) {
                        ind = (i >> qp->sq.wqe_shift) + n;
                        stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
                                                       cpu_to_be32(0xffffffff);
                        buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
                        wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
                        *wqe = stamp;
                }
        } else {
                ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
                s = (ctrl->fence_size & 0x3f) << 4;
                for (i = 64; i < s; i += 64) {
                        wqe = buf + i;
                        *wqe = cpu_to_be32(0xffffffff);
                }
        }
}
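/*
 * Example of the stamping above, assuming 64-byte basic blocks
 * (wqe_shift == 6): a 192-byte descriptor spans three chunks, and the
 * loop writes the first dword of each chunk with 0xffffffff or
 * 0x7fffffff depending on which pass over the ring the index falls in,
 * so a prefetched but not-yet-posted WQE is never seen as valid work.
 */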
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
        struct mlx4_wqe_ctrl_seg *ctrl;
        struct mlx4_wqe_inline_seg *inl;
        void *wqe;
        int s;

        ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
        s = sizeof(struct mlx4_wqe_ctrl_seg);

        if (qp->ibqp.qp_type == IB_QPT_UD) {
                struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
                struct mlx4_av *av = (struct mlx4_av *)dgram->av;
                memset(dgram, 0, sizeof *dgram);
                av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
                s += sizeof(struct mlx4_wqe_datagram_seg);
        }

        /* Pad the remainder of the WQE with an inline data segment. */
        if (size > s) {
                inl = wqe + s;
                inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
        }
        ctrl->srcrb_flags = 0;
        ctrl->fence_size = size / 16;
        /*
         * Make sure descriptor is fully written before setting ownership bit
         * (because HW can start executing as soon as we do).
         */
        wmb();

        ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
                (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

        stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
        unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
        if (unlikely(s < qp->sq_max_wqes_per_wr)) {
                post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
                ind += s;
        }
        return ind;
}
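/*
 * For example, with wqe_cnt == 256 and sq_max_wqes_per_wr == 4, a WR
 * that would start at index 254 has only two basic blocks left before
 * the wrap point, so a two-block NOP is posted first and the WR is
 * placed at index 0 instead.
 */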
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

        if (type == MLX4_EVENT_TYPE_PATH_MIG)
                to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

        if (ibqp->event_handler) {
                event.device     = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case MLX4_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case MLX4_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        pr_warn("Unexpected event type %d "
                                "on QP %06x\n", type, qp->qpn);
                        return;
                }

                ibqp->event_handler(&event, ibqp->qp_context);
        }
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
        /*
         * UD WQEs must have a datagram segment.
         * RC and UC WQEs might have a remote address segment.
         * MLX WQEs need two extra inline data segments (for the UD
         * header and space for the ICRC).
         */
        switch (type) {
        case MLX4_IB_QPT_UD:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_datagram_seg) +
                        ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
        case MLX4_IB_QPT_PROXY_SMI_OWNER:
        case MLX4_IB_QPT_PROXY_SMI:
        case MLX4_IB_QPT_PROXY_GSI:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_datagram_seg) + 64;
        case MLX4_IB_QPT_TUN_SMI_OWNER:
        case MLX4_IB_QPT_TUN_GSI:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_datagram_seg);
        case MLX4_IB_QPT_UC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_RC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_atomic_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_SMI:
        case MLX4_IB_QPT_GSI:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        ALIGN(MLX4_IB_UD_HEADER_SIZE +
                              DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
                                           MLX4_INLINE_ALIGN) *
                              sizeof (struct mlx4_wqe_inline_seg),
                              sizeof (struct mlx4_wqe_data_seg)) +
                        ALIGN(4 +
                              sizeof (struct mlx4_wqe_inline_seg),
                              sizeof (struct mlx4_wqe_data_seg));
        default:
                return sizeof (struct mlx4_wqe_ctrl_seg);
        }
}
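/*
 * For instance, a plain UD QP without LSO reserves
 * sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg)
 * = 16 + 48 = 64 bytes of per-WQE overhead; enabling MLX4_IB_QP_LSO adds
 * another MLX4_IB_LSO_HEADER_SPARE (128) bytes on top of that.
 */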
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
            cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
                return -EINVAL;

        if (!has_rq) {
                if (cap->max_recv_wr)
                        return -EINVAL;

                qp->rq.wqe_cnt = qp->rq.max_gs = 0;
        } else {
                /* HW requires >= 1 RQ entry with >= 1 gather entry */
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
                        return -EINVAL;

                qp->rq.wqe_cnt   = roundup_pow_of_two(max(1U, cap->max_recv_wr));
                qp->rq.max_gs    = roundup_pow_of_two(max(1U, cap->max_recv_sge));
                qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
        }

        /* leave userspace return values as they were, so as not to break ABI */
        if (is_user) {
                cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
                cap->max_recv_sge = qp->rq.max_gs;
        } else {
                cap->max_recv_wr  = qp->rq.max_post =
                        min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
                cap->max_recv_sge = min(qp->rq.max_gs,
                                        min(dev->dev->caps.max_sq_sg,
                                            dev->dev->caps.max_rq_sg));
        }

        return 0;
}
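/*
 * Worked example: a request for max_recv_wr = 100 and max_recv_sge = 3
 * becomes wqe_cnt = 128 and max_gs = 4, giving wqe_shift =
 * ilog2(4 * sizeof (struct mlx4_wqe_data_seg)) = ilog2(64) = 6,
 * i.e. 64-byte receive WQEs.
 */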
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                              enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
        int s;

        /* Sanity check SQ size before proceeding */
        if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
            cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
            cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
            sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
                return -EINVAL;

        /*
         * For MLX transport we need 2 extra S/G entries:
         * one for the header and one for the checksum at the end
         */
        if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
             type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
            cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
                return -EINVAL;

        s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
                cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
                send_wqe_overhead(type, qp->flags);

        if (s > dev->dev->caps.max_sq_desc_sz)
                return -EINVAL;

        /*
         * Hermon supports shrinking WQEs, such that a single work
         * request can include multiple units of 1 << wqe_shift.  This
         * way, work requests can differ in size, and do not have to
         * be a power of 2 in size, saving memory and speeding up send
         * WR posting.  Unfortunately, if we do this then the
         * wqe_index field in CQEs can't be used to look up the WR ID
         * anymore, so we do this only if selective signaling is off.
         *
         * Further, on 32-bit platforms, we can't use vmap() to make
         * the QP buffer virtually contiguous.  Thus we have to use
         * constant-sized WRs to make sure a WR is always fully within
         * a single page-sized chunk.
         *
         * Finally, we use NOP work requests to pad the end of the
         * work queue, to avoid wrap-around in the middle of WR.  We
         * set NEC bit to avoid getting completions with error for
         * these NOP WRs, but since NEC is only supported starting
         * with firmware 2.2.232, we use constant-sized WRs for older
         * firmware.
         *
         * And, since MLX QPs only support SEND, we use constant-sized
         * WRs in this case.
         *
         * We look for the smallest value of wqe_shift such that the
         * resulting number of wqes does not exceed device
         * capabilities.
         *
         * We set WQE size to at least 64 bytes, this way stamping
         * invalidates each WQE.
         */
        if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
            qp->sq_signal_bits && BITS_PER_LONG == 64 &&
            type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
            !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
                      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
                qp->sq.wqe_shift = ilog2(64);
        else
                qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

        for (;;) {
                qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

                /*
                 * We need to leave 2 KB + 1 WR of headroom in the SQ to
                 * allow HW to prefetch.
                 */
                qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
                qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
                                                    qp->sq_max_wqes_per_wr +
                                                    qp->sq_spare_wqes);

                if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
                        break;

                if (qp->sq_max_wqes_per_wr <= 1)
                        return -EINVAL;

                ++qp->sq.wqe_shift;
        }

        qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
                             (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
                         send_wqe_overhead(type, qp->flags)) /
                sizeof (struct mlx4_wqe_data_seg);

        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << qp->sq.wqe_shift);
        if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
                qp->rq.offset = 0;
                qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        } else {
                qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
                qp->sq.offset = 0;
        }

        cap->max_send_wr  = qp->sq.max_post =
                (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
        cap->max_send_sge = min(qp->sq.max_gs,
                                min(dev->dev->caps.max_sq_sg,
                                    dev->dev->caps.max_rq_sg));
        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}
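/*
 * Headroom example: with 64-byte basic blocks (wqe_shift == 6) and one
 * block per WR, sq_spare_wqes = (2048 >> 6) + 1 = 33, so max_send_wr =
 * 100 rounds wqe_cnt up to roundup_pow_of_two(100 + 33) = 256 and
 * max_post = (256 - 33) / 1 = 223 is reported back to the caller.
 */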
static int set_user_sq_size(struct mlx4_ib_dev *dev,
                            struct mlx4_ib_qp *qp,
                            struct mlx4_ib_create_qp *ucmd)
{
        /* Sanity check SQ size before proceeding */
        if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
            ucmd->log_sq_stride >
            ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
            ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
                return -EINVAL;

        qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
        qp->sq.wqe_shift = ucmd->log_sq_stride;

        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << qp->sq.wqe_shift);

        return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
        int i;

        qp->sqp_proxy_rcv =
                kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
                        GFP_KERNEL);
        if (!qp->sqp_proxy_rcv)
                return -ENOMEM;
        for (i = 0; i < qp->rq.wqe_cnt; i++) {
                qp->sqp_proxy_rcv[i].addr =
                        kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                GFP_KERNEL);
                if (!qp->sqp_proxy_rcv[i].addr)
                        goto err;
                qp->sqp_proxy_rcv[i].map =
                        ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
                                          sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                          DMA_FROM_DEVICE);
        }
        return 0;

err:
        while (i > 0) {
                --i;
                ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
                                    sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                    DMA_FROM_DEVICE);
                kfree(qp->sqp_proxy_rcv[i].addr);
        }
        kfree(qp->sqp_proxy_rcv);
        qp->sqp_proxy_rcv = NULL;
        return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
        int i;

        for (i = 0; i < qp->rq.wqe_cnt; i++) {
                ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
                                    sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                    DMA_FROM_DEVICE);
                kfree(qp->sqp_proxy_rcv[i].addr);
        }
        kfree(qp->sqp_proxy_rcv);
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
                return 0;

        return !attr->srq;
}
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
        int i;
        for (i = 0; i < dev->caps.num_ports; i++) {
                if (qpn == dev->caps.qp0_proxy[i])
                        return !!dev->caps.qp0_qkey[i];
        }
        return 0;
}
621 static int create_qp_common(struct mlx4_ib_dev
*dev
, struct ib_pd
*pd
,
622 struct ib_qp_init_attr
*init_attr
,
623 struct ib_udata
*udata
, int sqpn
, struct mlx4_ib_qp
**caller_qp
,
628 struct mlx4_ib_sqp
*sqp
;
629 struct mlx4_ib_qp
*qp
;
630 enum mlx4_ib_qp_type qp_type
= (enum mlx4_ib_qp_type
) init_attr
->qp_type
;
632 /* When tunneling special qps, we use a plain UD qp */
634 if (mlx4_is_mfunc(dev
->dev
) &&
635 (!mlx4_is_master(dev
->dev
) ||
636 !(init_attr
->create_flags
& MLX4_IB_SRIOV_SQP
))) {
637 if (init_attr
->qp_type
== IB_QPT_GSI
)
638 qp_type
= MLX4_IB_QPT_PROXY_GSI
;
640 if (mlx4_is_master(dev
->dev
) ||
641 qp0_enabled_vf(dev
->dev
, sqpn
))
642 qp_type
= MLX4_IB_QPT_PROXY_SMI_OWNER
;
644 qp_type
= MLX4_IB_QPT_PROXY_SMI
;
648 /* add extra sg entry for tunneling */
649 init_attr
->cap
.max_recv_sge
++;
650 } else if (init_attr
->create_flags
& MLX4_IB_SRIOV_TUNNEL_QP
) {
651 struct mlx4_ib_qp_tunnel_init_attr
*tnl_init
=
652 container_of(init_attr
,
653 struct mlx4_ib_qp_tunnel_init_attr
, init_attr
);
654 if ((tnl_init
->proxy_qp_type
!= IB_QPT_SMI
&&
655 tnl_init
->proxy_qp_type
!= IB_QPT_GSI
) ||
656 !mlx4_is_master(dev
->dev
))
658 if (tnl_init
->proxy_qp_type
== IB_QPT_GSI
)
659 qp_type
= MLX4_IB_QPT_TUN_GSI
;
660 else if (tnl_init
->slave
== mlx4_master_func_num(dev
->dev
) ||
661 mlx4_vf_smi_enabled(dev
->dev
, tnl_init
->slave
,
663 qp_type
= MLX4_IB_QPT_TUN_SMI_OWNER
;
665 qp_type
= MLX4_IB_QPT_TUN_SMI
;
666 /* we are definitely in the PPF here, since we are creating
667 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
668 qpn
= dev
->dev
->phys_caps
.base_tunnel_sqpn
+ 8 * tnl_init
->slave
669 + tnl_init
->proxy_qp_type
* 2 + tnl_init
->port
- 1;
674 if (qp_type
== MLX4_IB_QPT_SMI
|| qp_type
== MLX4_IB_QPT_GSI
||
675 (qp_type
& (MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_SMI_OWNER
|
676 MLX4_IB_QPT_PROXY_GSI
| MLX4_IB_QPT_TUN_SMI_OWNER
))) {
677 sqp
= kzalloc(sizeof (struct mlx4_ib_sqp
), gfp
);
681 qp
->pri
.vid
= 0xFFFF;
682 qp
->alt
.vid
= 0xFFFF;
684 qp
= kzalloc(sizeof (struct mlx4_ib_qp
), gfp
);
687 qp
->pri
.vid
= 0xFFFF;
688 qp
->alt
.vid
= 0xFFFF;
693 qp
->mlx4_ib_qp_type
= qp_type
;
695 mutex_init(&qp
->mutex
);
696 spin_lock_init(&qp
->sq
.lock
);
697 spin_lock_init(&qp
->rq
.lock
);
698 INIT_LIST_HEAD(&qp
->gid_list
);
699 INIT_LIST_HEAD(&qp
->steering_rules
);
701 qp
->state
= IB_QPS_RESET
;
702 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
703 qp
->sq_signal_bits
= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
705 err
= set_rq_size(dev
, &init_attr
->cap
, !!pd
->uobject
, qp_has_rq(init_attr
), qp
);
710 struct mlx4_ib_create_qp ucmd
;
712 if (ib_copy_from_udata(&ucmd
, udata
, sizeof ucmd
)) {
717 qp
->sq_no_prefetch
= ucmd
.sq_no_prefetch
;
719 err
= set_user_sq_size(dev
, qp
, &ucmd
);
723 qp
->umem
= ib_umem_get(pd
->uobject
->context
, ucmd
.buf_addr
,
725 if (IS_ERR(qp
->umem
)) {
726 err
= PTR_ERR(qp
->umem
);
730 err
= mlx4_mtt_init(dev
->dev
, ib_umem_page_count(qp
->umem
),
731 ilog2(qp
->umem
->page_size
), &qp
->mtt
);
735 err
= mlx4_ib_umem_write_mtt(dev
, &qp
->mtt
, qp
->umem
);
739 if (qp_has_rq(init_attr
)) {
740 err
= mlx4_ib_db_map_user(to_mucontext(pd
->uobject
->context
),
741 ucmd
.db_addr
, &qp
->db
);
746 qp
->sq_no_prefetch
= 0;
748 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
)
749 qp
->flags
|= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
751 if (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)
752 qp
->flags
|= MLX4_IB_QP_LSO
;
754 if (init_attr
->create_flags
& IB_QP_CREATE_NETIF_QP
) {
755 if (dev
->steering_support
==
756 MLX4_STEERING_MODE_DEVICE_MANAGED
)
757 qp
->flags
|= MLX4_IB_QP_NETIF
;
762 err
= set_kernel_sq_size(dev
, &init_attr
->cap
, qp_type
, qp
);
766 if (qp_has_rq(init_attr
)) {
767 err
= mlx4_db_alloc(dev
->dev
, &qp
->db
, 0, gfp
);
774 if (mlx4_buf_alloc(dev
->dev
, qp
->buf_size
, PAGE_SIZE
* 2, &qp
->buf
, gfp
)) {
779 err
= mlx4_mtt_init(dev
->dev
, qp
->buf
.npages
, qp
->buf
.page_shift
,
784 err
= mlx4_buf_write_mtt(dev
->dev
, &qp
->mtt
, &qp
->buf
, gfp
);
788 qp
->sq
.wrid
= kmalloc(qp
->sq
.wqe_cnt
* sizeof (u64
), gfp
);
789 qp
->rq
.wrid
= kmalloc(qp
->rq
.wqe_cnt
* sizeof (u64
), gfp
);
790 if (!qp
->sq
.wrid
|| !qp
->rq
.wrid
) {
797 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
798 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
)) {
799 if (alloc_proxy_bufs(pd
->device
, qp
)) {
805 /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
806 * BlueFlame setup flow wrongly causes VLAN insertion. */
807 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
)
808 err
= mlx4_qp_reserve_range(dev
->dev
, 1, 1 << 8, &qpn
);
810 if (qp
->flags
& MLX4_IB_QP_NETIF
)
811 err
= mlx4_ib_steer_qp_alloc(dev
, 1, &qpn
);
813 err
= mlx4_qp_reserve_range(dev
->dev
, 1, 1,
819 err
= mlx4_qp_alloc(dev
->dev
, qpn
, &qp
->mqp
, gfp
);
823 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
)
824 qp
->mqp
.qpn
|= (1 << 23);
827 * Hardware wants QPN written in big-endian order (after
828 * shifting) for send doorbell. Precompute this value to save
829 * a little bit when posting sends.
831 qp
->doorbell_qpn
= swab32(qp
->mqp
.qpn
<< 8);
833 qp
->mqp
.event
= mlx4_ib_qp_event
;
840 if (qp
->flags
& MLX4_IB_QP_NETIF
)
841 mlx4_ib_steer_qp_free(dev
, qpn
, 1);
843 mlx4_qp_release_range(dev
->dev
, qpn
, 1);
846 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
)
847 free_proxy_bufs(pd
->device
, qp
);
850 if (qp_has_rq(init_attr
))
851 mlx4_ib_db_unmap_user(to_mucontext(pd
->uobject
->context
), &qp
->db
);
858 mlx4_mtt_cleanup(dev
->dev
, &qp
->mtt
);
862 ib_umem_release(qp
->umem
);
864 mlx4_buf_free(dev
->dev
, qp
->buf_size
, &qp
->buf
);
867 if (!pd
->uobject
&& qp_has_rq(init_attr
))
868 mlx4_db_free(dev
->dev
, &qp
->db
);
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:      return MLX4_QP_STATE_RST;
        case IB_QPS_INIT:       return MLX4_QP_STATE_INIT;
        case IB_QPS_RTR:        return MLX4_QP_STATE_RTR;
        case IB_QPS_RTS:        return MLX4_QP_STATE_RTS;
        case IB_QPS_SQD:        return MLX4_QP_STATE_SQD;
        case IB_QPS_SQE:        return MLX4_QP_STATE_SQER;
        case IB_QPS_ERR:        return MLX4_QP_STATE_ERR;
        default:                return -1;
        }
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}
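/*
 * Note: when the two CQs differ, both the lock and the unlock paths
 * above order them by CQ number, so any QPs sharing CQs always take
 * the spinlocks in the same order and cannot deadlock on each other.
 */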
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
        struct mlx4_ib_gid_entry *ge, *tmp;

        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
                list_del(&ge->list);
                kfree(ge);
        }
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
        if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
                return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
        else
                return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp,
                    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
        switch (qp->ibqp.qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
                *recv_cq = *send_cq;
                break;
        case IB_QPT_XRC_INI:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = *send_cq;
                break;
        default:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = to_mcq(qp->ibqp.recv_cq);
                break;
        }
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                              int is_user)
{
        struct mlx4_ib_cq *send_cq, *recv_cq;

        if (qp->state != IB_QPS_RESET) {
                if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
                                   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
                        pr_warn("modify QP %06x to RESET failed.\n",
                                qp->mqp.qpn);
                if (qp->pri.smac) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
                        qp->alt.smac = 0;
                }
                if (qp->pri.vid < 0x1000) {
                        mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
                        qp->pri.vid = 0xFFFF;
                        qp->pri.candidate_vid = 0xFFFF;
                        qp->pri.update_vid = 0;
                }
                if (qp->alt.vid < 0x1000) {
                        mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
                        qp->alt.vid = 0xFFFF;
                        qp->alt.candidate_vid = 0xFFFF;
                        qp->alt.update_vid = 0;
                }
        }

        get_cqs(qp, &send_cq, &recv_cq);

        mlx4_ib_lock_cqs(send_cq, recv_cq);

        if (!is_user) {
                __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
                        __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
        }

        mlx4_qp_remove(dev->dev, &qp->mqp);

        mlx4_ib_unlock_cqs(send_cq, recv_cq);

        mlx4_qp_free(dev->dev, &qp->mqp);

        if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
                if (qp->flags & MLX4_IB_QP_NETIF)
                        mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
                else
                        mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
        }

        mlx4_mtt_cleanup(dev->dev, &qp->mtt);

        if (is_user) {
                if (qp->rq.wqe_cnt)
                        mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
                                              &qp->db);
                ib_umem_release(qp->umem);
        } else {
                kfree(qp->sq.wrid);
                kfree(qp->rq.wrid);
                if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
                    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
                        free_proxy_bufs(&dev->ib_dev, qp);
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
                if (qp->rq.wqe_cnt)
                        mlx4_db_free(dev->dev, &qp->db);
        }

        del_gid_entries(qp);
}
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
        /* Native or PPF */
        if (!mlx4_is_mfunc(dev->dev) ||
            (mlx4_is_master(dev->dev) &&
             attr->create_flags & MLX4_IB_SRIOV_SQP)) {
                return  dev->dev->phys_caps.base_sqpn +
                        (attr->qp_type == IB_QPT_SMI ? 0 : 2) +
                        attr->port_num - 1;
        }
        /* PF or VF -- creating proxies */
        if (attr->qp_type == IB_QPT_SMI)
                return dev->dev->caps.qp0_proxy[attr->port_num - 1];
        else
                return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}
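/*
 * The layout assumed here: firmware places the special QPs at
 * base_sqpn + {0, 1} for QP0 and base_sqpn + {2, 3} for QP1, one per
 * port, which is why the SMI/GSI choice contributes 0 or 2 and the
 * port contributes (port_num - 1).
 */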
1051 struct ib_qp
*mlx4_ib_create_qp(struct ib_pd
*pd
,
1052 struct ib_qp_init_attr
*init_attr
,
1053 struct ib_udata
*udata
)
1055 struct mlx4_ib_qp
*qp
= NULL
;
1060 gfp
= (init_attr
->create_flags
& MLX4_IB_QP_CREATE_USE_GFP_NOIO
) ?
1061 GFP_NOIO
: GFP_KERNEL
;
1063 * We only support LSO, vendor flag1, and multicast loopback blocking,
1064 * and only for kernel UD QPs.
1066 if (init_attr
->create_flags
& ~(MLX4_IB_QP_LSO
|
1067 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
|
1068 MLX4_IB_SRIOV_TUNNEL_QP
|
1071 MLX4_IB_QP_CREATE_USE_GFP_NOIO
))
1072 return ERR_PTR(-EINVAL
);
1074 if (init_attr
->create_flags
& IB_QP_CREATE_NETIF_QP
) {
1075 if (init_attr
->qp_type
!= IB_QPT_UD
)
1076 return ERR_PTR(-EINVAL
);
1079 if (init_attr
->create_flags
&&
1081 ((init_attr
->create_flags
& ~(MLX4_IB_SRIOV_SQP
| MLX4_IB_QP_CREATE_USE_GFP_NOIO
)) &&
1082 init_attr
->qp_type
!= IB_QPT_UD
) ||
1083 ((init_attr
->create_flags
& MLX4_IB_SRIOV_SQP
) &&
1084 init_attr
->qp_type
> IB_QPT_GSI
)))
1085 return ERR_PTR(-EINVAL
);
1087 switch (init_attr
->qp_type
) {
1088 case IB_QPT_XRC_TGT
:
1089 pd
= to_mxrcd(init_attr
->xrcd
)->pd
;
1090 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
1091 init_attr
->send_cq
= to_mxrcd(init_attr
->xrcd
)->cq
;
1093 case IB_QPT_XRC_INI
:
1094 if (!(to_mdev(pd
->device
)->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
))
1095 return ERR_PTR(-ENOSYS
);
1096 init_attr
->recv_cq
= init_attr
->send_cq
;
1100 case IB_QPT_RAW_PACKET
:
1101 qp
= kzalloc(sizeof *qp
, gfp
);
1103 return ERR_PTR(-ENOMEM
);
1104 qp
->pri
.vid
= 0xFFFF;
1105 qp
->alt
.vid
= 0xFFFF;
1109 err
= create_qp_common(to_mdev(pd
->device
), pd
, init_attr
,
1110 udata
, 0, &qp
, gfp
);
1112 return ERR_PTR(err
);
1114 qp
->ibqp
.qp_num
= qp
->mqp
.qpn
;
1122 /* Userspace is not allowed to create special QPs: */
1124 return ERR_PTR(-EINVAL
);
1126 err
= create_qp_common(to_mdev(pd
->device
), pd
, init_attr
, udata
,
1127 get_sqp_num(to_mdev(pd
->device
), init_attr
),
1130 return ERR_PTR(err
);
1132 qp
->port
= init_attr
->port_num
;
1133 qp
->ibqp
.qp_num
= init_attr
->qp_type
== IB_QPT_SMI
? 0 : 1;
1138 /* Don't support raw QPs */
1139 return ERR_PTR(-EINVAL
);
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
        struct mlx4_ib_dev *dev = to_mdev(qp->device);
        struct mlx4_ib_qp *mqp = to_mqp(qp);
        struct mlx4_ib_pd *pd;

        if (is_qp0(dev, mqp))
                mlx4_CLOSE_PORT(dev->dev, mqp->port);

        if (dev->qp1_proxy[mqp->port - 1] == mqp) {
                mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
                dev->qp1_proxy[mqp->port - 1] = NULL;
                mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
        }

        pd = get_pd(mqp);
        destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

        if (is_sqp(dev, mqp))
                kfree(to_msqp(mqp));
        else
                kfree(mqp);

        return 0;
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
        switch (type) {
        case MLX4_IB_QPT_RC:            return MLX4_QP_ST_RC;
        case MLX4_IB_QPT_UC:            return MLX4_QP_ST_UC;
        case MLX4_IB_QPT_UD:            return MLX4_QP_ST_UD;
        case MLX4_IB_QPT_XRC_INI:
        case MLX4_IB_QPT_XRC_TGT:       return MLX4_QP_ST_XRC;
        case MLX4_IB_QPT_SMI:
        case MLX4_IB_QPT_GSI:
        case MLX4_IB_QPT_RAW_PACKET:    return MLX4_QP_ST_MLX;

        case MLX4_IB_QPT_PROXY_SMI_OWNER:
        case MLX4_IB_QPT_TUN_SMI_OWNER: return (mlx4_is_mfunc(dev->dev) ?
                                                MLX4_QP_ST_MLX : -1);
        case MLX4_IB_QPT_PROXY_SMI:
        case MLX4_IB_QPT_TUN_SMI:
        case MLX4_IB_QPT_PROXY_GSI:
        case MLX4_IB_QPT_TUN_GSI:       return (mlx4_is_mfunc(dev->dev) ?
                                                MLX4_QP_ST_UD : -1);
        default:                        return -1;
        }
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
                                   int attr_mask)
{
        u8 dest_rd_atomic;
        u32 access_flags;
        u32 hw_access_flags = 0;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MLX4_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= MLX4_QP_BIT_RAE;
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MLX4_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
                            int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
        path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
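/*
 * sched_queue carries the physical port select in bit 6, so this helper
 * clears and rewrites only that bit and leaves the remaining bits
 * (e.g. those coming from MLX4_IB_DEFAULT_SCHED_QUEUE) untouched.
 */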
1241 static int _mlx4_set_path(struct mlx4_ib_dev
*dev
, const struct ib_ah_attr
*ah
,
1242 u64 smac
, u16 vlan_tag
, struct mlx4_qp_path
*path
,
1243 struct mlx4_roce_smac_vlan_info
*smac_info
, u8 port
)
1245 int is_eth
= rdma_port_get_link_layer(&dev
->ib_dev
, port
) ==
1246 IB_LINK_LAYER_ETHERNET
;
1252 path
->grh_mylmc
= ah
->src_path_bits
& 0x7f;
1253 path
->rlid
= cpu_to_be16(ah
->dlid
);
1254 if (ah
->static_rate
) {
1255 path
->static_rate
= ah
->static_rate
+ MLX4_STAT_RATE_OFFSET
;
1256 while (path
->static_rate
> IB_RATE_2_5_GBPS
+ MLX4_STAT_RATE_OFFSET
&&
1257 !(1 << path
->static_rate
& dev
->dev
->caps
.stat_rate_support
))
1258 --path
->static_rate
;
1260 path
->static_rate
= 0;
1262 if (ah
->ah_flags
& IB_AH_GRH
) {
1263 if (ah
->grh
.sgid_index
>= dev
->dev
->caps
.gid_table_len
[port
]) {
1264 pr_err("sgid_index (%u) too large. max is %d\n",
1265 ah
->grh
.sgid_index
, dev
->dev
->caps
.gid_table_len
[port
] - 1);
1269 path
->grh_mylmc
|= 1 << 7;
1270 path
->mgid_index
= ah
->grh
.sgid_index
;
1271 path
->hop_limit
= ah
->grh
.hop_limit
;
1272 path
->tclass_flowlabel
=
1273 cpu_to_be32((ah
->grh
.traffic_class
<< 20) |
1274 (ah
->grh
.flow_label
));
1275 memcpy(path
->rgid
, ah
->grh
.dgid
.raw
, 16);
1279 if (!(ah
->ah_flags
& IB_AH_GRH
))
1282 path
->sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
|
1283 ((port
- 1) << 6) | ((ah
->sl
& 7) << 3);
1285 path
->feup
|= MLX4_FEUP_FORCE_ETH_UP
;
1286 if (vlan_tag
< 0x1000) {
1287 if (smac_info
->vid
< 0x1000) {
1288 /* both valid vlan ids */
1289 if (smac_info
->vid
!= vlan_tag
) {
1290 /* different VIDs. unreg old and reg new */
1291 err
= mlx4_register_vlan(dev
->dev
, port
, vlan_tag
, &vidx
);
1294 smac_info
->candidate_vid
= vlan_tag
;
1295 smac_info
->candidate_vlan_index
= vidx
;
1296 smac_info
->candidate_vlan_port
= port
;
1297 smac_info
->update_vid
= 1;
1298 path
->vlan_index
= vidx
;
1300 path
->vlan_index
= smac_info
->vlan_index
;
1303 /* no current vlan tag in qp */
1304 err
= mlx4_register_vlan(dev
->dev
, port
, vlan_tag
, &vidx
);
1307 smac_info
->candidate_vid
= vlan_tag
;
1308 smac_info
->candidate_vlan_index
= vidx
;
1309 smac_info
->candidate_vlan_port
= port
;
1310 smac_info
->update_vid
= 1;
1311 path
->vlan_index
= vidx
;
1313 path
->feup
|= MLX4_FVL_FORCE_ETH_VLAN
;
1316 /* have current vlan tag. unregister it at modify-qp success */
1317 if (smac_info
->vid
< 0x1000) {
1318 smac_info
->candidate_vid
= 0xFFFF;
1319 smac_info
->update_vid
= 1;
1323 /* get smac_index for RoCE use.
1324 * If no smac was yet assigned, register one.
1325 * If one was already assigned, but the new mac differs,
1326 * unregister the old one and register the new one.
1328 if (!smac_info
->smac
|| smac_info
->smac
!= smac
) {
1329 /* register candidate now, unreg if needed, after success */
1330 smac_index
= mlx4_register_mac(dev
->dev
, port
, smac
);
1331 if (smac_index
>= 0) {
1332 smac_info
->candidate_smac_index
= smac_index
;
1333 smac_info
->candidate_smac
= smac
;
1334 smac_info
->candidate_smac_port
= port
;
1339 smac_index
= smac_info
->smac_index
;
1342 memcpy(path
->dmac
, ah
->dmac
, 6);
1343 path
->ackto
= MLX4_IB_LINK_TYPE_ETH
;
1344 /* put MAC table smac index for IBoE */
1345 path
->grh_mylmc
= (u8
) (smac_index
) | 0x80;
1347 path
->sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
|
1348 ((port
- 1) << 6) | ((ah
->sl
& 0xf) << 2);
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
                         enum ib_qp_attr_mask qp_attr_mask,
                         struct mlx4_ib_qp *mqp,
                         struct mlx4_qp_path *path, u8 port)
{
        return _mlx4_set_path(dev, &qp->ah_attr,
                              mlx4_mac_to_u64((u8 *)qp->smac),
                              (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
                              path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
                             const struct ib_qp_attr *qp,
                             enum ib_qp_attr_mask qp_attr_mask,
                             struct mlx4_ib_qp *mqp,
                             struct mlx4_qp_path *path, u8 port)
{
        return _mlx4_set_path(dev, &qp->alt_ah_attr,
                              mlx4_mac_to_u64((u8 *)qp->alt_smac),
                              (qp_attr_mask & IB_QP_ALT_VID) ?
                              qp->alt_vlan_id : 0xffff,
                              path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
        struct mlx4_ib_gid_entry *ge, *tmp;

        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
                if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
                        ge->added = 1;
                        ge->port = qp->port;
                }
        }
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
                                    struct mlx4_qp_context *context)
{
        struct net_device *ndev;
        u64 u64_mac;
        int smac_index;

        ndev = dev->iboe.netdevs[qp->port - 1];
        if (ndev) {
                smac = ndev->dev_addr;
                u64_mac = mlx4_mac_to_u64(smac);
        } else {
                u64_mac = dev->dev->caps.def_mac[qp->port];
        }

        context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
        if (!qp->pri.smac) {
                smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
                if (smac_index >= 0) {
                        qp->pri.candidate_smac_index = smac_index;
                        qp->pri.candidate_smac = u64_mac;
                        qp->pri.candidate_smac_port = qp->port;
                        context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
                } else {
                        return -ENOENT;
                }
        }
        return 0;
}
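/*
 * The MAC registered here is only a candidate: it is committed to
 * qp->pri (and any previously registered MAC released) in the success
 * path at the end of __mlx4_ib_modify_qp() below.
 */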
1421 static int __mlx4_ib_modify_qp(struct ib_qp
*ibqp
,
1422 const struct ib_qp_attr
*attr
, int attr_mask
,
1423 enum ib_qp_state cur_state
, enum ib_qp_state new_state
)
1425 struct mlx4_ib_dev
*dev
= to_mdev(ibqp
->device
);
1426 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
1427 struct mlx4_ib_pd
*pd
;
1428 struct mlx4_ib_cq
*send_cq
, *recv_cq
;
1429 struct mlx4_qp_context
*context
;
1430 enum mlx4_qp_optpar optpar
= 0;
1435 context
= kzalloc(sizeof *context
, GFP_KERNEL
);
1439 context
->flags
= cpu_to_be32((to_mlx4_state(new_state
) << 28) |
1440 (to_mlx4_st(dev
, qp
->mlx4_ib_qp_type
) << 16));
1442 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
))
1443 context
->flags
|= cpu_to_be32(MLX4_QP_PM_MIGRATED
<< 11);
1445 optpar
|= MLX4_QP_OPTPAR_PM_STATE
;
1446 switch (attr
->path_mig_state
) {
1447 case IB_MIG_MIGRATED
:
1448 context
->flags
|= cpu_to_be32(MLX4_QP_PM_MIGRATED
<< 11);
1451 context
->flags
|= cpu_to_be32(MLX4_QP_PM_REARM
<< 11);
1454 context
->flags
|= cpu_to_be32(MLX4_QP_PM_ARMED
<< 11);
1459 if (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_SMI
)
1460 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 11;
1461 else if (ibqp
->qp_type
== IB_QPT_RAW_PACKET
)
1462 context
->mtu_msgmax
= (MLX4_RAW_QP_MTU
<< 5) | MLX4_RAW_QP_MSGMAX
;
1463 else if (ibqp
->qp_type
== IB_QPT_UD
) {
1464 if (qp
->flags
& MLX4_IB_QP_LSO
)
1465 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) |
1466 ilog2(dev
->dev
->caps
.max_gso_sz
);
1468 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
1469 } else if (attr_mask
& IB_QP_PATH_MTU
) {
1470 if (attr
->path_mtu
< IB_MTU_256
|| attr
->path_mtu
> IB_MTU_4096
) {
1471 pr_err("path MTU (%u) is invalid\n",
1475 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
1476 ilog2(dev
->dev
->caps
.max_msg_sz
);
1480 context
->rq_size_stride
= ilog2(qp
->rq
.wqe_cnt
) << 3;
1481 context
->rq_size_stride
|= qp
->rq
.wqe_shift
- 4;
1484 context
->sq_size_stride
= ilog2(qp
->sq
.wqe_cnt
) << 3;
1485 context
->sq_size_stride
|= qp
->sq
.wqe_shift
- 4;
1487 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
1488 context
->sq_size_stride
|= !!qp
->sq_no_prefetch
<< 7;
1489 context
->xrcd
= cpu_to_be32((u32
) qp
->xrcdn
);
1490 if (ibqp
->qp_type
== IB_QPT_RAW_PACKET
)
1491 context
->param3
|= cpu_to_be32(1 << 30);
1494 if (qp
->ibqp
.uobject
)
1495 context
->usr_page
= cpu_to_be32(to_mucontext(ibqp
->uobject
->context
)->uar
.index
);
1497 context
->usr_page
= cpu_to_be32(dev
->priv_uar
.index
);
1499 if (attr_mask
& IB_QP_DEST_QPN
)
1500 context
->remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
1502 if (attr_mask
& IB_QP_PORT
) {
1503 if (cur_state
== IB_QPS_SQD
&& new_state
== IB_QPS_SQD
&&
1504 !(attr_mask
& IB_QP_AV
)) {
1505 mlx4_set_sched(&context
->pri_path
, attr
->port_num
);
1506 optpar
|= MLX4_QP_OPTPAR_SCHED_QUEUE
;
1510 if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
1511 if (dev
->counters
[qp
->port
- 1] != -1) {
1512 context
->pri_path
.counter_index
=
1513 dev
->counters
[qp
->port
- 1];
1514 optpar
|= MLX4_QP_OPTPAR_COUNTER_INDEX
;
1516 context
->pri_path
.counter_index
= 0xff;
1518 if (qp
->flags
& MLX4_IB_QP_NETIF
) {
1519 mlx4_ib_steer_qp_reg(dev
, qp
, 1);
1524 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1525 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
1526 context
->pri_path
.disable_pkey_check
= 0x40;
1527 context
->pri_path
.pkey_index
= attr
->pkey_index
;
1528 optpar
|= MLX4_QP_OPTPAR_PKEY_INDEX
;
1531 if (attr_mask
& IB_QP_AV
) {
1532 if (mlx4_set_path(dev
, attr
, attr_mask
, qp
, &context
->pri_path
,
1533 attr_mask
& IB_QP_PORT
?
1534 attr
->port_num
: qp
->port
))
1537 optpar
|= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
|
1538 MLX4_QP_OPTPAR_SCHED_QUEUE
);
1541 if (attr_mask
& IB_QP_TIMEOUT
) {
1542 context
->pri_path
.ackto
|= attr
->timeout
<< 3;
1543 optpar
|= MLX4_QP_OPTPAR_ACK_TIMEOUT
;
1546 if (attr_mask
& IB_QP_ALT_PATH
) {
1547 if (attr
->alt_port_num
== 0 ||
1548 attr
->alt_port_num
> dev
->dev
->caps
.num_ports
)
1551 if (attr
->alt_pkey_index
>=
1552 dev
->dev
->caps
.pkey_table_len
[attr
->alt_port_num
])
1555 if (mlx4_set_alt_path(dev
, attr
, attr_mask
, qp
,
1557 attr
->alt_port_num
))
1560 context
->alt_path
.pkey_index
= attr
->alt_pkey_index
;
1561 context
->alt_path
.ackto
= attr
->alt_timeout
<< 3;
1562 optpar
|= MLX4_QP_OPTPAR_ALT_ADDR_PATH
;
1566 get_cqs(qp
, &send_cq
, &recv_cq
);
1567 context
->pd
= cpu_to_be32(pd
->pdn
);
1568 context
->cqn_send
= cpu_to_be32(send_cq
->mcq
.cqn
);
1569 context
->cqn_recv
= cpu_to_be32(recv_cq
->mcq
.cqn
);
1570 context
->params1
= cpu_to_be32(MLX4_IB_ACK_REQ_FREQ
<< 28);
1572 /* Set "fast registration enabled" for all kernel QPs */
1573 if (!qp
->ibqp
.uobject
)
1574 context
->params1
|= cpu_to_be32(1 << 11);
1576 if (attr_mask
& IB_QP_RNR_RETRY
) {
1577 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
1578 optpar
|= MLX4_QP_OPTPAR_RNR_RETRY
;
1581 if (attr_mask
& IB_QP_RETRY_CNT
) {
1582 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
1583 optpar
|= MLX4_QP_OPTPAR_RETRY_COUNT
;
1586 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1587 if (attr
->max_rd_atomic
)
1589 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
1590 optpar
|= MLX4_QP_OPTPAR_SRA_MAX
;
1593 if (attr_mask
& IB_QP_SQ_PSN
)
1594 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
1596 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1597 if (attr
->max_dest_rd_atomic
)
1599 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
1600 optpar
|= MLX4_QP_OPTPAR_RRA_MAX
;
1603 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
1604 context
->params2
|= to_mlx4_access_flags(qp
, attr
, attr_mask
);
1605 optpar
|= MLX4_QP_OPTPAR_RWE
| MLX4_QP_OPTPAR_RRE
| MLX4_QP_OPTPAR_RAE
;
1609 context
->params2
|= cpu_to_be32(MLX4_QP_BIT_RIC
);
1611 if (attr_mask
& IB_QP_MIN_RNR_TIMER
) {
1612 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
1613 optpar
|= MLX4_QP_OPTPAR_RNR_TIMEOUT
;
1615 if (attr_mask
& IB_QP_RQ_PSN
)
1616 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
1618 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
1619 if (attr_mask
& IB_QP_QKEY
) {
1620 if (qp
->mlx4_ib_qp_type
&
1621 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
))
1622 context
->qkey
= cpu_to_be32(IB_QP_SET_QKEY
);
1624 if (mlx4_is_mfunc(dev
->dev
) &&
1625 !(qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
) &&
1626 (attr
->qkey
& MLX4_RESERVED_QKEY_MASK
) ==
1627 MLX4_RESERVED_QKEY_BASE
) {
1628 pr_err("Cannot use reserved QKEY"
1629 " 0x%x (range 0xffff0000..0xffffffff"
1630 " is reserved)\n", attr
->qkey
);
1634 context
->qkey
= cpu_to_be32(attr
->qkey
);
1636 optpar
|= MLX4_QP_OPTPAR_Q_KEY
;
1640 context
->srqn
= cpu_to_be32(1 << 24 | to_msrq(ibqp
->srq
)->msrq
.srqn
);
1642 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1643 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
1645 if (cur_state
== IB_QPS_INIT
&&
1646 new_state
== IB_QPS_RTR
&&
1647 (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_SMI
||
1648 ibqp
->qp_type
== IB_QPT_UD
||
1649 ibqp
->qp_type
== IB_QPT_RAW_PACKET
)) {
1650 context
->pri_path
.sched_queue
= (qp
->port
- 1) << 6;
1651 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_SMI
||
1652 qp
->mlx4_ib_qp_type
&
1653 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
)) {
1654 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE
;
1655 if (qp
->mlx4_ib_qp_type
!= MLX4_IB_QPT_SMI
)
1656 context
->pri_path
.fl
= 0x80;
1658 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
1659 context
->pri_path
.fl
= 0x80;
1660 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_SCHED_QUEUE
;
1662 if (rdma_port_get_link_layer(&dev
->ib_dev
, qp
->port
) ==
1663 IB_LINK_LAYER_ETHERNET
) {
1664 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
||
1665 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
)
1666 context
->pri_path
.feup
= 1 << 7; /* don't fsm */
1667 /* handle smac_index */
1668 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_UD
||
1669 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
||
1670 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
) {
1671 err
= handle_eth_ud_smac_index(dev
, qp
, (u8
*)attr
->smac
, context
);
1674 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
)
1675 dev
->qp1_proxy
[qp
->port
- 1] = qp
;
1680 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
)
1681 context
->pri_path
.ackto
= (context
->pri_path
.ackto
& 0xf8) |
1682 MLX4_IB_LINK_TYPE_ETH
;
1684 if (ibqp
->qp_type
== IB_QPT_UD
&& (new_state
== IB_QPS_RTR
)) {
1685 int is_eth
= rdma_port_get_link_layer(
1686 &dev
->ib_dev
, qp
->port
) ==
1687 IB_LINK_LAYER_ETHERNET
;
1689 context
->pri_path
.ackto
= MLX4_IB_LINK_TYPE_ETH
;
1690 optpar
|= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
;
1695 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
1696 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&& attr
->en_sqd_async_notify
)
1701 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1702 context
->rlkey
|= (1 << 4);
1705 * Before passing a kernel QP to the HW, make sure that the
1706 * ownership bits of the send queue are set and the SQ
1707 * headroom is stamped so that the hardware doesn't start
1708 * processing stale work requests.
1710 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
1711 struct mlx4_wqe_ctrl_seg
*ctrl
;
1714 for (i
= 0; i
< qp
->sq
.wqe_cnt
; ++i
) {
1715 ctrl
= get_send_wqe(qp
, i
);
1716 ctrl
->owner_opcode
= cpu_to_be32(1 << 31);
1717 if (qp
->sq_max_wqes_per_wr
== 1)
1718 ctrl
->fence_size
= 1 << (qp
->sq
.wqe_shift
- 4);
1720 stamp_send_wqe(qp
, i
, 1 << qp
->sq
.wqe_shift
);
1724 err
= mlx4_qp_modify(dev
->dev
, &qp
->mtt
, to_mlx4_state(cur_state
),
1725 to_mlx4_state(new_state
), context
, optpar
,
1726 sqd_event
, &qp
->mqp
);
1730 qp
->state
= new_state
;
1732 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1733 qp
->atomic_rd_en
= attr
->qp_access_flags
;
1734 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1735 qp
->resp_depth
= attr
->max_dest_rd_atomic
;
1736 if (attr_mask
& IB_QP_PORT
) {
1737 qp
->port
= attr
->port_num
;
1738 update_mcg_macs(dev
, qp
);
1740 if (attr_mask
& IB_QP_ALT_PATH
)
1741 qp
->alt_port
= attr
->alt_port_num
;
1743 if (is_sqp(dev
, qp
))
1744 store_sqp_attrs(to_msqp(qp
), attr
, attr_mask
);
1747 * If we moved QP0 to RTR, bring the IB link up; if we moved
1748 * QP0 to RESET or ERROR, bring the link back down.
1750 if (is_qp0(dev
, qp
)) {
1751 if (cur_state
!= IB_QPS_RTR
&& new_state
== IB_QPS_RTR
)
1752 if (mlx4_INIT_PORT(dev
->dev
, qp
->port
))
1753 pr_warn("INIT_PORT failed for port %d\n",
1756 if (cur_state
!= IB_QPS_RESET
&& cur_state
!= IB_QPS_ERR
&&
1757 (new_state
== IB_QPS_RESET
|| new_state
== IB_QPS_ERR
))
1758 mlx4_CLOSE_PORT(dev
->dev
, qp
->port
);
1762 * If we moved a kernel QP to RESET, clean up all old CQ
1763 * entries and reinitialize the QP.
1765 if (new_state
== IB_QPS_RESET
) {
1766 if (!ibqp
->uobject
) {
1767 mlx4_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1768 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
1769 if (send_cq
!= recv_cq
)
1770 mlx4_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1776 qp
->sq_next_wqe
= 0;
1780 if (qp
->flags
& MLX4_IB_QP_NETIF
)
1781 mlx4_ib_steer_qp_reg(dev
, qp
, 0);
1784 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
1788 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
1791 if (qp
->pri
.vid
< 0x1000) {
1792 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
, qp
->pri
.vid
);
1793 qp
->pri
.vid
= 0xFFFF;
1794 qp
->pri
.candidate_vid
= 0xFFFF;
1795 qp
->pri
.update_vid
= 0;
1798 if (qp
->alt
.vid
< 0x1000) {
1799 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
, qp
->alt
.vid
);
1800 qp
->alt
.vid
= 0xFFFF;
1801 qp
->alt
.candidate_vid
= 0xFFFF;
1802 qp
->alt
.update_vid
= 0;
1806 if (err
&& steer_qp
)
1807 mlx4_ib_steer_qp_reg(dev
, qp
, 0);
1809 if (qp
->pri
.candidate_smac
) {
1811 mlx4_unregister_mac(dev
->dev
, qp
->pri
.candidate_smac_port
, qp
->pri
.candidate_smac
);
1814 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
1815 qp
->pri
.smac
= qp
->pri
.candidate_smac
;
1816 qp
->pri
.smac_index
= qp
->pri
.candidate_smac_index
;
1817 qp
->pri
.smac_port
= qp
->pri
.candidate_smac_port
;
1819 qp
->pri
.candidate_smac
= 0;
1820 qp
->pri
.candidate_smac_index
= 0;
1821 qp
->pri
.candidate_smac_port
= 0;
1823 if (qp
->alt
.candidate_smac
) {
1825 mlx4_unregister_mac(dev
->dev
, qp
->alt
.candidate_smac_port
, qp
->alt
.candidate_smac
);
1828 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
1829 qp
->alt
.smac
= qp
->alt
.candidate_smac
;
1830 qp
->alt
.smac_index
= qp
->alt
.candidate_smac_index
;
1831 qp
->alt
.smac_port
= qp
->alt
.candidate_smac_port
;
1833 qp
->alt
.candidate_smac
= 0;
1834 qp
->alt
.candidate_smac_index
= 0;
1835 qp
->alt
.candidate_smac_port
= 0;
1838 if (qp
->pri
.update_vid
) {
1840 if (qp
->pri
.candidate_vid
< 0x1000)
1841 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.candidate_vlan_port
,
1842 qp
->pri
.candidate_vid
);
1844 if (qp
->pri
.vid
< 0x1000)
1845 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
,
1847 qp
->pri
.vid
= qp
->pri
.candidate_vid
;
1848 qp
->pri
.vlan_port
= qp
->pri
.candidate_vlan_port
;
1849 qp
->pri
.vlan_index
= qp
->pri
.candidate_vlan_index
;
1851 qp
->pri
.candidate_vid
= 0xFFFF;
1852 qp
->pri
.update_vid
= 0;
1855 if (qp
->alt
.update_vid
) {
1857 if (qp
->alt
.candidate_vid
< 0x1000)
1858 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.candidate_vlan_port
,
1859 qp
->alt
.candidate_vid
);
1861 if (qp
->alt
.vid
< 0x1000)
1862 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
,
1864 qp
->alt
.vid
= qp
->alt
.candidate_vid
;
1865 qp
->alt
.vlan_port
= qp
->alt
.candidate_vlan_port
;
1866 qp
->alt
.vlan_index
= qp
->alt
.candidate_vlan_index
;
1868 qp
->alt
.candidate_vid
= 0xFFFF;
1869 qp
->alt
.update_vid
= 0;
1875 int mlx4_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1876 int attr_mask
, struct ib_udata
*udata
)
1878 struct mlx4_ib_dev
*dev
= to_mdev(ibqp
->device
);
1879 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
1880 enum ib_qp_state cur_state
, new_state
;
1883 mutex_lock(&qp
->mutex
);
1885 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
1886 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
1888 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
1889 ll
= IB_LINK_LAYER_UNSPECIFIED
;
1891 int port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
1892 ll
= rdma_port_get_link_layer(&dev
->ib_dev
, port
);
1895 if (!ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
,
1897 pr_debug("qpn 0x%x: invalid attribute mask specified "
1898 "for transition %d to %d. qp_type %d,"
1899 " attr_mask 0x%x\n",
1900 ibqp
->qp_num
, cur_state
, new_state
,
1901 ibqp
->qp_type
, attr_mask
);
1905 if ((attr_mask
& IB_QP_PORT
) &&
1906 (attr
->port_num
== 0 || attr
->port_num
> dev
->num_ports
)) {
1907 pr_debug("qpn 0x%x: invalid port number (%d) specified "
1908 "for transition %d to %d. qp_type %d\n",
1909 ibqp
->qp_num
, attr
->port_num
, cur_state
,
1910 new_state
, ibqp
->qp_type
);
1914 if ((attr_mask
& IB_QP_PORT
) && (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) &&
1915 (rdma_port_get_link_layer(&dev
->ib_dev
, attr
->port_num
) !=
1916 IB_LINK_LAYER_ETHERNET
))
1919 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1920 int p
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
1921 if (attr
->pkey_index
>= dev
->dev
->caps
.pkey_table_len
[p
]) {
1922 pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
1923 "for transition %d to %d. qp_type %d\n",
1924 ibqp
->qp_num
, attr
->pkey_index
, cur_state
,
1925 new_state
, ibqp
->qp_type
);
1930 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
1931 attr
->max_rd_atomic
> dev
->dev
->caps
.max_qp_init_rdma
) {
1932 pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
1933 "Transition %d to %d. qp_type %d\n",
1934 ibqp
->qp_num
, attr
->max_rd_atomic
, cur_state
,
1935 new_state
, ibqp
->qp_type
);
1939 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
1940 attr
->max_dest_rd_atomic
> dev
->dev
->caps
.max_qp_dest_rdma
) {
1941 pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
1942 "Transition %d to %d. qp_type %d\n",
1943 ibqp
->qp_num
, attr
->max_dest_rd_atomic
, cur_state
,
1944 new_state
, ibqp
->qp_type
);
1948 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
1953 err
= __mlx4_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
, new_state
);
1956 mutex_unlock(&qp
->mutex
);
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
        int i;
        for (i = 0; i < dev->caps.num_ports; i++) {
                if (qpn == dev->caps.qp0_proxy[i] ||
                    qpn == dev->caps.qp0_tunnel[i]) {
                        *qkey = dev->caps.qp0_qkey[i];
                        return 0;
                }
        }
        return -EINVAL;
}
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
				  struct ib_send_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
	struct ib_device *ib_dev = &mdev->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	u32 qkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	if (wr->opcode != IB_WR_SEND)
		return -EINVAL;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	/* for proxy-qp0 sends, need to add in size of tunnel header */
	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
		send_size += sizeof (struct mlx4_ib_tunnel_header);

	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);

	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
		sqp->ud_header.lrh.source_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	/* force loopback */
	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	sqp->ud_header.lrh.virtual_lane    = 0;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	else
		sqp->ud_header.bth.destination_qpn =
			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);

	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	if (mlx4_is_master(mdev->dev)) {
		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
			return -EINVAL;
	} else {
		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
			return -EINVAL;
	}
	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);

	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
	sqp->ud_header.immediate_present = 0;

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
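
/*
 * Build the MLX header for a send on an SMI/GSI (QP0/QP1) QP.  Handles
 * both link layers: on Ethernet (RoCE) ports the L2 header (MAC and
 * optional VLAN tag) is constructed here, while on IB ports the
 * LRH/GRH fields are filled from the address handle.
 */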
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;
	u8 *smac;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
							   ah->av.ib.gid_index, &sgid.raw[0]);
			if (err)
				return err;
		} else {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid);
			if (err)
				return err;
		}

		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
			is_vlan = true;
		}
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label    =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
		if (is_eth) {
			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
		} else if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sqp->ud_header.grh.source_gid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
						       subnet_prefix;
			sqp->ud_header.grh.source_gid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
					       guid_cache[ah->av.ib.gid_index];
		} else {
			ib_get_cached_gid(ib_dev,
					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
					  ah->av.ib.gid_index,
					  &sqp->ud_header.grh.source_gid);
		}
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		struct in6_addr in6;

		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
		memcpy(&in6, sgid.raw, sizeof(in6));

		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
			smac = to_mdev(sqp->qp.ibqp.device)->
				iboe.netdevs[sqp->qp.port - 1]->dev_addr;
		else	/* use the src mac of the tunnel */
			smac = ah->av.eth.s_mac;
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err("  [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
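
/*
 * Check whether posting nreq more WRs would overflow the work queue.
 * The fast path uses the queue counters alone; the slow path takes the
 * CQ lock so the check does not race with completion processing.
 */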
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
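
/*
 * Fill a fast-register WQE segment: translate the page list into
 * hardware MTT entries and program the key, length and IOVA of the
 * region being registered.
 */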
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}
static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
}
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
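
/*
 * Fill the atomic WQE segment.  For compare & swap the swap and compare
 * values are used directly; for (masked) fetch & add the add value goes
 * in the swap_add field.
 */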
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}
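
/*
 * Copy the UD address vector, destination QPN and QKEY from the work
 * request into the datagram segment.
 */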
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
}
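
/*
 * Build the datagram segment used to tunnel a proxy QP0/QP1 MAD to the
 * master's tunnel QP on the same port, forcing loopback and dropping
 * the GRH from the copied address vector.
 */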
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_send_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	if (qpt == MLX4_IB_QPT_PROXY_GSI)
		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	else
		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
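
/*
 * Append the mlx4_ib_tunnel_header (address vector, remote QPN/QKEY,
 * P_Key index and L2 info) as one or two inline segments, splitting at
 * the 64-byte boundary just like the UD header above.
 */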
static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
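
/*
 * MLX (raw SMI/GSI) sends carry a trailing 4-byte inline segment where
 * the hardware places the ICRC.
 */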
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
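
/*
 * Build the LSO segment: copy the packet headers into the WQE and
 * encode the MSS and header length for the hardware.  *blh is set when
 * the aligned header is larger than a cache line.
 */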
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				   wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
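
/*
 * Post a chain of send work requests.  For each WR the control segment,
 * any transport-specific segments and the data segments are written
 * into the send queue, and the doorbell is rung once after the loop.
 * Data segments are written last-to-first so the cacheline stamp is
 * overwritten last.
 */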
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, wr);
				wqe  += sizeof(struct mlx4_wqe_bind_seg);
				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, wr);
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr,
						qp->mlx4_ib_qp_type);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
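
/*
 * Post a chain of receive work requests.  Proxy QPs get an extra
 * scatter entry pointing at the driver's tunnel-header buffer ahead of
 * the caller's SGEs; the receive doorbell record is updated once at
 * the end.
 */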
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
			((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
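
/*
 * Query the QP context from the device and translate it back into
 * ib_qp_attr / ib_qp_init_attr values.  A QP in the RESET state is
 * reported without querying the hardware.
 */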
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (qp->flags & MLX4_IB_QP_NETIF)
		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}