/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

enum {
	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
};
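/*
 * A rough breakdown of the 82-byte worst case above (illustrative,
 * assuming the usual IBoE field sizes): 18 bytes of Ethernet + VLAN
 * tag, 40 bytes of GRH, 12 bytes of BTH, 8 bytes of DETH and 4 bytes
 * of immediate data add up to 82.
 */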
struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};
enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
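/*
 * For example (illustrative values only), the address
 * 00:11:22:33:44:55 packs into 0x001122334455, i.e. addr[0] ends up
 * in the most significant populated byte of the result.
 */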
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn <  dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;
	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	return proxy_sqp;
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;
	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
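/*
 * Restating the convention used by the helpers above: a WQE index n
 * becomes a byte offset via the queue's wqe_shift, so strides are
 * always a power of two, and callers mask n with (wqe_cnt - 1) when
 * they need the index to wrap around the ring.
 */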
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 * 0x7FFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}
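/*
 * Worked example (illustrative numbers, not taken from a real QP):
 * with wqe_shift == 6 (64-byte basic blocks) and wqe_cnt == 64,
 * stamping a 192-byte WQE at index n touches offsets 0, 64 and 128
 * inside the WQE, writing 0x7fffffff or 0xffffffff depending on which
 * "lap" of the ring (ind & wqe_cnt) the chunk belongs to, so a
 * prefetching HCA never sees a stale ownership bit that looks valid.
 */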
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}
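/*
 * Example (made-up sizes): with wqe_cnt == 256 and sq_max_wqes_per_wr
 * == 4, posting at index 254 leaves only 2 basic blocks before the end
 * of the ring, so a 2-block NOP is posted and the caller's WR starts
 * cleanly at index 256 (0 after masking) instead of being split across
 * the wrap-around point.
 */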
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d "
				"on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);
	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
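/*
 * Rough sizing example for the function above (illustrative numbers
 * only): with 64-byte basic blocks (wqe_shift == 6) and a 200-byte max
 * WR, sq_max_wqes_per_wr is 4, the headroom is (2048 >> 6) + 4 == 36
 * blocks, and a request for 128 sends rounds 128 * 4 + 36 up to 1024
 * blocks, so max_post is reported back as (1024 - 36) / 4 == 247.
 */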
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
			    gfp_t gfp)
{
	int qpn;
	int err;
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else if (mlx4_is_master(dev->dev))
				qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
			else
				qp_type = MLX4_IB_QPT_PROXY_SMI;
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		} else {
			qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
			if (!qp)
				return -ENOMEM;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state	 = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
			if (dev->steering_support ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED)
				qp->flags |= MLX4_IB_QP_NETIF;
			else
				goto err;
		}

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
		if (err)
			goto err_mtt;

		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
		 * BlueFlame setup flow wrongly causes VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
		else
			if (qp->flags & MLX4_IB_QP_NETIF)
				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
			else
				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
							    &qpn);
		if (err)
			goto err_proxy;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;
	if (!*caller_qp)
		*caller_qp = qp;
	return 0;

err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	if (!*caller_qp)
		kfree(qp);
	return err;
}
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
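/*
 * The CQN comparison above gives the two CQ locks a single global
 * order, so two QPs that share the same pair of CQs in opposite
 * send/recv roles can never deadlock against each other; the
 * __acquire() annotation keeps sparse happy when both CQs are in fact
 * the same object.
 */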
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET) {
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
		if (qp->pri.smac) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
	}

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return  dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}
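/*
 * Example of the mapping above (base_sqpn is illustrative; the real
 * value comes from firmware): with base_sqpn == 0x40 on a two-port
 * HCA, the native QP0s are 0x40/0x41 (ports 1/2) and the native QP1s
 * are 0x42/0x43 -- exactly the "+0 or +2, plus port - 1" formula.
 */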
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	u16 xrcdn = 0;
	gfp_t gfp;

	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
		GFP_NOIO : GFP_KERNEL;
	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP |
					MLX4_IB_SRIOV_SQP |
					MLX4_IB_QP_NETIF |
					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return ERR_PTR(-EINVAL);
	}

	if (init_attr->create_flags &&
	    (udata ||
	     ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) &&
	      init_attr->qp_type != IB_QPT_UD) ||
	     ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type > IB_QPT_GSI)))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof *qp, gfp);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
				       udata, 0, &qp, gfp);
		if (err)
			return ERR_PTR(err);

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);

		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
				       get_sqp_num(to_mdev(pd->device), init_attr),
				       &qp, gfp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
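/*
 * Note on the masking above: when the responder depth is zero the QP
 * cannot be the target of RDMA reads or atomics anyway, so only the
 * REMOTE_WRITE bit is allowed to survive into the hardware flags.
 */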
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
	int err;
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	int vidx;
	int smac_index;

	path->grh_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid	    = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3);

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs.  unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
				} else {
					path->vlan_index = smac_info->vlan_index;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
			}
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		/* get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		 */
		if (!smac_info->smac || smac_info->smac != smac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, smac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = smac;
				smac_info->candidate_smac_port = port;
			} else {
				return -EINVAL;
			}
		} else {
			smac_index = smac_info->smac_index;
		}

		memcpy(path->dmac, ah->dmac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;
	} else {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
	}

	return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      mlx4_mac_to_u64((u8 *)qp->smac),
			      (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
			      path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      mlx4_mac_to_u64((u8 *)qp->alt_smac),
			      (qp_attr_mask & IB_QP_ALT_VID) ?
			      qp->alt_vlan_id : 0xffff,
			      path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
				    struct mlx4_qp_context *context)
{
	struct net_device *ndev;
	u64 u64_mac;
	int smac_index;

	ndev = dev->iboe.netdevs[qp->port - 1];
	if (ndev) {
		smac = ndev->dev_addr;
		u64_mac = mlx4_mac_to_u64(smac);
	} else {
		u64_mac = dev->dev->caps.def_mac[qp->port];
	}

	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
	if (!qp->pri.smac) {
		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
		if (smac_index >= 0) {
			qp->pri.candidate_smac_index = smac_index;
			qp->pri.candidate_smac = u64_mac;
			qp->pri.candidate_smac_port = qp->port;
			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
		} else {
			return -ENOENT;
		}
	}
	return 0;
}
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;
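	/*
	 * mtu_msgmax above packs two fields: the IB MTU enum in bits 7:5
	 * and the log2 of the largest message in bits 4:0.  For example
	 * (values illustrative), (IB_MTU_4096 << 5) | 12 advertises a 4K
	 * path MTU with a 4KB (2^12) maximum message, which is what plain
	 * UD QPs get.
	 */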
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
			context->pri_path.counter_index =
						dev->counters[qp->port - 1];
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else
			context->pri_path.counter_index = 0xff;

		if (qp->flags & MLX4_IB_QP_NETIF) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
			steer_qp = 1;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
				  attr_mask & IB_QP_PORT ?
				  attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
				      &context->alt_path,
				      attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}
	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd       = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY"
				       " 0x%x (range 0xffff0000..0xffffffff"
				       " is reserved)\n", attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD ||
	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
		    IB_LINK_LAYER_ETHERNET) {
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
				context->pri_path.feup = 1 << 7; /* don't fsm */
			/* handle smac_index */
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
				err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
				if (err)
					goto out;
			}
		}
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					MLX4_IB_LINK_TYPE_ETH;

	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(
				&dev->ib_dev, qp->port) ==
				IB_LINK_LAYER_ETHERNET;
		if (is_eth) {
			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}
	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
					qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET) {
		if (!ibqp->uobject) {
			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
					 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
			if (send_cq != recv_cq)
				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

			qp->rq.head = 0;
			qp->rq.tail = 0;
			qp->sq.head = 0;
			qp->sq.tail = 0;
			qp->sq_next_wqe = 0;
			if (qp->rq.wqe_cnt)
				*qp->db.db  = 0;

			if (qp->flags & MLX4_IB_QP_NETIF)
				mlx4_ib_steer_qp_reg(dev, qp, 0);
		}
		if (qp->pri.smac) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}

		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}
out:
	if (err && steer_qp)
		mlx4_ib_steer_qp_reg(dev, qp, 0);

	if (qp->pri.candidate_smac) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
		} else {
			if (qp->pri.smac)
				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = qp->pri.candidate_smac;
			qp->pri.smac_index = qp->pri.candidate_smac_index;
			qp->pri.smac_port = qp->pri.candidate_smac_port;
		}
		qp->pri.candidate_smac = 0;
		qp->pri.candidate_smac_index = 0;
		qp->pri.candidate_smac_port = 0;
	}
	if (qp->alt.candidate_smac) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
		} else {
			if (qp->alt.smac)
				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = qp->alt.candidate_smac;
			qp->alt.smac_index = qp->alt.candidate_smac_index;
			qp->alt.smac_port = qp->alt.candidate_smac_port;
		}
		qp->alt.candidate_smac = 0;
		qp->alt.candidate_smac_index = 0;
		qp->alt.candidate_smac_port = 0;
	}

	if (qp->pri.update_vid) {
		if (err) {
			if (qp->pri.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
						     qp->pri.candidate_vid);
		} else {
			if (qp->pri.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
						     qp->pri.vid);
			qp->pri.vid = qp->pri.candidate_vid;
			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
			qp->pri.vlan_index = qp->pri.candidate_vlan_index;
		}
		qp->pri.candidate_vid = 0xFFFF;
		qp->pri.update_vid = 0;
	}

	if (qp->alt.update_vid) {
		if (err) {
			if (qp->alt.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
						     qp->alt.candidate_vid);
		} else {
			if (qp->alt.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
						     qp->alt.vid);
			qp->alt.vid = qp->alt.candidate_vid;
			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
			qp->alt.vlan_index = qp->alt.candidate_vlan_index;
		}
		qp->alt.candidate_vid = 0xFFFF;
		qp->alt.update_vid = 0;
	}

	kfree(context);
	return err;
}
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	enum rdma_link_layer ll;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ll = IB_LINK_LAYER_UNSPECIFIED;
	} else {
		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified "
			 "for transition %d to %d. qp_type %d,"
			 " attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified "
			 "for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	     IB_LINK_LAYER_ETHERNET))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
				 "for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
				  struct ib_send_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
	struct ib_device *ib_dev = &mdev->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	u32 qkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	if (wr->opcode != IB_WR_SEND)
		return -EINVAL;

	send_size = 0;

	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	/* for proxy-qp0 sends, need to add in size of tunnel header */
	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
		send_size += sizeof (struct mlx4_ib_tunnel_header);

	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);

	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
		sqp->ud_header.lrh.source_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	/* force loopback */
	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	sqp->ud_header.lrh.virtual_lane    = 0;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	else
		sqp->ud_header.bth.destination_qpn =
			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);

	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
		return -EINVAL;
	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);

	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
	sqp->ud_header.immediate_present = 0;

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
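/*
 * Worked example for the two-segment inline copy used above (and again
 * in build_mlx_header() below), with illustrative numbers only and
 * assuming MLX4_INLINE_ALIGN is 64 and an inline segment header is 4
 * bytes: if (inl + 1) falls at offset 20 within its 64-byte chunk, then
 * spc = 64 - 20 = 44.  A 74-byte packed UD header does not fit, so the
 * first inline segment carries 44 bytes and a second segment, whose
 * header starts exactly on the next 64-byte boundary, carries the
 * remaining 30 bytes.  *mlx_seg_len is then ALIGN(2 * 4 + 74, 16) = 96.
 */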
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	union ib_gid sgid;
	u16 pkey;
	u16 vlan = 0xffff;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
							   ah->av.ib.gid_index, &sgid.raw[0]);
			if (err)
				return err;
		} else {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid);
			if (err)
				return err;
		}

		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
			is_vlan = 1;
		}
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
		if (is_eth)
			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
		else if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sqp->ud_header.grh.source_gid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
					subnet_prefix;
			sqp->ud_header.grh.source_gid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
					guid_cache[ah->av.ib.gid_index];
		} else
			ib_get_cached_gid(ib_dev,
					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
					  ah->av.ib.gid_index,
					  &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		struct in6_addr in6;
		u8 *smac;

		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
		memcpy(&in6, sgid.raw, sizeof(in6));

		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
			smac = to_mdev(sqp->qp.ibqp.device)->
				iboe.netdevs[sqp->qp.port - 1]->dev_addr;
		else	/* use the src mac of the tunnel */
			smac = ah->av.eth.s_mac;
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err(" [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
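/*
 * Note on the VLAN tag built in build_mlx_header(): an 802.1Q TCI has
 * the priority (PCP) in bits 15:13, DEI in bit 12 and the VLAN ID in
 * bits 11:0.  "pcp" above is derived from the top three bits of the
 * 4-bit IB SL (sl_tclass_flowlabel >> 29) and shifted into the PCP
 * position, so "vlan | pcp" forms a complete TCI with DEI left at zero.
 */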
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
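/*
 * wq->head - wq->tail relies on unsigned wrap-around and gives the
 * number of WQEs currently outstanding.  The lockless test above is
 * the fast path; completions, which advance wq->tail, are processed
 * under the CQ lock, so when the queue looks full the count is
 * re-read under that lock before overflow is finally declared.
 */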
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}
static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
	bseg->lkey     = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
	bseg->addr     = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
	bseg->length   = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
}
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
}
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_send_wr *wr, enum ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	/* This function used only for sending on QP1 proxies */
	dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
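/*
 * set_data_seg() (send path) writes lkey and addr first and sets
 * byte_count only after a wmb(), for the prefetch hazard described in
 * the comment above.  __set_data_seg() (receive path) needs no such
 * ordering: mlx4_ib_post_recv() publishes receive WQEs to the device
 * only through the doorbell record update, which is itself preceded
 * by a wmb().
 */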
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				   wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}
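/*
 * Layout of the LSO parameters produced by build_lso_seg(): *lso_hdr_sz
 * encodes the per-segment payload (mss - hlen) in its high 16 bits and
 * the inlined header length in its low 16 bits.  *blh gets bit 6 set
 * when the header would spill past one 64-byte cache line; the caller
 * ORs it into ctrl->owner_opcode.
 */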
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;

	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, wr);
				wqe  += sizeof(struct mlx4_wqe_bind_seg);
				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, wr);
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;
			}
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
			/* don't allow QP0 sends on guests */
			err = -ENOSYS;
			*bad_wr = wr;
			goto out;
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */
		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
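/*
 * A note on ctrl->owner_opcode in mlx4_ib_post_send(): "ind" is a
 * free-running WQE index, so (ind & qp->sq.wqe_cnt) flips each time
 * the producer wraps around the send queue.  Folding that bit into
 * the top of owner_opcode is what lets the hardware distinguish a
 * freshly posted WQE from a stale one left over from the previous
 * pass over the ring.
 */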
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
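/*
 * Unlike the send path, which writes a doorbell register through
 * uar_map, mlx4_ib_post_recv() only updates the doorbell record in
 * memory (*qp->db.db) with the low 16 bits of rq.head; the device
 * reads that record to learn how many receive WQEs have been posted.
 */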
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
			((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
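/*
 * The static_rate conversion in to_ib_ah_attr() appears to undo the
 * encoding used when the address path was built: the stored rate is
 * the IB rate enum plus a fixed offset (MLX4_STAT_RATE_OFFSET, 5 in
 * the mlx4 headers), with 0 meaning no static rate limit.
 */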
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (qp->flags & MLX4_IB_QP_NETIF)
		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}