/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};
enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

enum {
	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
};
struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};
enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
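/*
 * mlx4_ib_opcode[] translates IB verbs send opcodes (ib_wr_opcode) into
 * the hardware opcodes that end up in the owner_opcode field of a WQE's
 * control segment when the work request is posted.
 */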
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;

	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	return proxy_sqp;
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;

	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;

	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}
static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}
static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
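/*
 * Both queues are addressed the same way: entry n starts at the queue's
 * offset + (n << wqe_shift) within the QP buffer.  For example, with a
 * 64-byte send stride (wqe_shift = 6), send WQE 5 starts 5 * 64 = 320
 * bytes past sq.offset.
 */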
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 * 0x7FFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}
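/*
 * Example of the stamping above: with wqe_cnt = 64 and ind = 70, the WQE
 * slot is ind & (wqe_cnt - 1) = 6, and (ind & wqe_cnt) != 0 selects the
 * 0x7fffffff stamp (ownership bit clear) for that pass around the ring,
 * while the following pass (ind = 134) would use 0xffffffff instead.
 */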
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}
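/*
 * Example: with wqe_cnt = 256 and sq_max_wqes_per_wr = 4, posting at
 * ind = 254 leaves only s = 2 basic blocks before the wrap, so a
 * two-block NOP is posted first and the real WR starts at index 256
 * (slot 0 after masking).
 */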
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d "
				"on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
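/*
 * Rough per-WQE overhead example (segment sizes as defined in
 * <linux/mlx4/qp.h>): a UD QP with LSO enabled reserves 16 bytes of
 * control segment + 48 bytes of datagram segment + 128 bytes of LSO
 * header spare = 192 bytes before any data segments are counted.
 */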
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
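/*
 * Example: a request for max_recv_wr = 100, max_recv_sge = 3 becomes
 * wqe_cnt = 128 and max_gs = 4, so wqe_shift = ilog2(4 * 16) = 6, i.e. a
 * 64-byte receive WQE stride (each scatter entry is a 16-byte
 * mlx4_wqe_data_seg).
 */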
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
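/*
 * Headroom example: with 64-byte basic blocks (wqe_shift = 6) and
 * sq_max_wqes_per_wr = 4, sq_spare_wqes = (2048 >> 6) + 4 = 36, and that
 * is added in before wqe_cnt is rounded up to a power of two.
 */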
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.qp0_proxy[i])
			return !!dev->caps.qp0_qkey[i];
	}
	return 0;
}
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
			    gfp_t gfp)
{
	int qpn;
	int err;
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else {
				if (mlx4_is_master(dev->dev) ||
				    qp0_enabled_vf(dev->dev, sqpn))
					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
				else
					qp_type = MLX4_IB_QPT_PROXY_SMI;
			}
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
					     tnl_init->port))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		} else {
			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
			if (!qp)
				return -ENOMEM;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
			if (dev->steering_support ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED)
				qp->flags |= MLX4_IB_QP_NETIF;
			else
				goto err;
		}

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
		 * otherwise, the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
						    (init_attr->cap.max_send_wr ?
						     MLX4_RESERVE_ETH_BF_QP : 0) |
						    (init_attr->cap.max_recv_wr ?
						     MLX4_RESERVE_A0_QP : 0));
		else
			if (qp->flags & MLX4_IB_QP_NETIF)
				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
			else
				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
							    &qpn, 0);
		if (err)
			goto err_proxy;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;
	if (!*caller_qp)
		*caller_qp = qp;

	return 0;

err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	if (!*caller_qp)
		kfree(qp);
	return err;
}
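/*
 * Doorbell example: for qpn 0x012345, qpn << 8 = 0x01234500 and swab32()
 * of that is 0x00452301, so the precomputed doorbell_qpn can be written to
 * the send doorbell without any further byte swapping at post time.
 */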
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
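/*
 * mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() always take the two CQ locks in
 * ascending cqn order (with the second acquired as a nested lock), so two
 * threads working on QPs that share the same pair of CQs in opposite
 * send/recv roles cannot deadlock on them.
 */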
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET) {
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
	}

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	u16 xrcdn = 0;
	gfp_t gfp;

	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
		GFP_NOIO : GFP_KERNEL;
	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP |
					MLX4_IB_SRIOV_SQP |
					MLX4_IB_QP_NETIF |
					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return ERR_PTR(-EINVAL);
	}

	if (init_attr->create_flags &&
	    (udata ||
	     ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) &&
	      init_attr->qp_type != IB_QPT_UD) ||
	     ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type > IB_QPT_GSI)))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof *qp, gfp);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
				       udata, 0, &qp, gfp);
		if (err)
			return ERR_PTR(err);

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);

		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
				       get_sqp_num(to_mdev(pd->device), init_attr),
				       &qp, gfp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
		dev->qp1_proxy[mqp->port - 1] = NULL;
		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
	}

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
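/*
 * Example: qp_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ
 * with a non-zero dest_rd_atomic maps to MLX4_QP_BIT_RWE | MLX4_QP_BIT_RRE;
 * if dest_rd_atomic is zero, everything except remote write is masked off
 * before the translation.
 */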
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	int vidx;
	int smac_index;
	int err;

	path->grh_mylmc = ah->src_path_bits & 0x7f;
	path->rlid = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3);

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs.  unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
				} else {
					path->vlan_index = smac_info->vlan_index;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
			}
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		/* get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		 */
		if ((!smac_info->smac && !smac_info->smac_port) ||
		    smac_info->smac != smac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, smac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = smac;
				smac_info->candidate_smac_port = port;
			} else {
				return -EINVAL;
			}
		} else {
			smac_index = smac_info->smac_index;
		}

		memcpy(path->dmac, ah->dmac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;
	} else {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
	}

	return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      mlx4_mac_to_u64((u8 *)qp->smac),
			      (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
			      path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      mlx4_mac_to_u64((u8 *)qp->alt_smac),
			      (qp_attr_mask & IB_QP_ALT_VID) ?
			      qp->alt_vlan_id : 0xffff,
			      path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
				    struct mlx4_qp_context *context)
{
	u64 u64_mac;
	int smac_index;

	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);

	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
	if (!qp->pri.smac && !qp->pri.smac_port) {
		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
		if (smac_index >= 0) {
			qp->pri.candidate_smac_index = smac_index;
			qp->pri.candidate_smac = u64_mac;
			qp->pri.candidate_smac_port = qp->port;
			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
		} else {
			return -ENOENT;
		}
	}
	return 0;
}
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;

	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
			context->pri_path.counter_index =
						dev->counters[qp->port - 1];
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else
			context->pri_path.counter_index = 0xff;

		if (qp->flags & MLX4_IB_QP_NETIF) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
			steer_qp = 1;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
				  attr_mask & IB_QP_PORT ?
				  attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
				      &context->alt_path,
				      attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd       = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY"
				       " 0x%x (range 0xffff0000..0xffffffff"
				       " is reserved)\n", attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD ||
	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
		    IB_LINK_LAYER_ETHERNET) {
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
				context->pri_path.feup = 1 << 7; /* don't fsm */
			/* handle smac_index */
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
				err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
				if (err) {
					err = -EINVAL;
					goto out;
				}
				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
					dev->qp1_proxy[qp->port - 1] = qp;
			}
		}
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					MLX4_IB_LINK_TYPE_ETH;
		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/* set QP to receive both tunneled & non-tunneled packets */
			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
				context->srqn = cpu_to_be32(7 << 28);
		}
	}

	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(
				&dev->ib_dev, qp->port) ==
				IB_LINK_LAYER_ETHERNET;
		if (is_eth) {
			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}
	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
					qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET) {
		if (!ibqp->uobject) {
			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
					 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
			if (send_cq != recv_cq)
				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

			qp->rq.head = 0;
			qp->rq.tail = 0;
			qp->sq.head = 0;
			qp->sq.tail = 0;
			qp->sq_next_wqe = 0;
			if (qp->rq.wqe_cnt)
				*qp->db.db = 0;

			if (qp->flags & MLX4_IB_QP_NETIF)
				mlx4_ib_steer_qp_reg(dev, qp, 0);
		}
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}

		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}
out:
	if (err && steer_qp)
		mlx4_ib_steer_qp_reg(dev, qp, 0);
	kfree(context);
	if (qp->pri.candidate_smac ||
	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
		} else {
			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = qp->pri.candidate_smac;
			qp->pri.smac_index = qp->pri.candidate_smac_index;
			qp->pri.smac_port = qp->pri.candidate_smac_port;
		}
		qp->pri.candidate_smac = 0;
		qp->pri.candidate_smac_index = 0;
		qp->pri.candidate_smac_port = 0;
	}
	if (qp->alt.candidate_smac) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
		} else {
			if (qp->alt.smac)
				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = qp->alt.candidate_smac;
			qp->alt.smac_index = qp->alt.candidate_smac_index;
			qp->alt.smac_port = qp->alt.candidate_smac_port;
		}
		qp->alt.candidate_smac = 0;
		qp->alt.candidate_smac_index = 0;
		qp->alt.candidate_smac_port = 0;
	}

	if (qp->pri.update_vid) {
		if (err) {
			if (qp->pri.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
						     qp->pri.candidate_vid);
		} else {
			if (qp->pri.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
						     qp->pri.vid);
			qp->pri.vid = qp->pri.candidate_vid;
			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
			qp->pri.vlan_index = qp->pri.candidate_vlan_index;
		}
		qp->pri.candidate_vid = 0xFFFF;
		qp->pri.update_vid = 0;
	}

	if (qp->alt.update_vid) {
		if (err) {
			if (qp->alt.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
						     qp->alt.candidate_vid);
		} else {
			if (qp->alt.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
						     qp->alt.vid);
			qp->alt.vid = qp->alt.candidate_vid;
			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
			qp->alt.vlan_index = qp->alt.candidate_vlan_index;
		}
		qp->alt.candidate_vid = 0xFFFF;
		qp->alt.update_vid = 0;
	}

	return err;
}
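/*
 * The candidate_smac/candidate_vid fields above implement a two-phase
 * update: new MAC/VLAN entries are registered before the firmware command
 * is issued, and once it completes the old entries are unregistered and
 * the candidates committed on success, while on failure the candidates
 * themselves are unregistered instead.
 */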
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	enum rdma_link_layer ll;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ll = IB_LINK_LAYER_UNSPECIFIED;
	} else {
		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified "
			 "for transition %d to %d. qp_type %d,"
			 " attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
		goto out;
	}

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
			if ((ibqp->qp_type == IB_QPT_RC) ||
			    (ibqp->qp_type == IB_QPT_UD) ||
			    (ibqp->qp_type == IB_QPT_UC) ||
			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
				attr->port_num = mlx4_ib_bond_next_port(dev);
			}
		} else {
			/* no sense in changing port_num
			 * when ports are bonded */
			attr_mask &= ~IB_QP_PORT;
		}
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified "
			 "for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	     IB_LINK_LAYER_ETHERNET))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
				 "for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
		attr->port_num = 1;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
	int i;
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.qp0_proxy[i] ||
		    qpn == dev->caps.qp0_tunnel[i]) {
			*qkey = dev->caps.qp0_qkey[i];
			return 0;
		}
	}
	return -EINVAL;
}
*sqp
,
1996 struct ib_send_wr
*wr
,
1997 void *wqe
, unsigned *mlx_seg_len
)
1999 struct mlx4_ib_dev
*mdev
= to_mdev(sqp
->qp
.ibqp
.device
);
2000 struct ib_device
*ib_dev
= &mdev
->ib_dev
;
2001 struct mlx4_wqe_mlx_seg
*mlx
= wqe
;
2002 struct mlx4_wqe_inline_seg
*inl
= wqe
+ sizeof *mlx
;
2003 struct mlx4_ib_ah
*ah
= to_mah(wr
->wr
.ud
.ah
);
2011 if (wr
->opcode
!= IB_WR_SEND
)
2016 for (i
= 0; i
< wr
->num_sge
; ++i
)
2017 send_size
+= wr
->sg_list
[i
].length
;
2019 /* for proxy-qp0 sends, need to add in size of tunnel header */
2020 /* for tunnel-qp0 sends, tunnel header is already in s/g list */
2021 if (sqp
->qp
.mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_SMI_OWNER
)
2022 send_size
+= sizeof (struct mlx4_ib_tunnel_header
);
2024 ib_ud_header_init(send_size
, 1, 0, 0, 0, 0, &sqp
->ud_header
);
2026 if (sqp
->qp
.mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_SMI_OWNER
) {
2027 sqp
->ud_header
.lrh
.service_level
=
2028 be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 28;
2029 sqp
->ud_header
.lrh
.destination_lid
=
2030 cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
2031 sqp
->ud_header
.lrh
.source_lid
=
2032 cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
2035 mlx
->flags
&= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
2037 /* force loopback */
2038 mlx
->flags
|= cpu_to_be32(MLX4_WQE_MLX_VL15
| 0x1 | MLX4_WQE_MLX_SLR
);
2039 mlx
->rlid
= sqp
->ud_header
.lrh
.destination_lid
;
2041 sqp
->ud_header
.lrh
.virtual_lane
= 0;
2042 sqp
->ud_header
.bth
.solicited_event
= !!(wr
->send_flags
& IB_SEND_SOLICITED
);
2043 ib_get_cached_pkey(ib_dev
, sqp
->qp
.port
, 0, &pkey
);
2044 sqp
->ud_header
.bth
.pkey
= cpu_to_be16(pkey
);
2045 if (sqp
->qp
.mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_SMI_OWNER
)
2046 sqp
->ud_header
.bth
.destination_qpn
= cpu_to_be32(wr
->wr
.ud
.remote_qpn
);
2048 sqp
->ud_header
.bth
.destination_qpn
=
2049 cpu_to_be32(mdev
->dev
->caps
.qp0_tunnel
[sqp
->qp
.port
- 1]);
2051 sqp
->ud_header
.bth
.psn
= cpu_to_be32((sqp
->send_psn
++) & ((1 << 24) - 1));
2052 if (mlx4_is_master(mdev
->dev
)) {
2053 if (mlx4_get_parav_qkey(mdev
->dev
, sqp
->qp
.mqp
.qpn
, &qkey
))
2056 if (vf_get_qp0_qkey(mdev
->dev
, sqp
->qp
.mqp
.qpn
, &qkey
))
2059 sqp
->ud_header
.deth
.qkey
= cpu_to_be32(qkey
);
2060 sqp
->ud_header
.deth
.source_qpn
= cpu_to_be32(sqp
->qp
.mqp
.qpn
);
2062 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
2063 sqp
->ud_header
.immediate_present
= 0;
2065 header_size
= ib_ud_header_pack(&sqp
->ud_header
, sqp
->header_buf
);
2068 * Inline data segments may not cross a 64 byte boundary. If
2069 * our UD header is bigger than the space available up to the
2070 * next 64 byte boundary in the WQE, use two inline data
2071 * segments to hold the UD header.
2073 spc
= MLX4_INLINE_ALIGN
-
2074 ((unsigned long) (inl
+ 1) & (MLX4_INLINE_ALIGN
- 1));
2075 if (header_size
<= spc
) {
2076 inl
->byte_count
= cpu_to_be32(1 << 31 | header_size
);
2077 memcpy(inl
+ 1, sqp
->header_buf
, header_size
);
2080 inl
->byte_count
= cpu_to_be32(1 << 31 | spc
);
2081 memcpy(inl
+ 1, sqp
->header_buf
, spc
);
2083 inl
= (void *) (inl
+ 1) + spc
;
2084 memcpy(inl
+ 1, sqp
->header_buf
+ spc
, header_size
- spc
);
2086 * Need a barrier here to make sure all the data is
2087 * visible before the byte_count field is set.
2088 * Otherwise the HCA prefetcher could grab the 64-byte
2089 * chunk with this inline segment and get a valid (!=
2090 * 0xffffffff) byte count but stale data, and end up
2091 * generating a packet with bad headers.
2093 * The first inline segment's byte_count field doesn't
2094 * need a barrier, because it comes after a
2095 * control/MLX segment and therefore is at an offset
2099 inl
->byte_count
= cpu_to_be32(1 << 31 | (header_size
- spc
));
2104 ALIGN(i
* sizeof (struct mlx4_wqe_inline_seg
) + header_size
, 16);
static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
{
	int i;

	for (i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;
		src_mac >>= 8;
	}
}
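
/*
 * Build the MLX transport header for QP0/QP1 (SMI/GSI) sends.  Handles
 * both IB ports (LRH/GRH) and RoCE ports (Ethernet header, optional
 * VLAN tag), and packs the header into one or two inline segments.
 */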
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
							   ah->av.ib.gid_index, &sgid.raw[0]);
			if (err)
				return err;
		} else {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid);
			if (err)
				return err;
		}

		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
			is_vlan = 1;
		}
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label    =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
		if (is_eth) {
			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
		} else if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sqp->ud_header.grh.source_gid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
						       subnet_prefix;
			sqp->ud_header.grh.source_gid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
					       guid_cache[ah->av.ib.gid_index];
		} else {
			ib_get_cached_gid(ib_dev,
					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
					  ah->av.ib.gid_index,
					  &sqp->ud_header.grh.source_gid);
		}
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		struct in6_addr in6;

		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
		memcpy(&in6, sgid.raw, sizeof(in6));

		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
			u8 smac[ETH_ALEN];

			mlx4_u64_to_smac(smac, mac);
			memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
		} else {
			/* use the src mac of the tunnel */
			memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
		}

		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err(" [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
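
/*
 * Check whether posting nreq more WQEs would overflow the work queue.
 * The common case is decided locklessly; only when the queue looks full
 * is the CQ lock taken to get a consistent head/tail snapshot.
 */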
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}
static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
}
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
}
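
/*
 * Build the datagram segment used when tunneling special QP traffic to
 * the proxy QPs: force loopback, drop the GRH, and target the per-port
 * QP0/QP1 tunnel QP owned by the master.
 */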
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_send_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	if (qpt == MLX4_IB_QPT_PROXY_GSI)
		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	else
		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
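
/*
 * Build an LSO segment: copy the packet headers into the WQE and encode
 * the MSS and header length.  Headers larger than a cache line need the
 * "blh" bit, which the caller folds into owner_opcode.
 */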
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				   wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
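
/*
 * Post a chain of send work requests: for each WR, reserve a WQE slot,
 * build the control segment, any transport-specific segments, and the
 * data segments (written in reverse order), then set the ownership bit
 * and ring the doorbell once for the whole chain.
 */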
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, wr);
				wqe  += sizeof(struct mlx4_wqe_bind_seg);
				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, wr);
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr,
						qp->mlx4_ib_qp_type);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
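
	return err;
}

/*
 * Post a chain of receive work requests.  For proxy (tunneled) special
 * QPs the first scatter entry is pointed at the driver's sqp_proxy_rcv
 * buffer so the tunnel header lands there on completion.
 */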
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
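
/*
 * Translate a firmware QP path into an ib_ah_attr.  The port and SL are
 * recovered from sched_queue, which packs them differently for RoCE and
 * IB link layers.
 */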
static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
			((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (qp->flags & MLX4_IB_QP_NETIF)
		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);