/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#define DB_REG_OFFSET			0x1000
/* Two special QPs per port; sizes the reserved QPN range used below */
#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
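/*
 * hns_roce_qp_event() dispatches an asynchronous hardware event to the QP
 * that owns it. The lookup and the refcount increment happen under the QP
 * table lock, so the QP cannot be freed while its event callback runs;
 * hns_roce_qp_free() below waits on &qp->free until the last reference is
 * dropped.
 */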
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
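/*
 * Translate a device-specific asynchronous event code into the matching
 * ib_event and deliver it through the consumer's registered handler, if
 * one was supplied at QP creation time.
 */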
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long qpn;
	int ret;

	ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
	if (ret == -1)
		return -ENOMEM;

	*base = qpn;

	return 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
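/*
 * GSI QPs arrive here with a fixed, pre-reserved QPN. Unlike
 * hns_roce_qp_alloc() below, no QPC or IRRL table memory is requested;
 * only the radix tree entry and the refcount/completion pair are set up.
 */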
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
		return ret;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;
}
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
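/*
 * Teardown handshake with hns_roce_qp_event(): drop the initial reference
 * taken at allocation time and wait until any in-flight event handler has
 * released its own reference before the context memory is returned.
 */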
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
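/*
 * QPNs below sqp_start + 2 * num_ports belong to the statically reserved
 * special QPs and are never handed back to the allocation bitmap.
 */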
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ exists, set the relative RQ numbers to zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "srq no need config max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		/* In v1 engine, parameter verification happens here */
		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		/* WQE is fixed for 64B */
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}
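/*
 * Both SQ sizing helpers below lay the queue buffer out the same way: the
 * SQ starts at offset 0 and the RQ follows at the next page boundary, so
 * buff_size is the sum of the two page-aligned regions. For example, a
 * 64-entry SQ of 64-byte WQEs fills exactly one 4 KiB page, putting the
 * RQ at offset 4096 (assuming a 4 KiB PAGE_SIZE).
 */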
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	/* Get buf size; SQ and RQ are each aligned to page_size */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	return 0;
}
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       enum ib_qp_type type,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 max_cnt;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	/* In v1 engine, parameter verification happens here */
	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

	/* Get buf size; SQ and RQ are each aligned to page_size */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);
	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	/* Report back the WR and SGE counts actually provided for the SQ */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
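/*
 * Common creation path for all QP types. For a userspace QP the WQE
 * buffer is pinned with ib_umem_get() and its pages are written into the
 * MTT; for a kernel QP the driver allocates the buffer itself, derives
 * the doorbell register addresses and keeps wrid arrays for completion
 * handling. A non-zero sqpn means the caller (the GSI path) has already
 * chosen the QPN, otherwise one is reserved from the bitmap.
 */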
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_out;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_out;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_out;
		}

		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
				    ilog2((unsigned int)hr_qp->umem->page_size),
				    &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  init_attr->qp_type, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_out;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base +
				     ROCEE_DB_OTHERS_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
				       &hr_qp->hr_buf)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_out;
		}

		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if ((init_attr->qp_type) == IB_QPT_GSI) {
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
	return ret;
}
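/*
 * RC QPs take their number from the bitmap inside the common path; a GSI
 * QP instead gets the fixed QPN sqp_start + num_ports + (port_num - 1),
 * which falls inside the range reserved at bitmap init time.
 */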
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_dev->caps.sqp_start +
						hr_dev->caps.num_ports +
						init_attr->port_num - 1, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		hr_qp->port = (init_attr->port_num - 1);
		hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
				     hr_dev->caps.num_ports +
				     init_attr->port_num - 1;
		break;
	}
	default: {
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = -EINVAL;
	int p;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = -EPERM;
		dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
			new_state);
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
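/*
 * When both CQ locks must be held, they are always taken in CQN order
 * (and released in the reverse order below), so two concurrent callers
 * can never deadlock on the same pair of CQs.
 */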
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
__be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(wr->ex.imm_data);
	case IB_WR_SEND_WITH_INV:
		return cpu_to_le32(wr->ex.invalidate_rkey);
	default:
		return 0;
	}
}
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

	if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
		dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\n",
			n, hr_qp->rq.wqe_cnt);
		return NULL;
	}

	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

	if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
		dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\n",
			n, hr_qp->sq.wqe_cnt);
		return NULL;
	}

	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
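/*
 * Overflow check used by the post verbs: the unlocked head - tail read is
 * the fast path; on apparent overflow the indices are re-read under the
 * CQ lock to synchronize with a concurrent poller before the post is
 * rejected.
 */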
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
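/*
 * The QPN bitmap covers all num_qps values but permanently reserves the
 * first sqp_start + SQP_NUM entries at the bottom for the special QPs
 * (two per port); reserved_from_top stays zero.
 */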
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* A port includes two SQPs; with six ports that is 12 in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1,
				   hr_dev->caps.sqp_start + SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}