/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;		/* Next send WQE on Tavor */
	__be32 snd_db_index;		/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;		/* Next recv WQE on Tavor */
	__be32 rcv_db_index;		/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;		/* reserved on Tavor */
	__be16 sq_wqe_counter;		/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
enum {
	MTHCA_NEXT_DBD       = 1 << 7,
	MTHCA_NEXT_FENCE     = 1 << 6,
	MTHCA_NEXT_CQ_UPDATE = 1 << 3,
	MTHCA_NEXT_EVENT_GEN = 1 << 2,
	MTHCA_NEXT_SOLICIT   = 1 << 1,

	MTHCA_MLX_VL15       = 1 << 17,
	MTHCA_MLX_SLR        = 1 << 16
};

enum {
	MTHCA_INVAL_LKEY = 0x100
};
struct mthca_next_seg {
	__be32 nda_op;		/* [31:6] next WQE [4:0] next opcode */
	__be32 ee_nds;		/* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
	__be32 flags;		/* [3] CQ [2] Event [1] Solicit */
	__be32 imm;		/* immediate data */
};

struct mthca_tavor_ud_seg {
	u32    reserved1;
	__be32 lkey;
	__be64 av_addr;
	u32    reserved2[4];
	__be32 dqpn;
	__be32 qkey;
	u32    reserved3[2];
};

struct mthca_arbel_ud_seg {
	__be32 av[8];
	__be32 dqpn;
	__be32 qkey;
	u32    reserved[2];
};

struct mthca_bind_seg {
	__be32 flags;		/* [31] Atomic [30] rem write [29] rem read */
	u32    reserved;
	__be32 new_rkey;
	__be32 lkey;
	__be64 addr;
	__be64 length;
};

struct mthca_raddr_seg {
	__be64 raddr;
	__be32 rkey;
	__be32 reserved;
};

struct mthca_atomic_seg {
	__be64 swap_add;
	__be64 compare;
};

struct mthca_data_seg {
	__be32 byte_count;
	__be32 lkey;
	__be64 addr;
};

struct mthca_mlx_seg {
	__be32 nda_op;
	__be32 nds;
	__be32 flags;		/* [17] VL15 [16] SLR [14:12] static rate
				   [11:8] SL [3] C [2] E */
	__be16 rlid;
	__be16 vcrc;
};
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
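/*
 * Worked example of the indirect case above (purely illustrative
 * values): with PAGE_SIZE = 4096, sq.wqe_shift = 6 (64-byte WQEs) and
 * send_wqe_offset = 2048, send WQE n = 100 lives at byte offset
 * 2048 + 100 * 64 = 8448, i.e. in page_list[8448 >> 12] = page_list[2]
 * at offset 8448 & 4095 = 256 within that page.
 */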
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR] = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV                  |
					 IB_QP_PATH_MTU            |
					 IB_QP_DEST_QPN            |
					 IB_QP_RQ_PSN              |
					 IB_QP_MAX_DEST_RD_ATOMIC),
				[RC]  = (IB_QP_AV                  |
					 IB_QP_PATH_MTU            |
					 IB_QP_DEST_QPN            |
					 IB_QP_RQ_PSN              |
					 IB_QP_MAX_DEST_RD_ATOMIC  |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX   |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = (IB_QP_SQ_PSN            |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[RC]  = (IB_QP_TIMEOUT           |
					 IB_QP_RETRY_CNT         |
					 IB_QP_RNR_RETRY         |
					 IB_QP_SQ_PSN            |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE             |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE             |
					 IB_QP_ALT_PATH              |
					 IB_QP_ACCESS_FLAGS          |
					 IB_QP_PKEY_INDEX            |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE             |
					 IB_QP_ALT_PATH              |
					 IB_QP_ACCESS_FLAGS          |
					 IB_QP_PKEY_INDEX            |
					 IB_QP_MIN_RNR_TIMER         |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE             |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE             |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS          |
					 IB_QP_ALT_PATH              |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS          |
					 IB_QP_ALT_PATH              |
					 IB_QP_PATH_MIG_STATE        |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE             |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD] = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE             |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE             |
					 IB_QP_ALT_PATH              |
					 IB_QP_ACCESS_FLAGS          |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE             |
					 IB_QP_ALT_PATH              |
					 IB_QP_ACCESS_FLAGS          |
					 IB_QP_MIN_RNR_TIMER         |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE             |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD] = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX            |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_AV                    |
					 IB_QP_MAX_QP_RD_ATOMIC      |
					 IB_QP_MAX_DEST_RD_ATOMIC    |
					 IB_QP_CUR_STATE             |
					 IB_QP_ALT_PATH              |
					 IB_QP_ACCESS_FLAGS          |
					 IB_QP_PKEY_INDEX            |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV                    |
					 IB_QP_TIMEOUT               |
					 IB_QP_RETRY_CNT             |
					 IB_QP_RNR_RETRY             |
					 IB_QP_MAX_QP_RD_ATOMIC      |
					 IB_QP_MAX_DEST_RD_ATOMIC    |
					 IB_QP_CUR_STATE             |
					 IB_QP_ALT_PATH              |
					 IB_QP_ACCESS_FLAGS          |
					 IB_QP_PKEY_INDEX            |
					 IB_QP_MIN_RNR_TIMER         |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX            |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE),
				[RC]  = (IB_QP_CUR_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.enable_1x = 1;
	param.enable_4x = 1;
	param.vl_cap    = dev->limits.vl_cap;
	param.mtu_cap   = dev->limits.mtu_cap;
	param.gid_cap   = dev->limits.gid_table_len;
	param.pkey_cap  = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		qp_context->rq_size_stride =
			((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
		qp_context->sq_size_stride =
			((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
	}

	/* leave arbel_sched_queue as 0 */
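	/*
	 * Illustration of the rq/sq_size_stride encoding above
	 * (hypothetical queue): rq.max = 256 and rq.wqe_shift = 6 give
	 * ((ffs(256) - 1) << 3) | (6 - 4) = (8 << 3) | 2 = 0x42, i.e.
	 * log2 of the queue size in bits [7:3] and log2 of the WQE
	 * stride in 16-byte units in bits [2:0].
	 */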
	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	/* XXX alt_path */

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SRE           |
					     MTHCA_QP_BIT_SWE           |
					     MTHCA_QP_BIT_SAE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
						       ffs(attr->max_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/*
		 * Only enable RDMA/atomics if we have responder
		 * resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off RDMA/atomics as responder.
			 * (RWE/RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero.  Turn on RDMA/atomics as appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);

			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		for (rra_max = 0;
		     1 << rra_max < attr->max_dest_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */

		qp_context->params2      |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_dest_rd_atomic;
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));
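	/*
	 * Example of the RDB address computation above (made-up numbers):
	 * with rdb_base = 0x10000, rdb_shift = 2 and an entry size of 32,
	 * QP index 5 gets ra_buff_indx = 0x10000 + (5 * 32 << 2) = 0x10280;
	 * each QP owns a (1 << rdb_shift)-entry block of the RDB table.
	 */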
	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we are moving QP0 to RTR, bring the IB link up; if we
	 * are moving QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	return err;
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int i;
	int npages;
	int shift;
	dma_addr_t t;
	u64 *dma_list = NULL;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;
	case UD:
		if (mthca_is_memfree(dev))
			size += sizeof (struct mthca_arbel_ud_seg);
		else
			size += sizeof (struct mthca_tavor_ud_seg);
		break;
	default:
		/* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);
	}

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);
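	/*
	 * Example of the sizing above (hypothetical QP): rq.max = 64 WQEs
	 * of 1 << 7 = 128 bytes occupy bytes 0..8191; with sq.wqe_shift =
	 * 8, ALIGN(8192, 256) = 8192, so send WQEs begin at
	 * send_wqe_offset = 8192, aligned to their own stride.
	 */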
	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
		qp->is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
				  size, shift);

		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
							  &t, GFP_KERNEL);
		if (!qp->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&qp->queue.direct, mapping, t);

		memset(qp->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		qp->is_direct = 0;
		npages = size / PAGE_SIZE;
		shift = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		qp->queue.page_list = kmalloc(npages *
					      sizeof *qp->queue.page_list,
					      GFP_KERNEL);
		if (!qp->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i) {
			qp->queue.page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!qp->queue.page_list[i].buf)
				goto err_out_free;

			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
			dma_list[i] = t;
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
				  npages, 0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &qp->mr);
	if (err)
		goto err_out_free;

	kfree(dma_list);
	return 0;

 err_out_free:
	if (qp->is_direct) {
		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
				  pci_unmap_addr(&qp->queue.direct, mapping));
	} else
		for (i = 0; i < npages; ++i) {
			if (qp->queue.page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  qp->queue.page_list[i].buf,
						  pci_unmap_addr(&qp->queue.page_list[i],
								 mapping));
		}

 err_out:
	kfree(qp->wrid);
	kfree(dma_list);
	return err;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int i;
	int size = PAGE_ALIGN(qp->send_wqe_offset +
			      (qp->sq.max << qp->sq.wqe_shift));

	if (qp->is_direct) {
		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
				  pci_unmap_addr(&qp->queue.direct, mapping));
	} else {
		for (i = 0; i < size / PAGE_SIZE; ++i) {
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  qp->queue.page_list[i].buf,
					  pci_unmap_addr(&qp->queue.page_list[i],
							 mapping));
		}
	}

	kfree(qp->wrid);
}
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int ret = 0;

	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return qp->rq.db_index;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return qp->sq.db_index;
		}
	}

	return ret;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
	wq->last      = NULL;
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	qp->state    	 = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_qp *qp)
{
	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr  > 65536 || cap->max_recv_wr  > 65536 ||
	    cap->max_send_sge > 64    || cap->max_recv_sge > 64)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));
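	/*
	 * Worked example (hypothetical caps): max_inline_data = 60 needs
	 * ALIGN(60 + 4, 16) / 16 = 4 gather slots, because the inline data
	 * plus its 4-byte header is carved into 16-byte chunks, each the
	 * size of one mthca_data_seg.
	 */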
	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
		return -EINVAL;

	return 0;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err = 0;

	err = mthca_set_qp_size(dev, cap, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);

		mthca_free_mr(dev, &qp->mr);
		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  sqp->ud_header.grh_present,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
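/*
 * Note on the unsigned arithmetic above (illustrative numbers): head
 * and tail advance without masking, so with head = 0x10002 and tail =
 * 0xfffe, cur = head - tail = 4 outstanding WQEs even across counter
 * wraparound; the work queue overflows once cur + nreq reaches wq->max.
 */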
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}
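		/*
		 * byte_count bit 31 flags an inline segment, so
		 * (1 << 31) | 4 appends the four zero bytes written above
		 * as a placeholder where the raw MLX packet's ICRC belongs.
		 */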
		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    mthca_opcode[wr->opcode]);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
		}

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    mthca_opcode[wr->opcode]);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32((nreq << 24)                  |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
		       int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	if (mthca_is_memfree(dev))
		*dbd = 1;
	else
		*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
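	/*
	 * E.g. reserved_qps = 17 gives sqp_start = (17 + 1) & ~1UL = 18,
	 * so the special QPNs are 18/19 (SMI, ports 1/2) and 20/21 (GSI),
	 * keeping port 1's SMI QP number even.
	 */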
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_alloc_cleanup(&dev->qp_table.alloc);
}