/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

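/*
 * Editorial worked example (not from the original source): with 4 KiB
 * pages, BITS_PER_PAGE is 4096 * 8 = 32768, so each qpn_map page tracks
 * 32768 QPNs.  For map == &qpt->map[1] and off == 7, mk_qpn() returns
 * 1 * 32768 + 7 = 32775.
 */
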
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}

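/*
 * Editorial worked example (hypothetical values, not from the original
 * source): qpt->mask reserves low QPN bits for receive-context
 * selection.  Called with off == 4, mask == 0x6 and n == 1, the
 * incremented offset 5 maps to context (5 & 6) >> 1 == 2, which is
 * beyond the single kernel receive queue, so the offset is bumped to
 * (5 | 6) + 2 == 9, the start of the next usable QPN group.
 */
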
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

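/*
 * Editorial example (values consistent with the table above): the
 * 5-bit AETH credit code indexes this roughly logarithmic table, so
 * code 0x5 advertises 6 RWQE credits and code 0x1E advertises 32768;
 * qib_compute_aeth() below picks the largest code whose entry does
 * not exceed the actual RWQE count.
 */
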
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

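/*
 * Editorial trace (hypothetical first call, not from the original
 * source): with qpt->last == 1 and qpt->mask == 0, an RC allocation
 * probes qpn = qpt->last + 2 == 3, finds bit 3 of map[0] clear,
 * sets it, records qpt->last = 3 and returns 3.  QPNs 0 and 1 are
 * never handed out here; they are claimed through the
 * IB_QPT_SMI/IB_QPT_GSI branch above.
 */
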
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}

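/*
 * Editorial note (assumption based on the mask arithmetic above):
 * ANDing with (dev->qp_table_size - 1) only produces a uniform bucket
 * index when qp_table_size is a power of two; e.g. a table of 256
 * buckets reduces jhash_1word(qpn, dev->qp_rnd) modulo 256 via the
 * 0xff mask.
 */
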
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

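/*
 * Editorial note (general RCU publication pattern, not qib-specific
 * documentation): rcu_assign_pointer() orders the initialization of
 * *qp before the pointer store, so a reader walking dev->qp_table[n]
 * under rcu_read_lock() in qib_lookup_qpn() observes a fully
 * initialized QP without taking qpt_lock.
 */
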
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp1, NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device whose QP tables to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

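/*
 * Editorial usage sketch (hypothetical caller, not from the original
 * source): a successful lookup returns with qp->refcount raised, so
 * callers are expected to pair it with a release such as:
 *
 *	qp = qib_lookup_qpn(ibp, qpn);
 *	if (qp) {
 *		... process the packet ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 *
 * which lets the wait_event() on qp->wait in qib_destroy_qp() and
 * qib_modify_qp() complete.
 */
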
/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
{
	struct qib_qp_priv *priv = qp->priv;

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&priv->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(
				 &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

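	/*
	 * Editorial arithmetic check (example value, not from the
	 * original source): the IBTA local ACK timeout is
	 * 4.096 usec * 2^timeout, so attr->timeout == 14 gives
	 * 4096 * (1 << 14) / 1000 == 67108 usec, roughly 67 msec,
	 * before conversion to jiffies.
	 */
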
	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int) credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

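/*
 * Editorial worked example (hypothetical values, not from the original
 * source): with head == 110, tail == 10 and r_rq.size == 256, there
 * are 100 free RWQEs.  The binary search above settles on x == 13,
 * since credit_table[13] == 96 is the largest entry not exceeding
 * 100, so the returned AETH carries credit code 0xD alongside the
 * 24-bit MSN.
 */
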
/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	struct ib_qp *ret;
	size_t sz;
	size_t sg_list_sz;
	gfp_t gfp;
	struct qib_qp_priv *priv;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable in RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		priv = kzalloc(sizeof(*priv), gfp);
		if (!priv) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp_hdr;
		}
		priv->owner = qp;
		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
		if (!priv->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&priv->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&priv->s_work, qib_do_send);
		INIT_LIST_HEAD(&priv->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(priv->s_hdr);
	kfree(priv);
bail_qp_hdr:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device data
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - handle an AETH credit update for a QP
 * @qp: the qp whose send work queue may need scheduling
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

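/*
 * Editorial worked example (hypothetical values, not from the original
 * source): if the AETH credit field decodes to 0x9, credit_table[9]
 * is 24, so the new limit becomes (aeth + 24) & QIB_MSN_MASK; when
 * that LSN is ahead of qp->s_lsn the sender may issue up to 24 more
 * requests beyond the acknowledged MSN before stalling for credits.
 */
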
#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

1383 void qib_qp_iter_print(struct seq_file
*s
, struct qib_qp_iter
*iter
)
1385 struct rvt_swqe
*wqe
;
1386 struct rvt_qp
*qp
= iter
->qp
;
1387 struct qib_qp_priv
*priv
= qp
->priv
;
1389 wqe
= get_swqe_ptr(qp
, qp
->s_last
);
1391 "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
1394 qp_type_str
[qp
->ibqp
.qp_type
],
1399 atomic_read(&priv
->s_dma_busy
),
1400 !list_empty(&priv
->iowait
),
1405 qp
->s_psn
, qp
->s_next_psn
,
1406 qp
->s_sending_psn
, qp
->s_sending_hpsn
,
1407 qp
->s_last
, qp
->s_acked
, qp
->s_cur
,
1408 qp
->s_tail
, qp
->s_head
, qp
->s_size
,
1410 qp
->remote_ah_attr
.dlid
);