/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"
#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}
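/*
 * Worked example of the two-level QPN map (page size assumed for
 * illustration): each qpn_map page is a bitmap covering BITS_PER_PAGE
 * QPNs, so with a 4 KiB PAGE_SIZE one page covers 4096 * 8 = 32768
 * QPNs.  QPN 40000 then lives in qpt->map[1] at bit offset
 * 40000 - 32768 = 7232, and mk_qpn() inverts that mapping:
 * (1 * 32768) + 7232 == 40000.
 */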
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};
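/*
 * Decoding example: AETH credit code 0x9 advertises credit_table[0x9] =
 * 24 RWQEs, and code 0x1E advertises the table maximum of 32768.  Code
 * 0x1F (QIB_AETH_CREDIT_INVAL) is reserved to mean "unlimited credit";
 * see qib_get_credit() below.
 */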
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
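/*
 * Worked example of the QPN striping above (values assumed for
 * illustration): with dd->n_krcv_queues == 4, any candidate QPN whose
 * masked context bits ((qpn & qpt->mask) >> 1) decode to 4 or more is
 * skipped in a single step by (qpn | qpt->mask) + 2, which rounds the
 * QPN up to the start of the next stripe instead of testing each
 * intervening value.
 */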
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}
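/*
 * Note that masking with (qp_table_size - 1) assumes qp_table_size is a
 * power of two; e.g. with a table size of 256, a jhash value of
 * 0x12345678 selects bucket 0x78.
 */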
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();
	atomic_dec(&qp->refcount);
}
/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}
/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}
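/*
 * Sketch of the usual caller pattern (illustrative, not a function in
 * this file): a successful lookup returns with the reference count
 * already incremented, so the caller must drop it when finished, e.g.
 *
 *	qp = qib_lookup_qpn(ibp, qpn);
 *	if (qp) {
 *		... process the packet for qp ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */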
/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;
	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;
	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}
	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}
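	/*
	 * Worked example of the conversion above: the IB local ACK timeout
	 * is 4.096 usec * 2^timeout, so attr->timeout == 14 yields
	 * 4096 * 2^14 / 1000 = ~67109 usec, i.e. roughly 67 msec worth of
	 * jiffies from usecs_to_jiffies().
	 */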
	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
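/*
 * Worked example of the binary search above: with 5 RWQEs available
 * there is no exact match in credit_table[], and the loop converges on
 * x = 4 (credit_table[4] == 4), i.e. the AETH advertises the largest
 * credit value that does not exceed the actual count.
 */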
/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	gfp_t gfp;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable in RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;
	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct qib_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct qib_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}
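		/*
		 * Sizing example for the receive queue allocation above
		 * (values assumed for illustration): with max_recv_wr == 255
		 * and max_recv_sge == 2, r_rq.size is 256 and each entry
		 * occupies sz = 2 * sizeof(struct ib_sge) +
		 * sizeof(struct qib_rwqe), so the allocation is one struct
		 * qib_rwq header followed by 256 such entries in a single
		 * contiguous vmalloc area.
		 */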
		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long) qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;
	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}
/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;          /* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}
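/*
 * QPNs 0 and 1 are reserved by the IB architecture for the SMI and GSI
 * QPs (handled separately in alloc_qpn()), so ordinary allocation
 * starts just above them.
 */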
/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}
/**
 * qib_get_credit - update the send credit state of a QP from an AETH
 * @qp: the qp whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
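/*
 * Worked example of the LSN update above: if the AETH carries MSN 100
 * and a credit code that decodes to 24 RWQEs, the new limit sequence
 * number becomes (100 + 24) & QIB_MSN_MASK = 124, so the sender may
 * issue requests up to SSN 124 before stalling on
 * QIB_S_WAIT_SSN_CREDIT again.
 */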
#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif