/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_common.h"
#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
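/*
 * A layout sketch (illustrative, assuming 4 KB pages): each qpn_map
 * page is a plain bitmap, so one page covers PAGE_SIZE * BITS_PER_BYTE
 * = 32768 QPNs, and mk_qpn() inverts the (map index, bit offset)
 * decomposition.  QPN 40000, for example, would live in qpt->map[1]
 * at bit offset 40000 - 32768 = 7232.
 */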
#define TRANS_INVALID	0
#define TRANS_ANY2RST	1
#define TRANS_RST2INIT	2
#define TRANS_INIT2INIT	3
#define TRANS_INIT2RTR	4
#define TRANS_RTR2RTS	5
#define TRANS_RTS2RTS	6
#define TRANS_SQERR2RTS	7
#define TRANS_ANY2ERR	8
#define TRANS_RTS2SQD	9  /* XXX Wait for expected ACKs & signal event */
#define TRANS_SQD2SQD	10 /* error if not drained & parameter change */
#define TRANS_SQD2RTS	11 /* error if not drained */
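/*
 * For reference, the state abbreviations above follow the IB spec:
 * RST = Reset, INIT = Initialized, RTR = Ready to Receive,
 * RTS = Ready to Send, SQD = Send Queue Drain and SQERR = Send Queue
 * Error.
 */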
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
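/*
 * Note the pattern in the table: after the first few entries, the
 * values alternate between powers of two and 1.5 times a power of two,
 * so 31 codes are enough to cover 0..32768 credits while leaving one
 * 5-bit code free for the invalid marker (IPATH_AETH_CREDIT_INVAL,
 * used in ipath_compute_aeth() below).
 */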
static u32 alloc_qpn(struct ipath_qp_table *qpt)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			unsigned long flags;

			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irqsave(&qpt->lock, flags);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irqrestore(&qpt->lock, flags);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = 0;

bail:
	return ret;
}
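/*
 * Concurrency note on the allocator above: test_and_set_bit() is
 * atomic, so the common allocation path never takes qpt->lock; the
 * lock is only needed to resolve the race between two CPUs installing
 * a bitmap page for the same map slot at the same time.
 */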
static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}
/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	u32 qpn;
	unsigned long flags;
	int ret;

	if (type == IB_QPT_SMI)
		qpn = 0;
	else if (type == IB_QPT_GSI)
		qpn = 1;
	else {
		/* Allocate the next available QPN */
		qpn = alloc_qpn(qpt);
		if (qpn == 0) {
			ret = -ENOMEM;
			goto bail;
		}
	}
	qp->ibqp.qp_num = qpn;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	qpn %= qpt->max;
	qp->next = qpt->table[qpn];
	qpt->table[qpn] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}
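/*
 * Hash table shape, for reference: a QP lands in bucket
 * qp_num % qpt->max and collisions are chained through qp->next with
 * the newest entry at the head, so lookups must compare full QP
 * numbers (see ipath_lookup_qpn() below).
 */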
/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	/* If QPN is not reserved, mark QPN free in the bitmap. */
	if (qp->ibqp.qp_num > 1)
		free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			if (qp->ibqp.qp_num > 1)
				free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				_VERBS_INFO("QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long)qpt->map[n].page);
	}
}
/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
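/*
 * A typical caller pattern (an illustrative sketch, not a quote from
 * the receive path):
 *
 *	qp = ipath_lookup_qpn(&dev->qp_table, dest_qp_num);
 *	if (qp) {
 *		... deliver the packet to the QP ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 *
 * The wake_up() pairs with the wait_event() in ipath_free_qp() above.
 */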
/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	qp->s_hdrwords = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.head = 0;
		qp->r_rq.tail = 0;
	}
	qp->r_reuse_sge = 0;
}
/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 *
 * Flushes both send and receive work queues.
 * QP s_lock should be held and interrupts disabled.
 */
void ipath_error_qp(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;

	_VERBS_INFO("QP%d/%d in error state\n",
		    qp->ibqp.qp_num, qp->remote_qpn);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.status = IB_WC_WR_FLUSH_ERR;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	wc.opcode = IB_WC_RECV;
	spin_lock(&qp->r_rq.lock);
	while (qp->r_rq.tail != qp->r_rq.head) {
		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
		if (++qp->r_rq.tail >= qp->r_rq.size)
			qp->r_rq.tail = 0;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	spin_unlock(&qp->r_rq.lock);
}
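/*
 * Flush semantics, for reference: every outstanding send and receive
 * WQE is completed with IB_WC_WR_FLUSH_ERR, which is what a verbs
 * consumer polling the CQs expects once a QP enters the error state.
 * Only r_rq.lock is taken locally; the caller already holds s_lock as
 * noted in the comment above.
 */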
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV)
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		ipath_error_qp(qp);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}
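/*
 * For context, consumers reach this through the core ib_modify_qp()
 * verb.  A minimal sketch of bringing an RC QP up (illustrative only;
 * error handling and most attributes omitted):
 *
 *	struct ib_qp_attr attr;
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *		     IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *	attr.qp_state = IB_QPS_RTR;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *		     IB_QP_DEST_QPN | IB_QP_RQ_PSN | ...);
 *	attr.qp_state = IB_QPS_RTS;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN | ...);
 *
 * ib_modify_qp_is_ok() above is what enforces that each transition and
 * attribute mask is legal for the QP type.
 */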
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = 1;
	attr->max_dest_rd_atomic = 1;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = 0;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	init_attr->sq_sig_type =
		(qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
		? IB_SIGNAL_REQ_WR : 0;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}
/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;

		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = qp->r_rq.head - qp->r_rq.tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
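/*
 * A worked example of the search above: with 10 free RWQEs the loop
 * converges on x = 6, since credit_table[6] = 8 is the largest entry
 * that does not exceed 10 (credit_table[7] = 12 is too big).  The
 * receiver therefore advertises 8 credits; rounding is always down.
 */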
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		qp = kmalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			vfree(swq);
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		if (init_attr->srq) {
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				vfree(swq);
				kfree(qp);
				ret = ERR_PTR(-ENOMEM);
				goto bail;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_ruc_send,
			     (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
			1 << IPATH_S_SIGNAL_REQ_WR : 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			vfree(swq);
			vfree(qp->r_rq.wq);
			kfree(qp);
			ret = ERR_PTR(err);
			goto bail;
		}
		ipath_reset_qp(qp);

		/* Tell the core driver that the kernel SMA is present. */
		if (init_attr->qp_type == IB_QPT_SMI)
			ipath_layer_set_verbs_flags(dev->dd,
						    IPATH_VERBS_KERNEL_SMA);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	ret = &qp->ibqp;

bail:
	return ret;
}
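/*
 * Sizing note (this follows directly from the arithmetic above): WQEs
 * are allocated with their scatter/gather lists inline, so a QP
 * created with max_send_wr = 64 and max_send_sge = 4 vmalloc()s
 * 65 * (sizeof(struct ipath_swqe) + 4 * sizeof(struct ipath_sge))
 * bytes for the send queue.  The extra slot keeps head == tail
 * unambiguous as the empty-queue test for the circular queues.
 */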
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	/* Tell the core driver that the kernel SMA is gone. */
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		ipath_layer_set_verbs_flags(dev->dd, 0);

	spin_lock_irqsave(&qp->r_rq.lock, flags);
	spin_lock(&qp->s_lock);
	qp->state = IB_QPS_ERR;
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	vfree(qp->s_wq);
	vfree(qp->r_rq.wq);
	kfree(qp);
	return 0;
}
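/*
 * The teardown order above matters: the QP is moved to the error state
 * first so no new work starts, the send tasklet is then killed, the QP
 * is unlinked from the timeout/PIO-wait lists and the QPN hash table
 * (ipath_free_qp() also waits for the reference count to reach zero),
 * and only then is the memory freed.
 */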
/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}
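/*
 * Note that only the hash table is allocated here; the QPN bitmap
 * pages all start out NULL and are populated lazily by alloc_qpn()
 * the first time a QPN in the corresponding range is requested.
 */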
/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	_VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
		    qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		/* Re-fetch the WQE: qp->s_last has just been advanced. */
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}
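/*
 * Completion ordering, for reference: the failed WQE is reported with
 * the caller-supplied status first, then everything still queued
 * behind it completes with IB_WC_WR_FLUSH_ERR.  The QP is left in the
 * SQE state, from which a consumer recovers by modifying the QP back
 * to RTS (the TRANS_SQERR2RTS transition near the top of this file).
 */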
/**
 * ipath_get_credit - update the send credit state of a QP
 * @qp: the QP whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
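/*
 * A worked example (illustrative): an AETH carrying MSN 100 and credit
 * code 6 yields s_lsn = 100 + credit_table[6] = 108 (modulo 2^24,
 * which is why ipath_cmp24() is used for the comparisons), so send
 * WQEs whose SSN falls within that limit are unblocked and the send
 * tasklet is rescheduled.
 */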