/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

static void rc_timeout(unsigned long arg);
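
/*
 * restart_sge() below rewinds the send SGE state so that a resend can begin
 * in the middle of a work request: ((psn - wqe->psn) & QIB_PSN_MASK) is the
 * number of pmtu-sized packets already covered by earlier PSNs, so that many
 * bytes are skipped and the number of bytes still to send is returned.
 */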
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	qib_skip_sge(ss, len, 0);
	return wqe->length - len;
}
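
/*
 * start_timer - arm the RC retransmit timer for a QP.
 * Sets QIB_S_TIMER and arms s_timer to run rc_timeout() after
 * qp->timeout_jiffies (4.096 usec * (1 << qp->timeout), per the
 * comment in the function body).
 */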
static void start_timer(struct rvt_qp *qp)
{
	qp->s_flags |= QIB_S_TIMER;
	qp->s_timer.function = rc_timeout;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
	add_timer(&qp->s_timer);
}
/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct qib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		rvt_put_mr(e->rdma_sge.mr);
		e->rdma_sge.mr = NULL;
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & QIB_S_ACK_PENDING)
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
			ohdr->u.aeth = qib_compute_aeth(qp);
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		}
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = qib_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
		bth0 = qp->s_ack_state << 24;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		ohdr->u.aeth = qib_compute_aeth(qp);
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;

		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~QIB_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
			cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
				    QIB_AETH_CREDIT_SHIFT));
		ohdr->u.aeth = qib_compute_aeth(qp);
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);

	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
}
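
/*
 * Header sizes above and below are counted in 32-bit words:
 * LRH+BTH = (8+12)/4 = 5 words, an AETH adds 4/4 = 1 word, and a RETH
 * adds sizeof(struct ib_reth)/4 = 4 words, which is what the various
 * "hwords +=" statements account for.
 */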
/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;

	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/*
	 * The lock is needed to synchronize between the sending tasklet,
	 * the receive interrupt handler, and timeout resends.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & QIB_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				  IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
	}

	if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= QIB_S_WAIT_PSN;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= QIB_S_WAIT_FENCE;
			}
			wqe->psn = qp->s_next_psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 */
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
			}
			wqe->lpsn = wqe->psn;
			wqe->lpsn += (len - 1) / pmtu;
			qp->s_state = OP(SEND_FIRST);
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))

		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			wqe->lpsn = wqe->psn;
			wqe->lpsn += (len - 1) / pmtu;
			qp->s_state = OP(RDMA_WRITE_FIRST);
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
			/* Immediate data comes after RETH */
			ohdr->u.rc.imm_data =
				wqe->rdma_wr.wr.ex.imm_data;
			if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= QIB_S_WAIT_RDMAR;
			}
			qp->s_num_rd_atomic++;
			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
			/*
			 * Adjust s_next_psn to count the
			 * expected number of responses.
			 */
			qp->s_next_psn += (len - 1) / pmtu;
			wqe->lpsn = qp->s_next_psn++;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= QIB_S_WAIT_RDMAR;
			}
			qp->s_num_rd_atomic++;
			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
			wqe->lpsn = wqe->psn;
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
			}
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->atomic_wr.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->atomic_wr.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (qp->s_tail >= qp->s_size)
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		qp->s_state = OP(SEND_MIDDLE);
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
		/* Immediate data comes after the BTH */
		ohdr->u.imm_data = wqe->wr.ex.imm_data;
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		if (qp->s_cur >= qp->s_size)

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
		/* Immediate data comes after the BTH */
		ohdr->u.imm_data = wqe->wr.ex.imm_data;
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		if (qp->s_cur >= qp->s_size)

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		if (qp->s_cur == qp->s_size)
	}
	qp->s_sending_hpsn = bth2;
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & QIB_S_SEND_ONE) {
		qp->s_flags &= ~QIB_S_SEND_ONE;
		qp->s_flags |= QIB_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);

	qp->s_flags &= ~QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_ib_header hdr;
	struct qib_other_headers *ohdr;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)

	/* Construct the header with s_lock held so APM doesn't change it. */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       &qp->remote_ah_attr.grh, hwords, 0);
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
				   QIB_AETH_CREDIT_SHIFT));
	ohdr->u.aeth = qib_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);

	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 */
		spin_lock_irqsave(&qp->s_lock, flags);

	/*
	 * We have to flush after the PBC for correctness
	 * on some cpus or WC buffer can be written out of order.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	}
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);

	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
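
/*
 * Note on qib_send_rc_ack() above: the ACK is normally written directly to a
 * PIO send buffer.  When no buffer is available, or the QP still owes a
 * response, the QIB_S_ACK_PENDING/QIB_S_RESP_PENDING flags are set instead
 * and the send tasklet emits the ACK later via qib_make_rc_ack().
 */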
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe = get_swqe_ptr(qp, n);

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
		if (++n == qp->s_size)
		wqe = get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		qp->s_state = OP(SEND_LAST);
		opcode = wqe->wr.opcode;

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);

		/*
		 * This case shouldn't happen since it's only
		 */
		qp->s_state = OP(SEND_LAST);

	/*
	 * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= QIB_S_WAIT_PSN;
}
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		} else /* XXX need to handle delayed completion */
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->n_rc_resends++;
	else
		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
			 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
		qp->s_flags |= QIB_S_SEND_ONE;
}
/*
 * This is called from s_timer for missing responses.
 */
static void rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct qib_ibport *ibp;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & QIB_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->n_rc_timeouts++;
		qp->s_flags &= ~QIB_S_TIMER;
		del_timer(&qp->s_timer);
		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
		qib_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
void qib_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & QIB_S_WAIT_RNR) {
		qp->s_flags &= ~QIB_S_WAIT_RNR;
		del_timer(&qp->s_timer);
		qib_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;

	/* Find the work request corresponding to the given PSN. */
	wqe = get_swqe_ptr(qp, n);
	if (qib_cmp24(psn, wqe->lpsn) <= 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_sending_psn = wqe->lpsn + 1;
		else
			qp->s_sending_psn = psn + 1;
	}
	if (++n == qp->s_size)
}
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
{
	struct qib_other_headers *ohdr;
	struct rvt_swqe *wqe;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))

	while (qp->s_last != qp->s_acked) {
		wqe = get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		if (++qp->s_last >= qp->s_size)
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & QIB_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~QIB_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}
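
/* update_last_psn - record psn as the most recently ACKed PSN (s_last_psn). */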
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		if (++qp->s_last >= qp->s_size)
	} else
		ibp->n_rc_delayed_comp++;

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
		qp->s_acked = qp->s_cur;
		wqe = get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
		wqe = get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
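
/*
 * AETH decoding used by do_rc_ack() below: the top three bits (aeth >> 29)
 * distinguish an ACK (0), an RNR NAK (1) and a NAK (3); the 5-bit field
 * extracted with QIB_AETH_CREDIT_SHIFT/QIB_AETH_CREDIT_MASK carries the
 * credit count (ACK), the RNR timer index (RNR NAK) or the NAK code, and
 * the low 24 bits hold the MSN.
 */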
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	wqe = get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
				qp->r_flags |= QIB_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= QIB_R_RSP_SEND;
					atomic_inc(&qp->refcount);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(QIB_S_WAIT_FENCE |
				qib_schedule_send(qp);
			} else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
				qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
				qib_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
	}

	switch (aeth >> 29) {
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * reset the retransmit timer.
			 */
			/*
			 * We can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (qib_cmp24(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else if (qib_cmp24(qp->s_psn, psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		if (qp->s_flags & QIB_S_WAIT_ACK) {
			qp->s_flags &= ~QIB_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
		qib_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);

	case 1:         /* RNR NAK */
		if (qp->s_acked == qp->s_tail)
		if (qp->s_flags & QIB_S_WAIT_RNR)
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
		}
		if (qp->s_rnr_retry_cnt < 7)

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

		qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
		qp->s_flags |= QIB_S_WAIT_RNR;
		qp->s_timer.function = qib_rc_rnr_retry;
		qp->s_timer.expires = jiffies + usecs_to_jiffies(
			ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
					 QIB_AETH_CREDIT_MASK]);
		add_timer(&qp->s_timer);

		if (qp->s_acked == qp->s_tail)
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
			QIB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 */
			qib_restart_rc(qp, psn, 0);
			qib_schedule_send(qp);

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->n_other_naks++;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->n_other_naks++;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->n_other_naks++;
			if (qp->s_last == qp->s_acked) {
				qib_send_complete(qp, wqe, status);
				qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}

			/* Ignore other reserved NAK error codes */
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;

	default:                /* 2: reserved */
		/* Ignore reserved NAK codes. */
	}
}
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
			 struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	wqe = get_swqe_ptr(qp, qp->s_acked);

	while (qib_cmp24(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	qp->r_flags |= QIB_R_RDMAR_SEQ;
	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_SEND;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
			    struct qib_other_headers *ohdr,
			    void *data, u32 tlen,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn, u32 hdrsize, u32 pmtu,
			    struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_wc_status status;
	unsigned long flags;

	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
		/*
		 * If ACK'd PSN on SDMA busy list try to make progress to
		 * reclaim SDMA credits.
		 */
		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
			/*
			 * If send tasklet not running attempt to progress
			 */
			if (!(qp->s_flags & QIB_S_BUSY)) {
				/* Acquire SDMA Lock */
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				/* Invoke sdma make progress */
				qib_sdma_make_progress(ppd);
				/* Release SDMA Lock */
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))

	/* Ignore invalid responses. */
	if (qib_cmp24(psn, qp->s_next_psn) >= 0)

	/* Ignore duplicate responses. */
	diff = qib_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				qib_get_credit(qp, aeth);
		}
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & QIB_R_RDMAR_SEQ) {
		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
		qp->r_flags &= ~QIB_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
	wqe = get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			__be32 *p = ohdr->u.at.atomic_ack_eth;

			val = ((u64) be32_to_cpu(p[0]) << 32) |
		}
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
		wqe = get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
		if (unlikely(pmtu >= qp->s_rdma_read_len))

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= QIB_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & QIB_S_WAIT_ACK) {
			qp->s_flags &= ~QIB_S_WAIT_ACK;
			qib_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 */
		wqe = get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
		aeth = be32_to_cpu(ohdr->u.aeth);
		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
	}

	status = IB_WC_LOC_QP_OP_ERR;

	rdma_seq_err(qp, ibp, psn, rcd);

	status = IB_WC_LOC_LEN_ERR;
	if (qp->s_last == qp->s_acked) {
		qib_send_complete(qp, wqe, status);
		qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
			    void *data,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn,
			    int diff,
			    struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;

	/*
	 * Packet sequence error.
	 * A NAK will ACK earlier sends and RDMA writes.
	 * Don't queue the NAK if we already sent one.
	 */
	if (!qp->r_nak_state) {
		qp->r_nak_state = IB_NAK_PSN_ERROR;
		/* Use the expected PSN. */
		qp->r_ack_psn = qp->r_psn;
		/*
		 * Wait to send the sequence NAK until all packets
		 * in the receive queue have been processed.
		 * Otherwise, we end up propagating congestion.
		 */
		if (list_empty(&qp->rspwait)) {
			qp->r_flags |= QIB_R_RSP_NAK;
			atomic_inc(&qp->refcount);
			list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
		}
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			prev = QIB_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
		}
		e = &qp->s_ack_queue[prev];
		if (qib_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    qib_cmp24(psn, e->lpsn) <= 0)
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 */
		offset = ((psn - e->psn) & QIB_PSN_MASK) *
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		qp->s_tail_ack_queue = prev;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
		qp->s_tail_ack_queue = prev;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept a RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= QIB_S_RESP_PENDING;
	qp->r_nak_state = 0;
	qib_schedule_send(qp);

	spin_unlock_irqrestore(&qp->s_lock, flags);
}
void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = qib_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
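
/*
 * qib_update_ack_queue() below frees a slot in the responder's ack queue:
 * it is called when the queue is full and the entry at the tail has already
 * been sent, moving s_tail_ack_queue forward (wrapping once it would exceed
 * QIB_MAX_RDMA_ATOMIC) and resetting s_ack_state so the slot can be reused.
 */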
static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	if (next > QIB_MAX_RDMA_ATOMIC)
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * Called at interrupt level.
 */
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
	struct qib_other_headers *ohdr;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	unsigned long flags;

	hdrsize = 8 + 12;       /* LRH + BTH */
	ohdr = &hdr->u.l.oth;
	hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
	psn = be32_to_cpu(ohdr->bth[2]);

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
				hdrsize, pmtu, rcd);
	}

	/* Compute 24 bits worth of difference. */
	diff = qib_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))

		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = qib_get_rwqe(qp, 0);

	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		ret = qib_get_rwqe(qp, 1);

	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = qib_get_rwqe(qp, 0);
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;

	case OP(RDMA_WRITE_LAST):
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		qib_put_ss(&qp->r_sge);
		if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.dlid_path_bits = 0;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     cpu_to_be32(IB_BTH_SOLICITED)) != 0);

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = qib_get_rwqe(qp, 1);
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > QIB_MAX_RDMA_ATOMIC)
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += (len - 1) / pmtu;
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= QIB_S_RESP_PENDING;
		qib_schedule_send(qp);
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
		next = qp->r_head_ack_queue + 1;
		if (next > QIB_MAX_RDMA_ATOMIC)
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= QIB_S_RESP_PENDING;
		qib_schedule_send(qp);
	}

		/* NAK unknown opcodes. */
	}
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))

	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}

	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
	qib_send_rc_ack(qp);

	spin_unlock_irqrestore(&qp->s_lock, flags);
}