/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"
/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}
static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.m_sg, rm->data.m_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.m_rdma_op.r_active) {
		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;

		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->r_sg, op->r_nents,
				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1.	Notify when we received the ACK on the RDS message
		 *	that was queued with the RDMA. This provides reliable
		 *	notification of RDMA status at the expense of a one-way
		 *	packet delay.
		 *  2.	Notify when the IB stack gives us the completion event for
		 *	the RDMA operation.
		 *  3.	Notify when the IB stack gives us the completion event for
		 *	the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);

		if (rm->rdma.m_rdma_op.r_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
	}

	if (rm->atomic.op_active) {
		struct rm_atomic_op *op = &rm->atomic;

		/* unmap atomic recvbuf */
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);

		rds_ib_send_complete(rm, wc_status, rds_atomic_send_complete);

		if (rm->atomic.op_type == RDS_ATOMIC_TYPE_CSWP)
			rds_stats_inc(s_atomic_cswp);
		else
			rds_stats_inc(s_atomic_fadd);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;
	}
}
void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (!send->s_rm || send->s_wr.opcode == 0xdead)
			continue;
		rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
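/*
 * For reference, a minimal sketch of the bookkeeping that the ring helpers
 * (rds_ib_ring_oldest() and rds_ib_ring_completed(), defined elsewhere in
 * the RDS IB code) perform for the completion handler below.  This is an
 * illustrative, hypothetical helper rather than the real implementation:
 * it only shows the idea of counting how many ring entries lie between the
 * oldest outstanding entry and the completed wr_id, inclusive, wrapping at
 * the ring size.
 */
static inline u32 rds_ib_ring_completed_sketch(u32 w_nr, u32 wr_id, u32 oldest)
{
	/* wr_id has not wrapped past the end of the ring */
	if (oldest <= wr_id)
		return wr_id - oldest + 1;

	/* wr_id wrapped: count the tail of the ring plus the head */
	return w_nr - oldest + wr_id + 1;
}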
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_ATOMIC_CMP_AND_SWP:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If a RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * never learns that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					rds_ib_send_unmap_rm(ic, send, wc.status);
					rds_ib_send_complete(rm, wc.status, rds_rdma_send_complete);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in a RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message sends.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
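/*
 * For reference, the two counters are packed into ic->i_credits as a single
 * atomic_t: send credits in the low 16 bits, posted credits in the high 16
 * bits.  A sketch of the accessor macros (the real ones live in ib.h and
 * may differ in detail):
 *
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *
 * With this layout, atomic_add() in the receive path can top up either half
 * without disturbing the other, and the atomic_cmpxchg() below updates both
 * halves in one shot.
 */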
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
static void
rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
			struct rds_ib_send_work *send, unsigned int pos,
			unsigned long buffer, unsigned int length,
			int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;

	if (length != 0) {
		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;
}
/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		if (rm->data.m_nents) {
			rm->data.m_count = ib_dma_map_sg(dev,
							 rm->data.m_sg,
							 rm->data.m_nents,
							 DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
			if (rm->data.m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.m_rdma_op.r_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_ib_xmit_populate_wr(ic, send, pos,
					ib_sg_dma_address(dev, scat) + off, len,
					send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.m_sg[rm->data.m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = sent;
out:
	return ret;
}
/*
 * Issue atomic operation.
 * A simplified version of the rdma case, we always map 1 SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_compare;
		send->s_wr.wr.atomic.swap = op->op_swap_add;
	} else {
		send->s_wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_swap_add;
		send->s_wr.wr.atomic.swap = 0;
	}
	send->s_wr.send_flags = IB_SEND_SIGNALED;
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	u32 num_sge;
	int sent;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents, (op->r_write) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}
void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}