/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

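/*
 * Tear down a message's DMA mappings once its send work requests have
 * completed, and deliver the RDMA/atomic completion notifications that
 * were queued along with it.
 */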
static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.m_sg, rm->data.m_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.m_rdma_op.r_active) {
		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;

		if (op->r_mapped) {
			ib_dma_unmap_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents,
					op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			op->r_mapped = 0;
		}

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 * 1. Notify when we received the ACK on the RDS message
		 *    that was queued with the RDMA. This provides reliable
		 *    notification of RDMA status at the expense of a one-way
		 *    packet delay.
		 * 2. Notify when the IB stack gives us the completion event for
		 *    the RDMA operation.
		 * 3. Notify when the IB stack gives us the completion event for
		 *    the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);

		if (rm->rdma.m_rdma_op.r_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
	}

	if (rm->atomic.op_active) {
		struct rm_atomic_op *op = &rm->atomic;

		/* unmap atomic recvbuf */
		if (op->op_mapped) {
			ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
					DMA_FROM_DEVICE);
			op->op_mapped = 0;
		}

		rds_ib_send_complete(rm, wc_status, rds_atomic_send_complete);

		if (rm->atomic.op_type == RDS_ATOMIC_TYPE_CSWP)
			rds_stats_inc(s_atomic_cswp);
		else
			rds_stats_inc(s_atomic_fadd);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

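/*
 * Pre-initialize every entry in the send ring: each work request starts
 * out as a plain SEND with a single SGE pointing at that slot's entry in
 * the long-lived ring of DMA-mapped headers.
 */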
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (!send->s_rm || send->s_wr.opcode == 0xdead)
			continue;
		rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
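/*
 * Note that a single completion may retire several ring entries: the
 * handler below reads the oldest outstanding slot, asks
 * rds_ib_ring_completed() how many entries the reported wr_id covers,
 * unmaps each of them in order, and only then returns them to the ring
 * via rds_ib_ring_free().
 */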
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_ATOMIC_CMP_AND_SWP:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					rds_ib_send_unmap_rm(ic, send, wc.status);
					rds_ib_send_complete(rm, wc.status, rds_rdma_send_complete);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
					  "send completion on %pI4 "
					  "had status %u, disconnecting and reconnecting\n",
					  &conn->c_faddr, wc.status);
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
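/*
 * A sketch of the packing, assuming the IB_GET_ and IB_SET_ credit helpers
 * in ib.h keep the send credits in the low 16 bits of the atomic and the
 * posted credits in the high 16 bits: with 3 send credits and 5 posted
 * credits the counter reads 0x00050003, and handing out 2 send credits
 * while advertising 4 posted ones is a single cmpxchg from 0x00050003 to
 * 0x00010001.
 */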
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and has to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

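/*
 * Fill in one send work request for a message fragment: a data SGE
 * covering the already-mapped payload plus a header SGE pointing at this
 * slot's entry in the header ring. Zero-length fragments carry only the
 * header SGE.
 */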
static inline void
rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
			struct rds_ib_send_work *send, unsigned int pos,
			unsigned long buffer, unsigned int length,
			int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;
}

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to ensure only one caller enters this
 * function at a time per connection. This makes sure that the tx ring
 * alloc/unalloc pairs don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

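	/*
	 * For example, assuming the usual 4096-byte RDS_FRAG_SIZE, a
	 * 10000-byte message needs ceil(10000, 4096) = 3 work requests,
	 * while a zero-length message still consumes one so the header
	 * can be sent.
	 */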
	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		if (rm->data.m_nents) {
			rm->data.m_count = ib_dma_map_sg(dev,
							 rm->data.m_sg,
							 rm->data.m_nents,
							 DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
			if (rm->data.m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.m_rdma_op.r_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
						  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_ib_xmit_populate_wr(ic, send, pos,
					ib_sg_dma_address(dev, scat) + off, len,
					send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.m_sg[rm->data.m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue an atomic operation.
 * A simplified version of the RDMA case: we always map a single SG entry
 * of just 8 bytes, for the return value from the atomic operation.
 */
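/*
 * Mapping onto the verbs atomic work request: for a compare-and-swap,
 * wr.atomic.compare_add carries the compare value and wr.atomic.swap the
 * replacement value; for fetch-and-add, compare_add carries the addend
 * and swap is unused. Either way, the prior contents of remote_addr are
 * returned into the single 8-byte buffer mapped DMA_FROM_DEVICE below.
 */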
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_compare;
		send->s_wr.wr.atomic.swap = op->op_swap_add;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_swap_add;
		send->s_wr.wr.atomic.swap = 0;
	}
	send->s_wr.send_flags = IB_SEND_SIGNALED;
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					    op->r_sg, op->r_nents, (op->r_write) ?
					    DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

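	/*
	 * For example, assuming a device max_sge of 30, an operation whose
	 * scatterlist maps to 70 entries is split across ceil(70, 30) = 3
	 * work requests carrying 30, 30 and 10 SGEs respectively.
	 */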
	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}