/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

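/*
 * Illustrative usage (not part of this file): with the 0444 permission
 * mask above, send_batch_count is read-only at runtime, so it can only
 * be set at module load time, e.g.:
 *
 *	modprobe rds send_batch_count=2048
 *
 * and inspected afterwards via /sys/module/rds/parameters/send_batch_count.
 */
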
static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
static void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}

void rds_send_reset(struct rds_connection *conn)
{
	rds_send_path_reset(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_send_reset);

static int acquire_in_xmit(struct rds_conn_path *cp)
{
	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&cp->cp_waitq))
		wake_up_all(&cp->cp_waitq);
}

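/*
 * Illustrative sketch (not part of this file): the waiting side that
 * pairs with the wake_up_all() above sleeps until RDS_IN_XMIT clears,
 * roughly as rds_conn_shutdown() does:
 *
 *	wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
 *
 * The smp_mb__after_atomic() orders the clear_bit() against the
 * waitqueue_active() check so a waiter that queued itself just before
 * the clear cannot be missed.
 */
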
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 * - tx queueing is a simple fifo list
 * - reassembly is optional and easily done by transports per conn
 * - no per flow rx lookup at all, straight to the socket
 * - less per-frag memory and wire overhead
 * Con:
 * - queued acks can be delayed behind large messages
 * Depends:
 * - small message latency is higher behind queued large messages
 * - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	cp->cp_send_gen++;
	send_gen = cp->cp_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_path_up(cp)) {
		release_in_xmit(cp);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->t_mp_capable) {
		if (conn->c_trans->xmit_path_prepare)
			conn->c_trans->xmit_path_prepare(cp);
	} else if (conn->c_trans->xmit_prepare) {
		conn->c_trans->xmit_prepare(conn);
	}

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = cp->cp_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;
			rm->m_inc.i_conn_path = cp;
			rm->m_inc.i_conn = cp->cp_conn;

			cp->cp_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * cp_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&cp->cp_lock, flags);

			if (!list_empty(&cp->cp_send_queue)) {
				rm = list_entry(cp->cp_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item,
					       &cp->cp_retrans);
			}

			spin_unlock_irqrestore(&cp->cp_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state.  We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&cp->cp_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (cp->cp_unacked_packets == 0 ||
			    cp->cp_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				cp->cp_unacked_packets =
					rds_sysctl_max_unacked_packets;
				cp->cp_unacked_bytes =
					rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				cp->cp_unacked_bytes -= len;
				cp->cp_unacked_packets--;
			}

			cp->cp_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent &&
			    !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
			rm->m_final_op = &rm->data;

			ret = conn->c_trans->xmit(conn, rm,
						  cp->cp_xmit_hdr_off,
						  cp->cp_xmit_sg,
						  cp->cp_xmit_data_off);
			if (ret <= 0)
				break;

			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    cp->cp_xmit_hdr_off);
				cp->cp_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[cp->cp_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      cp->cp_xmit_data_off);
				cp->cp_xmit_data_off += tmp;
				ret -= tmp;
				if (cp->cp_xmit_data_off == sg->length) {
					cp->cp_xmit_data_off = 0;
					sg++;
					cp->cp_xmit_sg++;
					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
					       rm->data.op_nents);
				}
			}

			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
			    (cp->cp_xmit_sg == rm->data.op_nents))
				cp->cp_xmit_data_sent = 1;
		}

		/*
		 * An rm will only take multiple trips through this loop
		 * if there is a data op.  Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
			cp->cp_xmit_rm = NULL;
			cp->cp_xmit_sg = 0;
			cp->cp_xmit_hdr_off = 0;
			cp->cp_xmit_data_off = 0;
			cp->cp_xmit_rdma_sent = 0;
			cp->cp_xmit_atomic_sent = 0;
			cp->cp_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->t_mp_capable) {
		if (conn->c_trans->xmit_path_complete)
			conn->c_trans->xmit_path_complete(cp);
	} else if (conn->c_trans->xmit_complete) {
		conn->c_trans->xmit_complete(conn);
	}
	release_in_xmit(cp);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&cp->cp_send_queue)) &&
		    send_gen == cp->cp_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

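/*
 * Illustrative sketch (not part of this file): "call us when more room
 * is available" typically means the transport's tx completion or
 * write-space callback requeues the path's send work, which runs
 * rds_send_worker() and hence rds_send_xmit() again, roughly:
 *
 *	queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 */
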
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

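/*
 * Illustrative sketch (not part of this file): a transport-supplied
 * is_acked_func can override the sequence-number comparison above.
 * The TCP transport's callback, roughly, refuses to consider a message
 * acked until it has been assigned an m_ack_seq:
 *
 *	static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
 *	}
 */
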
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller; we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue.  This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	struct rds_conn_path *cp;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;
		if (conn->c_trans->t_mp_capable)
			cp = rm->m_inc.i_conn_path;
		else
			cp = &conn->c_path[0];

		spin_lock_irqsave(&cp->cp_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&cp->cp_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here;
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_conn_path *cp,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN.  But poll() indicates there's send
	 * room.  This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks.  So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rm->m_inc.i_conn_path = cp;
		rds_message_addref(rm);

		spin_lock(&cp->cp_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&cp->cp_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

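/*
 * Illustrative sketch (not part of this file): the poll() half of the
 * sndbuf accounting above lives in af_rds.c, where rds_poll() reports
 * send room with roughly:
 *
 *	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
 *		mask |= (POLLOUT | POLLWRNORM);
 *
 * Because rds_send_queue_rm() tests the *old* rs_snd_bytes, the last
 * queued message may push the count past the limit, after which poll()
 * stops reporting send room until acks free space.
 */
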
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go.  This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

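/*
 * Illustrative note (not part of this file): cmsg_groups acts as a
 * two-bit set, bit 0 for the (ARGS, ATOMIC) group and bit 1 for the
 * (DEST, MAP) group.  A sendmsg() that mixes the two groups, e.g.
 * RDS_CMSG_RDMA_MAP together with RDS_CMSG_RDMA_ARGS, ends up with
 * cmsg_groups == (1 | 2) == 3 and is rejected above with -EINVAL.
 */
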
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	struct rds_conn_path *cpath;

	/* Mirror the Linux UDP mirror of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	release_sock(sk);

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	cpath = &conn->c_path[0];

	rds_conn_path_connect_if_down(cpath);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, cpath, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR.  If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

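/*
 * Illustrative userspace sketch (not part of this file): the path above
 * is driven by an ordinary send on a PF_RDS socket.  With dst_addr and
 * dst_port standing in for the peer (both assumed names), roughly:
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = dst_addr,
 *		.sin_port = dst_port,
 *	};
 *	bind(fd, ...);	/- RDS requires a bound source address -/
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&sin, sizeof(sin));
 */
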
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = cp->cp_conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_path_connect_if_down(cp);

	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&cp->cp_lock, flags);
	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = cp->cp_conn;
	rm->m_inc.i_conn_path = cp;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    cp->cp_next_tx_seq);
	cp->cp_next_tx_seq++;
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}