RDS: Make rds_send_queue_rm() rds_conn_path aware
net/rds/send.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds_single_path.h"
#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

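/*
 * Illustrative usage sketch (assumes RDS is built as the rds.ko module,
 * which is not stated in this file): with permissions 0444 the parameter
 * can be set at load time, e.g. "modprobe rds send_batch_count=2048", and
 * read back from /sys/module/rds/parameters/send_batch_count, but it
 * cannot be changed through sysfs once the module is loaded.
 */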
static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
static void rds_send_path_reset(struct rds_conn_path *cp)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        if (cp->cp_xmit_rm) {
                rm = cp->cp_xmit_rm;
                cp->cp_xmit_rm = NULL;
                /* Tell the user the RDMA op is no longer mapped by the
                 * transport. This isn't entirely true (it's flushed out
                 * independently) but as the connection is down, there's
                 * no ongoing RDMA to/from that memory */
                rds_message_unmapped(rm);
                rds_message_put(rm);
        }

        cp->cp_xmit_sg = 0;
        cp->cp_xmit_hdr_off = 0;
        cp->cp_xmit_data_off = 0;
        cp->cp_xmit_atomic_sent = 0;
        cp->cp_xmit_rdma_sent = 0;
        cp->cp_xmit_data_sent = 0;

        cp->cp_conn->c_map_queued = 0;

        cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
        cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark messages as retransmissions, and move them to the send q */
        spin_lock_irqsave(&cp->cp_lock, flags);
        list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
        }
        list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
        spin_unlock_irqrestore(&cp->cp_lock, flags);
}

void rds_send_reset(struct rds_connection *conn)
{
        rds_send_path_reset(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_send_reset);

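/*
 * Orientation note (an editorial sketch, not original source text): the
 * per-transmission state (cp_xmit_*, the send and retransmit queues, the
 * unacked counters) lives in struct rds_conn_path, and rds_send_reset()
 * is simply the single-path wrapper that resets path 0 for transports
 * that are not multipath aware; code below that still uses the old
 * conn->c_* names presumably relies on the rds_single_path.h
 * compatibility header included above.
 */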
static int acquire_in_xmit(struct rds_connection *conn)
{
        return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
        clear_bit(RDS_IN_XMIT, &conn->c_flags);
        smp_mb__after_atomic();
        /*
         * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare. We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}

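/*
 * Illustrative usage pattern for the RDS_IN_XMIT bit (a sketch based on
 * the callers below, not original source text):
 *
 *      if (!acquire_in_xmit(conn))
 *              return -ENOMEM;         (someone else is already transmitting)
 *      ... push messages down the transport ...
 *      release_in_xmit(conn);          (also wakes anyone waiting on c_waitq,
 *                                       e.g. a connection shutdown)
 */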
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
        struct rds_message *rm;
        unsigned long flags;
        unsigned int tmp;
        struct scatterlist *sg;
        int ret = 0;
        LIST_HEAD(to_be_dropped);
        int batch_count;
        unsigned long send_gen = 0;

restart:
        batch_count = 0;

        /*
         * sendmsg calls here after having queued its message on the send
         * queue. We only have one task feeding the connection at a time. If
         * another thread is already feeding the queue then we back off. This
         * avoids blocking the caller and trading per-connection data between
         * caches per message.
         */
        if (!acquire_in_xmit(conn)) {
                rds_stats_inc(s_send_lock_contention);
                ret = -ENOMEM;
                goto out;
        }

        /*
         * we record the send generation after doing the xmit acquire.
         * if someone else manages to jump in and do some work, we'll use
         * this to avoid a goto restart farther down.
         *
         * The acquire_in_xmit() check above ensures that only one
         * caller can increment c_send_gen at any time.
         */
        conn->c_send_gen++;
        send_gen = conn->c_send_gen;

        /*
         * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
         * we do the opposite to avoid races.
         */
        if (!rds_conn_up(conn)) {
                release_in_xmit(conn);
                ret = 0;
                goto out;
        }

        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);

        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
         */
        while (1) {

                rm = conn->c_xmit_rm;

                /*
                 * If between sending messages, we can send a pending congestion
                 * map update.
                 */
                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                        rm = rds_cong_update_alloc(conn);
                        if (IS_ERR(rm)) {
                                ret = PTR_ERR(rm);
                                break;
                        }
                        rm->data.op_active = 1;

                        conn->c_xmit_rm = rm;
                }

                /*
                 * If not already working on one, grab the next message.
                 *
                 * c_xmit_rm holds a ref while we're sending this message down
                 * the connection. We can use this ref while holding the
                 * send_sem.. rds_send_reset() is serialized with it.
                 */
                if (!rm) {
                        unsigned int len;

                        batch_count++;

                        /* we want to process as big a batch as we can, but
                         * we also want to avoid softlockups. If we've been
                         * through a lot of messages, lets back off and see
                         * if anyone else jumps in
                         */
                        if (batch_count >= send_batch_count)
                                goto over_batch;

                        spin_lock_irqsave(&conn->c_lock, flags);

                        if (!list_empty(&conn->c_send_queue)) {
                                rm = list_entry(conn->c_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm);

                                /*
                                 * Move the message from the send queue to the
                                 * retransmit list right away.
                                 */
                                list_move_tail(&rm->m_conn_item, &conn->c_retrans);
                        }

                        spin_unlock_irqrestore(&conn->c_lock, flags);

                        if (!rm)
                                break;

                        /* Unfortunately, the way Infiniband deals with
                         * RDMA to a bad MR key is by moving the entire
                         * queue pair to error state. We could possibly
                         * recover from that, but right now we drop the
                         * connection.
                         * Therefore, we never retransmit messages with RDMA ops.
                         */
                        if (rm->rdma.op_active &&
                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
                                spin_lock_irqsave(&conn->c_lock, flags);
                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                        list_move(&rm->m_conn_item, &to_be_dropped);
                                spin_unlock_irqrestore(&conn->c_lock, flags);
                                continue;
                        }

                        /* Require an ACK every once in a while */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (conn->c_unacked_packets == 0 ||
                            conn->c_unacked_bytes < len) {
                                __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                                conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
                                conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
                                rds_stats_inc(s_send_ack_required);
                        } else {
                                conn->c_unacked_bytes -= len;
                                conn->c_unacked_packets--;
                        }

                        conn->c_xmit_rm = rm;
                }

                /* The transport either sends the whole rdma or none of it */
                if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
                        rm->m_final_op = &rm->rdma;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        conn->c_xmit_rdma_sent = 1;

                }

                if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
                        rm->m_final_op = &rm->atomic;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        conn->c_xmit_atomic_sent = 1;

                }

                /*
                 * A number of cases require an RDS header to be sent
                 * even if there is no data.
                 * We permit 0-byte sends; rds-ping depends on this.
                 * However, if there are exclusively attached silent ops,
                 * we skip the hdr/data send, to enable silent operation.
                 */
                if (rm->data.op_nents == 0) {
                        int ops_present;
                        int all_ops_are_silent = 1;

                        ops_present = (rm->atomic.op_active || rm->rdma.op_active);
                        if (rm->atomic.op_active && !rm->atomic.op_silent)
                                all_ops_are_silent = 0;
                        if (rm->rdma.op_active && !rm->rdma.op_silent)
                                all_ops_are_silent = 0;

                        if (ops_present && all_ops_are_silent
                            && !rm->m_rdma_cookie)
                                rm->data.op_active = 0;
                }

                if (rm->data.op_active && !conn->c_xmit_data_sent) {
                        rm->m_final_op = &rm->data;
                        ret = conn->c_trans->xmit(conn, rm,
                                                  conn->c_xmit_hdr_off,
                                                  conn->c_xmit_sg,
                                                  conn->c_xmit_data_off);
                        if (ret <= 0)
                                break;

                        if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
                                tmp = min_t(int, ret,
                                            sizeof(struct rds_header) -
                                            conn->c_xmit_hdr_off);
                                conn->c_xmit_hdr_off += tmp;
                                ret -= tmp;
                        }

                        sg = &rm->data.op_sg[conn->c_xmit_sg];
                        while (ret) {
                                tmp = min_t(int, ret, sg->length -
                                                      conn->c_xmit_data_off);
                                conn->c_xmit_data_off += tmp;
                                ret -= tmp;
                                if (conn->c_xmit_data_off == sg->length) {
                                        conn->c_xmit_data_off = 0;
                                        sg++;
                                        conn->c_xmit_sg++;
                                        BUG_ON(ret != 0 &&
                                               conn->c_xmit_sg == rm->data.op_nents);
                                }
                        }

                        if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
                            (conn->c_xmit_sg == rm->data.op_nents))
                                conn->c_xmit_data_sent = 1;
                }

                /*
                 * A rm will only take multiple times through this loop
                 * if there is a data op. Thus, if the data is sent (or there was
                 * none), then we're done with the rm.
                 */
                if (!rm->data.op_active || conn->c_xmit_data_sent) {
                        conn->c_xmit_rm = NULL;
                        conn->c_xmit_sg = 0;
                        conn->c_xmit_hdr_off = 0;
                        conn->c_xmit_data_off = 0;
                        conn->c_xmit_rdma_sent = 0;
                        conn->c_xmit_atomic_sent = 0;
                        conn->c_xmit_data_sent = 0;

                        rds_message_put(rm);
                }
        }

over_batch:
        if (conn->c_trans->xmit_complete)
                conn->c_trans->xmit_complete(conn);
        release_in_xmit(conn);

        /* Nuke any messages we decided not to retransmit. */
        if (!list_empty(&to_be_dropped)) {
                /* irqs on here, so we can put(), unlike above */
                list_for_each_entry(rm, &to_be_dropped, m_conn_item)
                        rds_message_put(rm);
                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
        }

        /*
         * Other senders can queue a message after we last test the send queue
         * but before we clear RDS_IN_XMIT. In that case they'd back off and
         * not try and send their newly queued message. We need to check the
         * send queue after having cleared RDS_IN_XMIT so that their message
         * doesn't get stuck on the send queue.
         *
         * If the transport cannot continue (i.e. ret != 0), then it must
         * call us when more room is available, such as from the tx
         * completion handler.
         *
         * We have an extra generation check here so that if someone manages
         * to jump in after our release_in_xmit, we'll see that they have done
         * some work and we will skip our goto restart.
         */
        if (ret == 0) {
                smp_mb();
                if ((test_bit(0, &conn->c_map_queued) ||
                     !list_empty(&conn->c_send_queue)) &&
                    send_gen == conn->c_send_gen) {
                        rds_stats_inc(s_send_lock_queue_raced);
                        if (batch_count < send_batch_count)
                                goto restart;
                        queue_delayed_work(rds_wq, &conn->c_send_w, 1);
                }
        }
out:
        return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        assert_spin_locked(&rs->rs_lock);

        BUG_ON(rs->rs_snd_bytes < len);
        rs->rs_snd_bytes -= len;

        if (rs->rs_snd_bytes == 0)
                rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                    is_acked_func is_acked)
{
        if (is_acked)
                return is_acked(rm, ack);
        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

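/*
 * Illustrative note on is_acked_func (a sketch, not original source text):
 * a transport that acknowledges at a different granularity can pass its
 * own predicate down through rds_send_drop_acked()/
 * rds_send_path_drop_acked(), e.g. the TCP transport's tcp_is_acked
 * mentioned further below; when the pointer is NULL the default
 * sequence-number comparison above is used.
 */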
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_rdma_op *ro;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ro = &rm->rdma;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ro->op_active && ro->op_notify && ro->op_notifier) {
                notifier = ro->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ro->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_atomic_op *ao;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ao = &rm->atomic;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
            && ao->op_active && ao->op_notify && ao->op_notifier) {
                notifier = ao->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ao->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
        struct rm_rdma_op *ro;
        struct rm_atomic_op *ao;

        ro = &rm->rdma;
        if (ro->op_active && ro->op_notify && ro->op_notifier) {
                ro->op_notifier->n_status = status;
                list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
                ro->op_notifier = NULL;
        }

        ao = &rm->atomic;
        if (ao->op_active && ao->op_notify && ao->op_notifier) {
                ao->op_notifier->n_status = status;
                list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
                ao->op_notifier = NULL;
        }

        /* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
        unsigned long flags;
        struct rds_sock *rs = NULL;
        struct rds_message *rm;

        while (!list_empty(messages)) {
                int was_on_sock = 0;

                rm = list_entry(messages->next, struct rds_message,
                                m_conn_item);
                list_del_init(&rm->m_conn_item);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the sock. If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 *
                 * The message spinlock makes sure nobody clears rm->m_rs
                 * while we're messing with it. It does not prevent the
                 * message from being removed from the socket, though.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);
                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                        goto unlock_and_drop;

                if (rs != rm->m_rs) {
                        if (rs) {
                                rds_wake_sk_sleep(rs);
                                sock_put(rds_rs_to_sk(rs));
                        }
                        rs = rm->m_rs;
                        if (rs)
                                sock_hold(rds_rs_to_sk(rs));
                }
                if (!rs)
                        goto unlock_and_drop;
                spin_lock(&rs->rs_lock);

                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                        struct rm_rdma_op *ro = &rm->rdma;
                        struct rds_notifier *notifier;

                        list_del_init(&rm->m_sock_item);
                        rds_send_sndbuf_remove(rs, rm);

                        if (ro->op_active && ro->op_notifier &&
                            (ro->op_notify || (ro->op_recverr && status))) {
                                notifier = ro->op_notifier;
                                list_add_tail(&notifier->n_list,
                                              &rs->rs_notify_queue);
                                if (!notifier->n_status)
                                        notifier->n_status = status;
                                rm->rdma.op_notifier = NULL;
                        }
                        was_on_sock = 1;
                        rm->m_rs = NULL;
                }
                spin_unlock(&rs->rs_lock);

unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
                if (was_on_sock)
                        rds_message_put(rm);
        }

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
                              is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&cp->cp_lock, flags);

        list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))
                        break;

                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        /* order flag updates with spin locks */
        if (!list_empty(&list))
                smp_mb__after_atomic();

        spin_unlock_irqrestore(&cp->cp_lock, flags);

        /* now remove the messages from the sock list as needed */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
        struct rds_message *rm, *tmp;
        struct rds_connection *conn;
        unsigned long flags;
        LIST_HEAD(list);

        /* get all the messages we're dropping under the rs lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
                        continue;

                list_move(&rm->m_sock_item, &list);
                rds_send_sndbuf_remove(rs, rm);
                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
        }

        /* order flag updates with the rs lock */
        smp_mb__after_atomic();

        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (list_empty(&list))
                return;

        /* Remove the messages from the conn */
        list_for_each_entry(rm, &list, m_sock_item) {

                conn = rm->m_inc.i_conn;

                spin_lock_irqsave(&conn->c_lock, flags);
                /*
                 * Maybe someone else beat us to removing rm from the conn.
                 * If we race with their flag update we'll get the lock and
                 * then really see that the flag has been cleared.
                 */
                if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                        spin_unlock_irqrestore(&conn->c_lock, flags);
                        spin_lock_irqsave(&rm->m_rs_lock, flags);
                        rm->m_rs = NULL;
                        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                        continue;
                }
                list_del_init(&rm->m_conn_item);
                spin_unlock_irqrestore(&conn->c_lock, flags);

                /*
                 * Couldn't grab m_rs_lock in top loop (lock ordering),
                 * but we can now.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }

        rds_wake_sk_sleep(rs);

        while (!list_empty(&list)) {
                rm = list_entry(list.next, struct rds_message, m_sock_item);
                list_del_init(&rm->m_sock_item);
                rds_message_wait(rm);

                /* just in case the code above skipped this message
                 * because RDS_MSG_ON_CONN wasn't set, run it again here.
                 * Taking m_rs_lock is the only thing that keeps us
                 * from racing with ack processing.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }
}

/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                             struct rds_conn_path *cp,
                             struct rds_message *rm, __be16 sport,
                             __be16 dport, int *queued)
{
        unsigned long flags;
        u32 len;

        if (*queued)
                goto out;

        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        /* this is the only place which holds both the socket's rs_lock
         * and the connection's c_lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        /*
         * If there is a little space in sndbuf, we don't queue anything,
         * and userspace gets -EAGAIN. But poll() indicates there's send
         * room. This can lead to bad behavior (spinning) if snd_bytes isn't
         * freed up by incoming acks. So we check the *old* value of
         * rs_snd_bytes here to allow the last msg to exceed the buffer,
         * and poll() now knows no more data can be sent.
         */
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                rs->rs_snd_bytes += len;

                /* let recv side know we are close to send space exhaustion.
                 * This is probably not the optimal way to do it, as this
                 * means we set the flag on *all* messages as soon as our
                 * throughput hits a certain threshold.
                 */
                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                        __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                rds_message_addref(rm);
                rm->m_rs = rs;

                /* The code ordering is a little weird, but we're
                   trying to minimize the time we hold c_lock */
                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                rm->m_inc.i_conn = conn;
                rm->m_inc.i_conn_path = cp;
                rds_message_addref(rm);

                spin_lock(&cp->cp_lock);
                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
                list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                spin_unlock(&cp->cp_lock);

                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                         rm, len, rs, rs->rs_snd_bytes,
                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

                *queued = 1;
        }

        spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
        return *queued;
}

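/*
 * Reference-counting note (a sketch inferred from the code above, not
 * original source text): rds_message_addref() is called twice on the
 * queued message - once for its position on the socket's rs_send_queue
 * (paired with RDS_MSG_ON_SOCK) and once for its position on the path's
 * cp_send_queue (paired with RDS_MSG_ON_CONN); the matching reference is
 * dropped when the message is later removed from each list.
 */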
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        cmsg_groups |= 1;
                        retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
                        if (retval < 0)
                                return retval;
                        size += retval;

                        break;

                case RDS_CMSG_RDMA_DEST:
                case RDS_CMSG_RDMA_MAP:
                        cmsg_groups |= 2;
                        /* these are valid but do not add any size */
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        cmsg_groups |= 1;
                        size += sizeof(struct scatterlist);
                        break;

                default:
                        return -EINVAL;
                }

        }

        size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

        /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
        if (cmsg_groups == 3)
                return -EINVAL;

        return size;
}

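/*
 * Worked example for the cmsg_groups check above (illustrative, not
 * original source text): a sendmsg() carrying both RDS_CMSG_RDMA_ARGS
 * (group 1) and RDS_CMSG_RDMA_MAP (group 2) ends up with
 * cmsg_groups == (1 | 2) == 3 and is rejected with -EINVAL, while any
 * number of control messages drawn from a single group is accepted.
 */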
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                         struct msghdr *msg, int *allocated_mr)
{
        struct cmsghdr *cmsg;
        int ret = 0;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                /* As a side effect, RDMA_DEST and RDMA_MAP will set
                 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_DEST:
                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_MAP:
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
                        break;
                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        ret = rds_cmsg_atomic(rs, rm, cmsg);
                        break;

                default:
                        return -EINVAL;
                }

                if (ret)
                        break;
        }

        return ret;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
        __be32 daddr;
        __be16 dport;
        struct rds_message *rm = NULL;
        struct rds_connection *conn;
        int ret = 0;
        int queued = 0, allocated_mr = 0;
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, nonblock);
        struct rds_conn_path *cpath;

        /* Mirror Linux UDP's mirroring of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (msg->msg_namelen) {
                /* XXX fail non-unicast destination IPs? */
                if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
                        ret = -EINVAL;
                        goto out;
                }
                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
        } else {
                /* We only care about consistency with ->connect() */
                lock_sock(sk);
                daddr = rs->rs_conn_addr;
                dport = rs->rs_conn_port;
                release_sock(sk);
        }

        lock_sock(sk);
        if (daddr == 0 || rs->rs_bound_addr == 0) {
                release_sock(sk);
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
        release_sock(sk);

        if (payload_len > rds_sk_sndbuf(rs)) {
                ret = -EMSGSIZE;
                goto out;
        }

        /* size of rm including all sgs */
        ret = rds_rm_size(msg, payload_len);
        if (ret < 0)
                goto out;

        rm = rds_message_alloc(ret, GFP_KERNEL);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        /* Attach data to the rm */
        if (payload_len) {
                rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
                if (!rm->data.op_sg) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter);
                if (ret)
                        goto out;
        }
        rm->data.op_active = 1;

        rm->m_daddr = daddr;

        /* rds_conn_create has a spinlock that runs with IRQ off.
         * Caching the conn in the socket helps a lot. */
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
                conn = rs->rs_conn;
        else {
                conn = rds_conn_create_outgoing(sock_net(sock->sk),
                                                rs->rs_bound_addr, daddr,
                                                rs->rs_transport,
                                                sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
                        ret = PTR_ERR(conn);
                        goto out;
                }
                rs->rs_conn = conn;
        }

        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret)
                goto out;

        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
                                   &rm->rdma, conn->c_trans->xmit_rdma);
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
                printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
                                   &rm->atomic, conn->c_trans->xmit_atomic);
                ret = -EOPNOTSUPP;
                goto out;
        }

        rds_conn_connect_if_down(conn);

        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
                rs->rs_seen_congestion = 1;
                goto out;
        }

        cpath = &conn->c_path[0];

        while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
                                  dport, &queued)) {
                rds_stats_inc(s_send_queue_full);

                if (nonblock) {
                        ret = -EAGAIN;
                        goto out;
                }

                timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        rds_send_queue_rm(rs, conn, cpath, rm,
                                                          rs->rs_bound_port,
                                                          dport,
                                                          &queued),
                                        timeo);
                rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
                if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                        continue;

                ret = timeo;
                if (ret == 0)
                        ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * By now we've committed to the send. We reuse rds_send_worker()
         * to retry sends in the rds thread if the transport asks us to.
         */
        rds_stats_inc(s_send_queued);

        ret = rds_send_xmit(conn);
        if (ret == -ENOMEM || ret == -EAGAIN)
                queue_delayed_work(rds_wq, &conn->c_send_w, 1);

        rds_message_put(rm);
        return payload_len;

out:
        /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
         * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
         * or in any other way, we need to destroy the MR again */
        if (allocated_mr)
                rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

        if (rm)
                rds_message_put(rm);
        return ret;
}

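/*
 * Illustrative userspace view of the path above (a sketch; it assumes an
 * RDS-capable kernel with a configured transport and is not part of the
 * original file):
 *
 *      int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
 *      bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));     local IP/port
 *      sendto(fd, buf, len, 0,
 *             (struct sockaddr *)&faddr, sizeof(faddr));       peer IP/port
 *
 * Each such sendto()/sendmsg() call arrives here as one rds_sendmsg()
 * invocation and, if it queues successfully, becomes one rds_message on
 * the connection path's send queue.
 */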
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_daddr = conn->c_faddr;
        rm->data.op_active = 1;

        rds_conn_connect_if_down(conn);

        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&conn->c_lock, flags);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        rds_message_addref(rm);
        rm->m_inc.i_conn = conn;

        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
                                    conn->c_next_tx_seq);
        conn->c_next_tx_seq++;
        spin_unlock_irqrestore(&conn->c_lock, flags);

        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);

        /* schedule the send work on rds_wq */
        queue_delayed_work(rds_wq, &conn->c_send_w, 1);

        rds_message_put(rm);
        return 0;

out:
        if (rm)
                rds_message_put(rm);
        return ret;
}