RDS: make sure rds_send_drop_to properly takes the m_rs_lock
net/rds/send.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

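/* RDS_IN_XMIT acts as a per-connection transmit lock: acquire_in_xmit() is a
 * trylock-style test_and_set_bit(), and release_in_xmit() clears the bit and
 * wakes anyone (e.g. rds_conn_shutdown()) sleeping on conn->c_waitq waiting
 * for the bit to drop.
 */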
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups. If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= 1024)
				goto over_batch;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT. In that case they'd back off and
	 * not try and send their newly queued message. We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			goto restart;
		}
	}
out:
	return ret;
}

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

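/* Decide whether "ack" covers "rm". Transports that track acknowledgement
 * themselves (e.g. TCP via tcp_is_acked()) supply an is_acked callback;
 * otherwise we simply compare against the RDS header sequence number.
 */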
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
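		/* Drop the ref held for the caller's 'messages' list; if the
		 * message was still on the socket queue, also drop the ref
		 * the socket list held.
		 */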
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

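/* Drop messages this socket has queued to "dest" (or to any destination when
 * dest is NULL), e.g. for the RDS_CANCEL_SENT_TO socket option. Lock
 * ordering matters here: rs_lock is held while walking the socket queue, so
 * rm->m_rs can only be cleared later, under m_rs_lock, once each message has
 * been pulled off its connection - that is what keeps this from racing with
 * ack processing.
 */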
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here;
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

799}
800
801/*
802 * we only want this to fire once so we use the callers 'queued'. It's
803 * possible that another thread can race with us and remove the
804 * message from the flow with RDS_CANCEL_SENT_TO.
805 */
806static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
807 struct rds_message *rm, __be16 sport,
808 __be16 dport, int *queued)
809{
810 unsigned long flags;
811 u32 len;
812
813 if (*queued)
814 goto out;
815
816 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
817
818 /* this is the only place which holds both the socket's rs_lock
819 * and the connection's c_lock */
820 spin_lock_irqsave(&rs->rs_lock, flags);
821
822 /*
823 * If there is a little space in sndbuf, we don't queue anything,
824 * and userspace gets -EAGAIN. But poll() indicates there's send
825 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
826 * freed up by incoming acks. So we check the *old* value of
827 * rs_snd_bytes here to allow the last msg to exceed the buffer,
828 * and poll() now knows no more data can be sent.
829 */
830 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
831 rs->rs_snd_bytes += len;
832
833 /* let recv side know we are close to send space exhaustion.
834 * This is probably not the optimal way to do it, as this
835 * means we set the flag on *all* messages as soon as our
836 * throughput hits a certain threshold.
837 */
838 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
839 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
840
841 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
842 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
843 rds_message_addref(rm);
844 rm->m_rs = rs;
845
846 /* The code ordering is a little weird, but we're
847 trying to minimize the time we hold c_lock */
848 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
849 rm->m_inc.i_conn = conn;
850 rds_message_addref(rm);
851
852 spin_lock(&conn->c_lock);
853 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
854 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
855 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
856 spin_unlock(&conn->c_lock);
857
858 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
859 rm, len, rs, rs->rs_snd_bytes,
860 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
861
862 *queued = 1;
863 }
864
865 spin_unlock_irqrestore(&rs->rs_lock, flags);
866out:
867 return *queued;
868}
869
fc445084
AG
870/*
871 * rds_message is getting to be quite complicated, and we'd like to allocate
872 * it all in one go. This figures out how big it needs to be up front.
873 */
874static int rds_rm_size(struct msghdr *msg, int data_len)
875{
ff87e97a 876 struct cmsghdr *cmsg;
fc445084 877 int size = 0;
aa0a4ef4 878 int cmsg_groups = 0;
ff87e97a
AG
879 int retval;
880
f95b414e 881 for_each_cmsghdr(cmsg, msg) {
ff87e97a
AG
882 if (!CMSG_OK(msg, cmsg))
883 return -EINVAL;
884
885 if (cmsg->cmsg_level != SOL_RDS)
886 continue;
887
888 switch (cmsg->cmsg_type) {
889 case RDS_CMSG_RDMA_ARGS:
aa0a4ef4 890 cmsg_groups |= 1;
ff87e97a
AG
891 retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
892 if (retval < 0)
893 return retval;
894 size += retval;
aa0a4ef4 895
ff87e97a
AG
896 break;
897
898 case RDS_CMSG_RDMA_DEST:
899 case RDS_CMSG_RDMA_MAP:
aa0a4ef4 900 cmsg_groups |= 2;
ff87e97a
AG
901 /* these are valid but do no add any size */
902 break;
903
15133f6e
AG
904 case RDS_CMSG_ATOMIC_CSWP:
905 case RDS_CMSG_ATOMIC_FADD:
20c72bd5
AG
906 case RDS_CMSG_MASKED_ATOMIC_CSWP:
907 case RDS_CMSG_MASKED_ATOMIC_FADD:
aa0a4ef4 908 cmsg_groups |= 1;
15133f6e
AG
909 size += sizeof(struct scatterlist);
910 break;
911
ff87e97a
AG
912 default:
913 return -EINVAL;
914 }
915
916 }
fc445084 917
ff87e97a 918 size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
fc445084 919
aa0a4ef4
AG
920 /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
921 if (cmsg_groups == 3)
922 return -EINVAL;
923
fc445084
AG
924 return size;
925}
926
5c115590
AG
927static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
928 struct msghdr *msg, int *allocated_mr)
929{
930 struct cmsghdr *cmsg;
931 int ret = 0;
932
f95b414e 933 for_each_cmsghdr(cmsg, msg) {
5c115590
AG
934 if (!CMSG_OK(msg, cmsg))
935 return -EINVAL;
936
937 if (cmsg->cmsg_level != SOL_RDS)
938 continue;
939
940 /* As a side effect, RDMA_DEST and RDMA_MAP will set
15133f6e 941 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
5c115590
AG
942 */
943 switch (cmsg->cmsg_type) {
944 case RDS_CMSG_RDMA_ARGS:
945 ret = rds_cmsg_rdma_args(rs, rm, cmsg);
946 break;
947
948 case RDS_CMSG_RDMA_DEST:
949 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
950 break;
951
952 case RDS_CMSG_RDMA_MAP:
953 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
954 if (!ret)
955 *allocated_mr = 1;
956 break;
15133f6e
AG
957 case RDS_CMSG_ATOMIC_CSWP:
958 case RDS_CMSG_ATOMIC_FADD:
20c72bd5
AG
959 case RDS_CMSG_MASKED_ATOMIC_CSWP:
960 case RDS_CMSG_MASKED_ATOMIC_FADD:
15133f6e
AG
961 ret = rds_cmsg_atomic(rs, rm, cmsg);
962 break;
5c115590
AG
963
964 default:
965 return -EINVAL;
966 }
967
968 if (ret)
969 break;
970 }
971
972 return ret;
973}
974
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP mirror of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

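	/* If the remote side has marked our destination port congested, wait
	 * here for space (or fail with -EAGAIN on a non-blocking socket)
	 * before committing the message to the queues.
	 */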
	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

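	/* Pongs are generated from the receive path, which may be running in
	 * atomic context, so the reply message is allocated with GFP_ATOMIC
	 * rather than GFP_KERNEL.
	 */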
	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}