/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds_single_path.h"
#include "rds.h"
#include "loop.h"
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
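
/* Worked example (illustrative note, not part of the original file): with
 * RDS_CONNECTION_HASH_BITS == 12 the table has 4096 heads, and masking
 * with RDS_CONNECTION_HASH_MASK (0xfff) reduces any 32-bit ehash value to
 * a valid index, e.g. 0x12345678 & 0xfff == 0x678, selecting bucket 1656.
 */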
#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
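
/* Usage sketch (illustrative note, not part of the original file): the
 * macro is exercised below by rds_conn_info_visitor(), e.g.
 *
 *	rds_conn_info_set(cinfo->flags,
 *			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
 *			  CONNECTED);
 *
 * which ORs RDS_INFO_CONNECTION_FLAG_CONNECTED into cinfo->flags when the
 * path state is RDS_CONN_UP.
 */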
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}
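
/* Illustrative sketch (not part of the original file): honoring the locking
 * contract above.  Callers wrap the lookup in rcu_read_lock(); since conns
 * are not garbage collected until module removal (see the comment above
 * __rds_conn_create() below), the pointer stays usable after the unlock.
 */
static __maybe_unused struct rds_connection *
rds_conn_lookup_example(struct net *net, __be32 laddr, __be32 faddr,
			struct rds_transport *trans)
{
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_connection *conn;

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	rcu_read_unlock();

	return conn;
}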
/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	/* cp_outgoing is per-path. So we can only set it here
	 * for the single-path transports.
	 */
	if (!conn->c_trans->t_mp_capable)
		cp->cp_outgoing = (is_outgoing ? 1 : 0);
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}
/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}
	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");
	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim.  We check while holding the lock.  If we won, we complete
	 * init and return our conn.  If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			struct rds_conn_path *cp;

			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				cp = &conn->c_path[i];
				trans->conn_free(cp->cp_transport_data);
				if (!trans->t_mp_capable)
					break;
			}
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				__rds_conn_path_init(conn, &conn->c_path[i],
						     is_outgoing);
				conn->c_path[i].cp_index = i;
			}

			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
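
/* Illustrative sketch (not part of the original file): how a caller might
 * materialize a conn on first use and kick a connect.  The helper name and
 * the "my_addr"/"peer_addr" parameters are hypothetical; the calls are the
 * APIs defined in this file (rds_conn_connect_if_down() appears further
 * below and is declared in rds.h).
 */
static int __maybe_unused rds_conn_create_example(struct net *net,
						  __be32 my_addr,
						  __be32 peer_addr,
						  struct rds_transport *trans)
{
	struct rds_connection *conn;

	conn = rds_conn_create_outgoing(net, my_addr, peer_addr,
					trans, GFP_KERNEL);
	if (IS_ERR(conn))
		return PTR_ERR(conn);

	/* No-op if a connect is already pending or the conn is up. */
	rds_conn_connect_if_down(conn);

	return 0;
}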
void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		if (!conn->c_trans->t_mp_capable)
			conn->c_trans->conn_shutdown(conn);
		else
			conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		if (conn->c_trans->t_type != RDS_TRANS_TCP ||
		    cp->cp_outgoing == 1)
			rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}
/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			for (j = 0; j < RDS_MPATH_WORKERS; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						rds_inc_info_copy(&rm->m_inc,
								  iter,
								  conn->c_laddr,
								  conn->c_faddr,
								  0);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
				if (!conn->c_trans->t_mp_capable)
					break;
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}
static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
			     struct rds_info_iterator *iter,
			     struct rds_info_lengths *lens,
			     int (*visitor)(struct rds_conn_path *, void *),
			     size_t item_len)
{
	u64 buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;
	int j;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			for (j = 0; j < RDS_MPATH_WORKERS; j++) {
				cp = &conn->c_path[j];

				/* XXX no cp_lock usage.. */
				if (!visitor(cp, buffer))
					continue;
				if (!conn->c_trans->t_mp_capable)
					break;
			}

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = cp->cp_conn->c_laddr;
	cinfo->faddr = cp->cp_conn->c_faddr;
	strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}
/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &cp->cp_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

void rds_conn_drop(struct rds_connection *conn)
{
	rds_conn_path_drop(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);
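
/* Note (illustrative, not part of the original file): dropping a path sets
 * its state to RDS_CONN_ERROR and queues cp_down_w, which was initialized
 * to rds_shutdown_worker in __rds_conn_path_init() above; that worker ends
 * up in rds_conn_shutdown() and, if the conn is still hashed, in
 * rds_queue_reconnect().
 */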
/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
}

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}

void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp);
}