RDS-TCP: Make RDS-TCP work correctly when it is set up in a netns other than init_net
net/rds/tcp_listen.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

/*
 * cheesy, but simple..
 */
static void rds_tcp_accept_worker(struct work_struct *work);
static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
static struct socket *rds_tcp_listen_sock;

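/* Enable TCP keepalive on a freshly accepted socket so that a peer
 * that disappears without closing the connection is eventually
 * detected and the socket errors out instead of lingering forever.
 */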
static int rds_tcp_keepalive(struct socket *sock)
{
        /* values below based on xs_udp_default_timeout */
        int keepidle = 5; /* send a probe 'keepidle' secs after last data */
        int keepcnt = 5; /* number of unack'ed probes before declaring dead */
        int keepalive = 1;
        int ret = 0;

        ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
                                (char *)&keepalive, sizeof(keepalive));
        if (ret < 0)
                goto bail;

        ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
                                (char *)&keepcnt, sizeof(keepcnt));
        if (ret < 0)
                goto bail;

        ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
                                (char *)&keepidle, sizeof(keepidle));
        if (ret < 0)
                goto bail;

        /* KEEPINTVL is the interval between successive probes. We follow
         * the model in xs_tcp_finish_connecting() and re-use keepidle.
         */
        ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
                                (char *)&keepidle, sizeof(keepidle));
bail:
        return ret;
}

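/* Accept one pending connection on the RDS/TCP listen socket: create a
 * new kernel socket in the listener's netns, accept into it, apply the
 * keepalive and tuning settings, and attach the socket to the RDS
 * connection for this address pair. Returns 0 when the caller should
 * keep accepting (including the case where a racing connect was
 * resolved by dropping the conn), or a negative errno on error, in
 * which case the partially set up socket is released before returning.
 */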
static int rds_tcp_accept_one(struct socket *sock)
{
        struct socket *new_sock = NULL;
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
        struct rds_tcp_connection *rs_tcp;

        ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
                               &new_sock);
        if (ret)
                goto out;

        new_sock->type = sock->type;
        new_sock->ops = sock->ops;
        ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
        if (ret < 0)
                goto out;

        ret = rds_tcp_keepalive(new_sock);
        if (ret < 0)
                goto out;

        rds_tcp_tune(new_sock);

        inet = inet_sk(new_sock->sk);

        rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
                 &inet->inet_saddr, ntohs(inet->inet_sport),
                 &inet->inet_daddr, ntohs(inet->inet_dport));

        conn = rds_conn_create(sock_net(sock->sk),
                               inet->inet_saddr, inet->inet_daddr,
                               &rds_tcp_transport, GFP_KERNEL);
        if (IS_ERR(conn)) {
                ret = PTR_ERR(conn);
                goto out;
        }
        /* An incoming SYN request came in, and TCP just accepted it.
         * We always create a new conn for listen side of TCP, and do not
         * add it to the c_hash_list.
         *
         * If the client reboots, this conn will need to be cleaned up.
         * rds_tcp_state_change() will do that cleanup
         */
        rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
        WARN_ON(!rs_tcp || rs_tcp->t_sock);

        /*
         * see the comment above rds_queue_delayed_reconnect()
         */
        if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                if (rds_conn_state(conn) == RDS_CONN_UP)
                        rds_tcp_stats_inc(s_tcp_listen_closed_stale);
                else
                        rds_tcp_stats_inc(s_tcp_connect_raced);
                rds_conn_drop(conn);
                ret = 0;
                goto out;
        }

        rds_tcp_set_callbacks(new_sock, conn);
        rds_connect_complete(conn);
        new_sock = NULL;
        ret = 0;

out:
        if (new_sock)
                sock_release(new_sock);
        return ret;
}

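/* Work function behind rds_tcp_listen_work: drain the listen socket's
 * accept queue, calling rds_tcp_accept_one() until it returns an error
 * (typically -EAGAIN once no more connections are pending).
 */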
static void rds_tcp_accept_worker(struct work_struct *work)
{
        while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
                cond_resched();
}

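/* sk_data_ready callback installed on the listen socket by
 * rds_tcp_listen_init(). It is invoked from the TCP receive path
 * (bottom-half context), so it does no real work itself: it queues
 * rds_tcp_listen_work on rds_wq and then chains to the original
 * data_ready handler that was stashed in sk_user_data.
 */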
void rds_tcp_listen_data_ready(struct sock *sk)
{
        void (*ready)(struct sock *sk);

        rdsdebug("listen data ready sk %p\n", sk);

        read_lock(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
                goto out;
        }

        /*
         * ->sk_data_ready is also called for a newly established child socket
         * before it has been accepted and the accepter has set up their
         * data_ready.. we only want to queue listen work for our listening
         * socket
         */
        if (sk->sk_state == TCP_LISTEN)
                queue_work(rds_wq, &rds_tcp_listen_work);

out:
        read_unlock(&sk->sk_callback_lock);
        ready(sk);
}

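/* Create, bind and start the RDS/TCP listen socket on RDS_TCP_PORT.
 * The socket is created in the caller's network namespace
 * (current->nsproxy->net_ns) rather than implicitly in init_net, which
 * is what lets RDS/TCP operate in a netns other than init_net, as per
 * the patch subject above. Because it is a kernel socket it holds no
 * reference on that netns; see the comment below about avoiding
 * get_net().
 */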
int rds_tcp_listen_init(void)
{
        struct sockaddr_in sin;
        struct socket *sock = NULL;
        int ret;

        /* MUST call sock_create_kern directly so that we avoid get_net()
         * in sk_alloc(). Doing a get_net() will result in cleanup_net()
         * never getting invoked, which will leave sock and other things
         * in limbo.
         */
        ret = sock_create_kern(current->nsproxy->net_ns, PF_INET,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
                goto out;

        sock->sk->sk_reuse = SK_CAN_REUSE;
        rds_tcp_nonagle(sock);

        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_user_data = sock->sk->sk_data_ready;
        sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
        write_unlock_bh(&sock->sk->sk_callback_lock);

        sin.sin_family = PF_INET;
        sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
        sin.sin_port = (__force u16)htons(RDS_TCP_PORT);

        ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
        if (ret < 0)
                goto out;

        ret = sock->ops->listen(sock, 64);
        if (ret < 0)
                goto out;

        rds_tcp_listen_sock = sock;
        sock = NULL;
out:
        if (sock)
                sock_release(sock);
        return ret;
}

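/* Tear down the listen socket: restore the original data_ready under
 * the socket and callback locks so no further listen work is queued,
 * flush rds_wq so that any accept work already queued has completed,
 * and only then release the socket.
 */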
void rds_tcp_listen_stop(void)
{
        struct socket *sock = rds_tcp_listen_sock;
        struct sock *sk;

        if (!sock)
                return;

        sk = sock->sk;

        /* serialize with and prevent further callbacks */
        lock_sock(sk);
        write_lock_bh(&sk->sk_callback_lock);
        if (sk->sk_user_data) {
                sk->sk_data_ready = sk->sk_user_data;
                sk->sk_user_data = NULL;
        }
        write_unlock_bh(&sk->sk_callback_lock);
        release_sock(sk);

        /* wait for accepts to stop and close the socket */
        flush_workqueue(rds_wq);
        sock_release(sock);
        rds_tcp_listen_sock = NULL;
}