rds: duplicate include net/tcp.h
net/rds/tcp.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

#define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024)

/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
	mm_segment_t oldfs = get_fs();
	int val = 1;

	set_fs(KERNEL_DS);
	sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
			      sizeof(val));
	set_fs(oldfs);
}

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation. In general these
 * customizations should be tunable via module_param()
 */
void rds_tcp_tune(struct socket *sock)
{
	rds_tcp_nonagle(sock);
}

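/* Expose TCP's send-side sequence state to the RDS send path: snd_nxt
 * is the next sequence number TCP will transmit, snd_una the oldest
 * byte not yet acked by the peer. RDS-TCP snapshots these to decide
 * which RDS messages have been fully acknowledged.
 */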
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_una;
}

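/* Undo rds_tcp_set_callbacks(): take the connection off the global info
 * list, clear t_sock, and restore the socket callbacks that were saved
 * at attach time. Everything runs under sk_callback_lock so it
 * serializes against the data_ready/write_space/state_change handlers.
 */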
void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc)
{
	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_del_init(&tc->t_list_item);
	rds_tcp_tc_count--;
	spin_unlock(&rds_tcp_tc_list_lock);

	tc->t_sock = NULL;

	sock->sk->sk_write_space = tc->t_orig_write_space;
	sock->sk->sk_data_ready = tc->t_orig_data_ready;
	sock->sk->sk_state_change = tc->t_orig_state_change;
	sock->sk->sk_user_data = NULL;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * This is the only path that sets tc->t_sock. Send and receive trust that
 * it is set. The RDS_CONN_CONNECTED bit protects those paths from being
 * called while it isn't set.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;

	rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
	rds_tcp_tc_count++;
	spin_unlock(&rds_tcp_tc_list_lock);

	/* accepted sockets need our listen data ready undone */
	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
		sock->sk->sk_data_ready = sock->sk->sk_user_data;

	tc->t_sock = sock;
	tc->conn = conn;
	tc->t_orig_data_ready = sock->sk->sk_data_ready;
	tc->t_orig_write_space = sock->sk->sk_write_space;
	tc->t_orig_state_change = sock->sk->sk_state_change;

	sock->sk->sk_user_data = conn;
	sock->sk->sk_data_ready = rds_tcp_data_ready;
	sock->sk->sk_write_space = rds_tcp_write_space;
	sock->sk->sk_state_change = rds_tcp_state_change;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

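/* Fill one rds_info_tcp_socket per connection for the rds-info
 * interface: the socket's addresses plus snapshots of the receive-side
 * header/data bytes still expected and the send-side sequence
 * bookkeeping.
 */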
static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	struct rds_info_tcp_socket tsinfo;
	struct rds_tcp_connection *tc;
	unsigned long flags;
	struct sockaddr_in sin;
	int sinlen;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo) < rds_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
		struct socket *tsock = tc->t_sock;

		/* report each connection's own addresses, not those of the
		 * socket that made the info request
		 */
		tsock->ops->getname(tsock, (struct sockaddr *)&sin, &sinlen, 0);
		tsinfo.local_addr = sin.sin_addr.s_addr;
		tsinfo.local_port = sin.sin_port;
		tsock->ops->getname(tsock, (struct sockaddr *)&sin, &sinlen, 1);
		tsinfo.peer_addr = sin.sin_addr.s_addr;
		tsinfo.peer_port = sin.sin_port;

		tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo.data_rem = tc->t_tinc_data_rem;
		tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo.last_expected_una = tc->t_last_expected_una;
		tsinfo.last_seen_una = tc->t_last_seen_una;

		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
	}

out:
	lens->nr = rds_tcp_tc_count;
	lens->each = sizeof(tsinfo);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

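/* An address is only usable as an RDS-TCP local endpoint if it is
 * assigned to this host (RTN_LOCAL) in the given namespace.
 */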
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
	if (inet_addr_type(net, addr) == RTN_LOCAL)
		return 0;
	return -EADDRNOTAVAIL;
}

static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc;

	tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
	if (!tc)
		return -ENOMEM;

	tc->t_sock = NULL;
	tc->t_tinc = NULL;
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;

	conn->c_transport_data = tc;

	spin_lock_irq(&rds_tcp_conn_lock);
	list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
	spin_unlock_irq(&rds_tcp_conn_lock);

	rdsdebug("alloced tc %p\n", conn->c_transport_data);
	return 0;
}

static void rds_tcp_conn_free(void *arg)
{
	struct rds_tcp_connection *tc = arg;
	unsigned long flags;

	rdsdebug("freeing tc %p\n", tc);

	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
	list_del(&tc->t_tcp_node);
	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

	kmem_cache_free(rds_tcp_conn_slab, tc);
}

static void rds_tcp_destroy_conns(void)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&rds_tcp_conn_lock);
	list_splice(&rds_tcp_conn_list, &tmp_list);
	INIT_LIST_HEAD(&rds_tcp_conn_list);
	spin_unlock_irq(&rds_tcp_conn_lock);

	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		if (tc->conn->c_passive)
			rds_conn_destroy(tc->conn->c_passive);
		rds_conn_destroy(tc->conn);
	}
}

static void rds_tcp_exit(void);

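/* The TCP transport's operations table; registered with the RDS core in
 * rds_tcp_init() so RDS sockets can select RDS-over-TCP.
 */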
struct rds_transport rds_tcp_transport = {
	.laddr_check = rds_tcp_laddr_check,
	.xmit_prepare = rds_tcp_xmit_prepare,
	.xmit_complete = rds_tcp_xmit_complete,
	.xmit = rds_tcp_xmit,
	.recv = rds_tcp_recv,
	.conn_alloc = rds_tcp_conn_alloc,
	.conn_free = rds_tcp_conn_free,
	.conn_connect = rds_tcp_conn_connect,
	.conn_shutdown = rds_tcp_conn_shutdown,
	.inc_copy_to_user = rds_tcp_inc_copy_to_user,
	.inc_free = rds_tcp_inc_free,
	.stats_info_copy = rds_tcp_stats_info_copy,
	.exit = rds_tcp_exit,
	.t_owner = THIS_MODULE,
	.t_name = "tcp",
	.t_type = RDS_TRANS_TCP,
	.t_prefer_loopback = 1,
};

static int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
	struct socket *rds_tcp_listen_sock;
	struct work_struct rds_tcp_accept_w;
};

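/* Drain the listen socket's accept queue; rds_tcp_accept_one() returns
 * non-zero once no pending connections remain (or on error), which ends
 * the loop.
 */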
static void rds_tcp_accept_worker(struct work_struct *work)
{
	struct rds_tcp_net *rtn = container_of(work,
					       struct rds_tcp_net,
					       rds_tcp_accept_w);

	while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
		cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

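/* Per-namespace setup: each netns gets its own RDS-TCP listen socket
 * and accept work, created when the namespace is initialized.
 */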
static __net_init int rds_tcp_init_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
	if (!rtn->rds_tcp_listen_sock) {
		pr_warn("could not set up listen sock\n");
		return -EAFNOSUPPORT;
	}
	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
	return 0;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	/* If rds_tcp_exit_net() is called as a result of netns deletion,
	 * the rds_tcp_kill_sock() device notifier would already have cleaned
	 * up the listen socket, thus there is no work to do in this function.
	 *
	 * If rds_tcp_exit_net() is called as a result of module unload,
	 * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
	 * we do need to clean up the listen socket here.
	 */
	if (rtn->rds_tcp_listen_sock) {
		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
		rtn->rds_tcp_listen_sock = NULL;
		flush_work(&rtn->rds_tcp_accept_w);
	}
}

static struct pernet_operations rds_tcp_net_ops = {
	.init = rds_tcp_init_net,
	.exit = rds_tcp_exit_net,
	.id = &rds_tcp_netid,
	.size = sizeof(struct rds_tcp_net),
};

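/* Tear down all RDS-TCP state belonging to a dying namespace: stop the
 * listener, then disconnect and destroy every connection whose socket
 * lives in that netns. Matching connections are moved to a private list
 * first so rds_conn_destroy() runs without rds_tcp_conn_lock held.
 */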
static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	struct sock *sk;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
	rtn->rds_tcp_listen_sock = NULL;
	flush_work(&rtn->rds_tcp_accept_w);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = read_pnet(&tc->conn->c_net);

		if (net != c_net || !tc->t_sock)
			continue;
		list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		sk = tc->t_sock->sk;
		sk->sk_prot->disconnect(sk, 0);
		tcp_done(sk);
		if (tc->conn->c_passive)
			rds_conn_destroy(tc->conn->c_passive);
		rds_conn_destroy(tc->conn);
	}
}

static int rds_tcp_dev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* rds-tcp registers as a pernet subsys, so the ->exit will only
	 * get invoked after network activity has quiesced. We need to
	 * clean up all sockets to quiesce network activity, and use
	 * the unregistration of the per-net loopback device as a trigger
	 * to start that cleanup.
	 */
	if (event == NETDEV_UNREGISTER_FINAL &&
	    dev->ifindex == LOOPBACK_IFINDEX)
		rds_tcp_kill_sock(dev_net(dev));

	return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
	.notifier_call = rds_tcp_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

static void rds_tcp_exit(void)
{
	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
	unregister_pernet_subsys(&rds_tcp_net_ops);
	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
		pr_warn("could not unregister rds_tcp_dev_notifier\n");
	rds_tcp_destroy_conns();
	rds_trans_unregister(&rds_tcp_transport);
	rds_tcp_recv_exit();
	kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
	int ret;

	rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
					      sizeof(struct rds_tcp_connection),
					      0, 0, NULL);
	if (!rds_tcp_conn_slab) {
		ret = -ENOMEM;
		goto out;
	}

	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
	if (ret) {
		pr_warn("could not register rds_tcp_dev_notifier\n");
		goto out_slab;
	}

	ret = register_pernet_subsys(&rds_tcp_net_ops);
	if (ret)
		goto out_notifier;

	ret = rds_tcp_recv_init();
	if (ret)
		goto out_pernet;

	ret = rds_trans_register(&rds_tcp_transport);
	if (ret)
		goto out_recv;

	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

	return 0;

	/* unwind in the reverse order of registration */
out_recv:
	rds_tcp_recv_exit();
out_pernet:
	unregister_pernet_subsys(&rds_tcp_net_ops);
out_notifier:
	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
		pr_warn("could not unregister rds_tcp_dev_notifier\n");
out_slab:
	kmem_cache_destroy(rds_tcp_conn_slab);
out:
	return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");