/* net/rxrpc/transport.c — as of commit "rxrpc: Rework peer object handling
 * to use hash table and RCU" (deliverable/linux.git)
 */
1 /* RxRPC point-to-point transport session management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/net.h>
16 #include <linux/skbuff.h>
17 #include <linux/slab.h>
18 #include <net/sock.h>
19 #include <net/af_rxrpc.h>
20 #include "ar-internal.h"
21
/*
 * Time after last use at which transport record is cleaned up.
 * (in seconds; defaults to one day)
 */
unsigned int rxrpc_transport_expiry = 3600 * 24;

static void rxrpc_transport_reaper(struct work_struct *work);

/* List of all extant transports; additions, removals and the usage-count
 * transitions that accompany them are guarded by rxrpc_transport_lock. */
static LIST_HEAD(rxrpc_transports);
static DEFINE_RWLOCK(rxrpc_transport_lock);
/* Deferred work that reaps transports whose usage count has been zero for
 * longer than rxrpc_transport_expiry. */
static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
32
33 /*
34 * allocate a new transport session manager
35 */
36 static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
37 struct rxrpc_peer *peer,
38 gfp_t gfp)
39 {
40 struct rxrpc_transport *trans;
41
42 _enter("");
43
44 trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
45 if (trans) {
46 trans->local = local;
47 trans->peer = peer;
48 INIT_LIST_HEAD(&trans->link);
49 trans->bundles = RB_ROOT;
50 trans->client_conns = RB_ROOT;
51 trans->server_conns = RB_ROOT;
52 skb_queue_head_init(&trans->error_queue);
53 spin_lock_init(&trans->client_lock);
54 rwlock_init(&trans->conn_lock);
55 atomic_set(&trans->usage, 1);
56 trans->conn_idcounter = peer->srx.srx_service << 16;
57 trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
58
59 if (peer->srx.transport.family == AF_INET) {
60 switch (peer->srx.transport_type) {
61 case SOCK_DGRAM:
62 INIT_WORK(&trans->error_handler,
63 rxrpc_UDP_error_handler);
64 break;
65 default:
66 BUG();
67 break;
68 }
69 } else {
70 BUG();
71 }
72 }
73
74 _leave(" = %p", trans);
75 return trans;
76 }
77
78 /*
79 * obtain a transport session for the nominated endpoints
80 */
/*
 * obtain a transport session for the nominated pair of endpoints, creating
 * a new one if no matching record exists
 *
 * Returns the transport with its usage count incremented, or
 * ERR_PTR(-ENOMEM) if a new record was needed but couldn't be allocated.
 * A newly listed transport takes refs on both its local and peer endpoints.
 */
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
					    struct rxrpc_peer *peer,
					    gfp_t gfp)
{
	struct rxrpc_transport *trans, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list first */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_transport(local, peer, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_transport_lock);

	/* recheck under the write lock in case someone else added the same
	 * transport whilst we were allocating */
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	trans = candidate;
	candidate = NULL;
	usage = atomic_read(&trans->usage); /* == 1, set by the allocator */

	/* a listed transport pins its endpoints; refs dropped on cleanup */
	rxrpc_get_local(trans->local);
	rxrpc_get_peer(trans->peer);
	list_add_tail(&trans->link, &rxrpc_transports);
	write_unlock_bh(&rxrpc_transport_lock);
	new = "new";

success:
	_net("TRANSPORT %s %d local %d -> peer %d",
	     new,
	     trans->debug_id,
	     trans->local->debug_id,
	     trans->peer->debug_id);

	_leave(" = %p {u=%d}", trans, usage);
	return trans;

	/* we found the transport in the list immediately */
found_extant_transport:
	usage = atomic_inc_return(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	goto success;

	/* we found the transport on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&trans->usage);
	write_unlock_bh(&rxrpc_transport_lock);
	/* the candidate was never made visible, so a plain kfree suffices */
	kfree(candidate);
	goto success;
}
152
153 /*
154 * find the transport connecting two endpoints
155 */
156 struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
157 struct rxrpc_peer *peer)
158 {
159 struct rxrpc_transport *trans;
160
161 _enter("{%pI4+%hu},{%pI4+%hu},",
162 &local->srx.transport.sin.sin_addr,
163 ntohs(local->srx.transport.sin.sin_port),
164 &peer->srx.transport.sin.sin_addr,
165 ntohs(peer->srx.transport.sin.sin_port));
166
167 /* search the transport list */
168 read_lock_bh(&rxrpc_transport_lock);
169
170 list_for_each_entry(trans, &rxrpc_transports, link) {
171 if (trans->local == local && trans->peer == peer)
172 goto found_extant_transport;
173 }
174
175 read_unlock_bh(&rxrpc_transport_lock);
176 _leave(" = NULL");
177 return NULL;
178
179 found_extant_transport:
180 atomic_inc(&trans->usage);
181 read_unlock_bh(&rxrpc_transport_lock);
182 _leave(" = %p", trans);
183 return trans;
184 }
185
186 /*
187 * release a transport session
188 */
/*
 * release a reference on a transport session
 *
 * The final put does not free the record immediately: the transport stays
 * on the list with usage == 0 and the reaper work item is kicked to dispose
 * of it once rxrpc_transport_expiry has elapsed.
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

	/* stamp the last-use time before dropping the ref so the reaper never
	 * sees a zero usage count paired with a stale timestamp */
	trans->put_time = ktime_get_seconds();
	if (unlikely(atomic_dec_and_test(&trans->usage))) {
		_debug("zombie");
		/* let the reaper determine the timeout to avoid a race with
		 * overextending the timeout if the reaper is running at the
		 * same time */
		rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	}
	_leave("");
}
205
206 /*
207 * clean up a transport session
208 */
209 static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
210 {
211 _net("DESTROY TRANS %d", trans->debug_id);
212
213 rxrpc_purge_queue(&trans->error_queue);
214
215 rxrpc_put_local(trans->local);
216 rxrpc_put_peer(trans->peer);
217 kfree(trans);
218 }
219
220 /*
221 * reap dead transports that have passed their expiry date
222 */
/*
 * reap dead transports that have passed their expiry date
 *
 * Runs from a workqueue: queued by rxrpc_put_transport() when a usage count
 * hits zero and requeued by itself for whichever record expires next.
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

		/* still referenced - leave it alone */
		if (likely(atomic_read(&trans->usage) > 0))
			continue;

		reap_time = trans->put_time + rxrpc_transport_expiry;
		if (reap_time <= now)
			/* move to the graveyard; destroyed after the lock
			 * is dropped */
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			/* not due yet - remember the soonest expiry */
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

	/* reschedule ourselves for the next record due to expire, if any */
	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}
272
273 /*
274 * preemptively destroy all the transport session records rather than waiting
275 * for them to time out
276 */
/*
 * preemptively destroy all the transport session records rather than
 * waiting for them to time out
 *
 * Called at module exit.  Zeroing the expiry first makes every unused
 * transport immediately eligible, then a final reaper pass is queued to
 * sweep them all; cancelling any pending delayed run avoids a redundant
 * later invocation.
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

	rxrpc_transport_expiry = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}
/* (git web viewer footer: "This page took 0.059278 seconds and 6 git
 * commands to generate.") */