Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/host.c | |
3 | * | |
4 | * Management for NLM peer hosts. The nlm_host struct is shared | |
5 | * between client and server implementation. The only reason to | |
6 | * do so is to reduce code bloat. | |
7 | * | |
8 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
9 | */ | |
10 | ||
11 | #include <linux/types.h> | |
12 | #include <linux/sched.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/in.h> | |
15 | #include <linux/sunrpc/clnt.h> | |
16 | #include <linux/sunrpc/svc.h> | |
17 | #include <linux/lockd/lockd.h> | |
18 | #include <linux/lockd/sm_inter.h> | |
19 | ||
20 | ||
21 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE | |
22 | #define NLM_HOST_MAX 64 | |
23 | #define NLM_HOST_NRHASH 32 | |
24 | #define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1)) | |
25 | #define NLM_HOST_REBIND (60 * HZ) | |
26 | #define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ) | |
27 | #define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ) | |
28 | #define NLM_HOST_ADDR(sv) (&(sv)->s_nlmclnt->cl_xprt->addr) | |
29 | ||
30 | static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; | |
31 | static unsigned long next_gc; | |
32 | static int nrhosts; | |
33 | static DECLARE_MUTEX(nlm_host_sema); | |
34 | ||
35 | ||
36 | static void nlm_gc_hosts(void); | |
37 | ||
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 *
 * Client-side entry point: callers on the NFS client use this to get a
 * handle for a remote NLM server.  Thin wrapper around nlm_lookup_host()
 * with server == 0.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	/* 0: a client-side handle describing a remote server */
	return nlm_lookup_host(0, sin, proto, version);
}
46 | ||
47 | /* | |
48 | * Find an NLM client handle in the cache. If there is none, create it. | |
49 | */ | |
50 | struct nlm_host * | |
51 | nlmsvc_lookup_host(struct svc_rqst *rqstp) | |
52 | { | |
53 | return nlm_lookup_host(1, &rqstp->rq_addr, | |
54 | rqstp->rq_prot, rqstp->rq_vers); | |
55 | } | |
56 | ||
57 | /* | |
58 | * Common host lookup routine for server & client | |
59 | */ | |
60 | struct nlm_host * | |
61 | nlm_lookup_host(int server, struct sockaddr_in *sin, | |
62 | int proto, int version) | |
63 | { | |
64 | struct nlm_host *host, **hp; | |
65 | u32 addr; | |
66 | int hash; | |
67 | ||
68 | dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n", | |
69 | (unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version); | |
70 | ||
71 | hash = NLM_ADDRHASH(sin->sin_addr.s_addr); | |
72 | ||
73 | /* Lock hash table */ | |
74 | down(&nlm_host_sema); | |
75 | ||
76 | if (time_after_eq(jiffies, next_gc)) | |
77 | nlm_gc_hosts(); | |
78 | ||
79 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { | |
80 | if (host->h_proto != proto) | |
81 | continue; | |
82 | if (host->h_version != version) | |
83 | continue; | |
84 | if (host->h_server != server) | |
85 | continue; | |
86 | ||
87 | if (nlm_cmp_addr(&host->h_addr, sin)) { | |
88 | if (hp != nlm_hosts + hash) { | |
89 | *hp = host->h_next; | |
90 | host->h_next = nlm_hosts[hash]; | |
91 | nlm_hosts[hash] = host; | |
92 | } | |
93 | nlm_get_host(host); | |
94 | up(&nlm_host_sema); | |
95 | return host; | |
96 | } | |
97 | } | |
98 | ||
99 | /* Ooops, no host found, create it */ | |
100 | dprintk("lockd: creating host entry\n"); | |
101 | ||
102 | if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL))) | |
103 | goto nohost; | |
104 | memset(host, 0, sizeof(*host)); | |
105 | ||
106 | addr = sin->sin_addr.s_addr; | |
107 | sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr)); | |
108 | ||
109 | host->h_addr = *sin; | |
110 | host->h_addr.sin_port = 0; /* ouch! */ | |
111 | host->h_version = version; | |
112 | host->h_proto = proto; | |
113 | host->h_rpcclnt = NULL; | |
114 | init_MUTEX(&host->h_sema); | |
115 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; | |
116 | host->h_expires = jiffies + NLM_HOST_EXPIRE; | |
117 | atomic_set(&host->h_count, 1); | |
118 | init_waitqueue_head(&host->h_gracewait); | |
119 | host->h_state = 0; /* pseudo NSM state */ | |
120 | host->h_nsmstate = 0; /* real NSM state */ | |
121 | host->h_server = server; | |
122 | host->h_next = nlm_hosts[hash]; | |
123 | nlm_hosts[hash] = host; | |
124 | INIT_LIST_HEAD(&host->h_lockowners); | |
125 | spin_lock_init(&host->h_lock); | |
126 | ||
127 | if (++nrhosts > NLM_HOST_MAX) | |
128 | next_gc = 0; | |
129 | ||
130 | nohost: | |
131 | up(&nlm_host_sema); | |
132 | return host; | |
133 | } | |
134 | ||
135 | struct nlm_host * | |
136 | nlm_find_client(void) | |
137 | { | |
138 | /* find a nlm_host for a client for which h_killed == 0. | |
139 | * and return it | |
140 | */ | |
141 | int hash; | |
142 | down(&nlm_host_sema); | |
143 | for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { | |
144 | struct nlm_host *host, **hp; | |
145 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { | |
146 | if (host->h_server && | |
147 | host->h_killed == 0) { | |
148 | nlm_get_host(host); | |
149 | up(&nlm_host_sema); | |
150 | return host; | |
151 | } | |
152 | } | |
153 | } | |
154 | up(&nlm_host_sema); | |
155 | return NULL; | |
156 | } | |
157 | ||
158 | ||
159 | /* | |
160 | * Create the NLM RPC client for an NLM peer | |
161 | */ | |
/* NOTE(review): returns the host's rpc_clnt (creating transport and
 * client on first use) or NULL on failure; host->h_sema is held only
 * for the duration of the call. */
162 | struct rpc_clnt * | |
163 | nlm_bind_host(struct nlm_host *host) | |
164 | { | |
165 | struct rpc_clnt *clnt; | |
166 | struct rpc_xprt *xprt; | |
167 | ||
168 | dprintk("lockd: nlm_bind_host(%08x)\n", | |
169 | (unsigned)ntohl(host->h_addr.sin_addr.s_addr)); | |
170 | ||
171 | /* Lock host handle */ | |
172 | down(&host->h_sema); | |
173 | ||
174 | /* If we've already created an RPC client, check whether | |
175 | * RPC rebind is required | |
1da177e4 LT |
176 | */ |
177 | if ((clnt = host->h_rpcclnt) != NULL) { | |
/* NOTE(review): xprt is assigned here but never used in this branch —
 * looks like a dead store left over from an earlier rebind scheme. */
178 | xprt = clnt->cl_xprt; | |
43118c29 | 179 | if (time_after_eq(jiffies, host->h_nextrebind)) { |
/* Mark the client for re-querying the remote portmapper; the actual
 * lookup happens on the next RPC call. */
35f5a422 | 180 | rpc_force_rebind(clnt); |
1da177e4 LT |
181 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; |
182 | dprintk("lockd: next rebind in %ld jiffies\n", | |
183 | host->h_nextrebind - jiffies); | |
184 | } | |
185 | } else { | |
/* First use: create the transport for this peer/protocol. */
186 | xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL); | |
187 | if (IS_ERR(xprt)) | |
188 | goto forgetit; | |
189 | ||
190 | xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout); | |
5ee0ed7d | 191 | xprt->resvport = 1; /* NLM requires a reserved port | |
1da177e4 LT |
192 | |
193 | /* Existing NLM servers accept AUTH_UNIX only */ | |
194 | clnt = rpc_create_client(xprt, host->h_name, &nlm_program, | |
195 | host->h_version, RPC_AUTH_UNIX); | |
/* NOTE(review): on rpc_create_client() failure we jump to forgetit
 * without freeing xprt — leaked unless rpc_create_client() destroys
 * the transport on error.  TODO confirm xprt ownership semantics. */
5b616f5d | 196 | if (IS_ERR(clnt)) |
1da177e4 | 197 | goto forgetit; |
1da177e4 | 198 | clnt->cl_autobind = 1; /* turn on pmap queries */ |
1da177e4 LT |
199 | |
200 | host->h_rpcclnt = clnt; | |
201 | } | |
202 | ||
203 | up(&host->h_sema); | |
204 | return clnt; | |
205 | ||
206 | forgetit: | |
207 | printk("lockd: couldn't create RPC handle for %s\n", host->h_name); | |
208 | up(&host->h_sema); | |
209 | return NULL; | |
210 | } | |
211 | ||
212 | /* | |
213 | * Force a portmap lookup of the remote lockd port | |
214 | */ | |
215 | void | |
216 | nlm_rebind_host(struct nlm_host *host) | |
217 | { | |
218 | dprintk("lockd: rebind host %s\n", host->h_name); | |
219 | if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { | |
35f5a422 | 220 | rpc_force_rebind(host->h_rpcclnt); |
1da177e4 LT |
221 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; |
222 | } | |
223 | } | |
224 | ||
225 | /* | |
226 | * Increment NLM host count | |
227 | */ | |
228 | struct nlm_host * nlm_get_host(struct nlm_host *host) | |
229 | { | |
230 | if (host) { | |
231 | dprintk("lockd: get host %s\n", host->h_name); | |
232 | atomic_inc(&host->h_count); | |
233 | host->h_expires = jiffies + NLM_HOST_EXPIRE; | |
234 | } | |
235 | return host; | |
236 | } | |
237 | ||
238 | /* | |
239 | * Release NLM host after use | |
240 | */ | |
241 | void nlm_release_host(struct nlm_host *host) | |
242 | { | |
243 | if (host != NULL) { | |
244 | dprintk("lockd: release host %s\n", host->h_name); | |
245 | atomic_dec(&host->h_count); | |
246 | BUG_ON(atomic_read(&host->h_count) < 0); | |
247 | } | |
248 | } | |
249 | ||
250 | /* | |
251 | * Shut down the hosts module. | |
252 | * Note that this routine is called only at server shutdown time. | |
253 | */ | |
254 | void | |
255 | nlm_shutdown_hosts(void) | |
256 | { | |
257 | struct nlm_host *host; | |
258 | int i; | |
259 | ||
260 | dprintk("lockd: shutting down host module\n"); | |
261 | down(&nlm_host_sema); | |
262 | ||
263 | /* First, make all hosts eligible for gc */ | |
264 | dprintk("lockd: nuking all hosts...\n"); | |
265 | for (i = 0; i < NLM_HOST_NRHASH; i++) { | |
266 | for (host = nlm_hosts[i]; host; host = host->h_next) | |
267 | host->h_expires = jiffies - 1; | |
268 | } | |
269 | ||
270 | /* Then, perform a garbage collection pass */ | |
271 | nlm_gc_hosts(); | |
272 | up(&nlm_host_sema); | |
273 | ||
274 | /* complain if any hosts are left */ | |
275 | if (nrhosts) { | |
276 | printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); | |
277 | dprintk("lockd: %d hosts left:\n", nrhosts); | |
278 | for (i = 0; i < NLM_HOST_NRHASH; i++) { | |
279 | for (host = nlm_hosts[i]; host; host = host->h_next) { | |
280 | dprintk(" %s (cnt %d use %d exp %ld)\n", | |
281 | host->h_name, atomic_read(&host->h_count), | |
282 | host->h_inuse, host->h_expires); | |
283 | } | |
284 | } | |
285 | } | |
286 | } | |
287 | ||
288 | /* | |
289 | * Garbage collect any unused NLM hosts. | |
290 | * This GC combines reference counting for async operations with | |
291 | * mark & sweep for resources held by remote clients. | |
292 | */ | |
/* NOTE(review): caller must hold nlm_host_sema — both call sites
 * (nlm_lookup_host, nlm_shutdown_hosts) take it before calling. */
293 | static void | |
294 | nlm_gc_hosts(void) | |
295 | { | |
296 | struct nlm_host **q, *host; | |
297 | struct rpc_clnt *clnt; | |
298 | int i; | |
299 | ||
300 | dprintk("lockd: host garbage collection\n"); | |
/* Phase 1: clear every host's in-use mark. */
301 | for (i = 0; i < NLM_HOST_NRHASH; i++) { | |
302 | for (host = nlm_hosts[i]; host; host = host->h_next) | |
303 | host->h_inuse = 0; | |
304 | } | |
305 | ||
306 | /* Mark all hosts that hold locks, blocks or shares */ | |
307 | nlmsvc_mark_resources(); | |
308 | ||
/* Phase 2: sweep.  q is the link slot pointing at the current host so
 * a dead entry can be unlinked in place while walking the chain. */
309 | for (i = 0; i < NLM_HOST_NRHASH; i++) { | |
310 | q = &nlm_hosts[i]; | |
311 | while ((host = *q) != NULL) { | |
/* Keep the host if it is referenced, was marked in use, or has not
 * yet expired. */
312 | if (atomic_read(&host->h_count) || host->h_inuse | |
313 | || time_before(jiffies, host->h_expires)) { | |
314 | dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n", | |
315 | host->h_name, atomic_read(&host->h_count), | |
316 | host->h_inuse, host->h_expires); | |
317 | q = &host->h_next; | |
318 | continue; | |
319 | } | |
320 | dprintk("lockd: delete host %s\n", host->h_name); | |
/* Unlink from the chain; q intentionally stays put so the next
 * iteration examines the entry that slid into this slot. */
321 | *q = host->h_next; | |
322 | /* Don't unmonitor hosts that have been invalidated */ | |
323 | if (host->h_monitored && !host->h_killed) | |
324 | nsm_unmonitor(host); | |
325 | if ((clnt = host->h_rpcclnt) != NULL) { | |
/* NOTE(review): cl_dead = 1 defers destruction of a still-referenced
 * RPC client — presumably the RPC layer frees it when cl_users drops
 * to zero; confirm against the sunrpc client code. */
326 | if (atomic_read(&clnt->cl_users)) { | |
327 | printk(KERN_WARNING | |
328 | "lockd: active RPC handle\n"); | |
329 | clnt->cl_dead = 1; | |
330 | } else { | |
331 | rpc_destroy_client(host->h_rpcclnt); | |
332 | } | |
333 | } | |
/* A collectable host must not still own lock owners. */
334 | BUG_ON(!list_empty(&host->h_lockowners)); | |
335 | kfree(host); | |
336 | nrhosts--; | |
337 | } | |
338 | } | |
339 | ||
/* Schedule the next GC pass. */
340 | next_gc = jiffies + NLM_HOST_COLLECT; | |
341 | } | |
342 |