Merge master.kernel.org:/home/rmk/linux-2.6-arm
[deliverable/linux.git] / fs / lockd / host.c
1 /*
2 * linux/fs/lockd/host.c
3 *
4 * Management for NLM peer hosts. The nlm_host struct is shared
5 * between client and server implementation. The only reason to
6 * do so is to reduce code bloat.
7 *
8 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
9 */
10
11 #include <linux/types.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/in.h>
15 #include <linux/sunrpc/clnt.h>
16 #include <linux/sunrpc/svc.h>
17 #include <linux/lockd/lockd.h>
18 #include <linux/lockd/sm_inter.h>
19
20
21 #define NLMDBG_FACILITY NLMDBG_HOSTCACHE
22 #define NLM_HOST_MAX 64
23 #define NLM_HOST_NRHASH 32
24 #define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1))
25 #define NLM_HOST_REBIND (60 * HZ)
26 #define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
27 #define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
28 #define NLM_HOST_ADDR(sv) (&(sv)->s_nlmclnt->cl_xprt->addr)
29
30 static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
31 static unsigned long next_gc;
32 static int nrhosts;
33 static DECLARE_MUTEX(nlm_host_sema);
34
35
36 static void nlm_gc_hosts(void);
37
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 * Client-side entry point: delegates to nlm_lookup_host() with the
 * "server" flag cleared.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	int server = 0;		/* we are the client side */

	return nlm_lookup_host(server, sin, proto, version);
}
46
47 /*
48 * Find an NLM client handle in the cache. If there is none, create it.
49 */
50 struct nlm_host *
51 nlmsvc_lookup_host(struct svc_rqst *rqstp)
52 {
53 return nlm_lookup_host(1, &rqstp->rq_addr,
54 rqstp->rq_prot, rqstp->rq_vers);
55 }
56
/*
 * Common host lookup routine for server & client
 *
 * Looks up the nlm_host for (@sin, @proto, @version, @server direction)
 * in the hash table, taking a reference on a hit; on a miss a new,
 * zeroed entry is allocated, inserted and returned with one reference.
 * Returns NULL only when the allocation fails.  The entire table is
 * serialized by nlm_host_sema.
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
					int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	down(&nlm_host_sema);

	/* Piggy-back garbage collection on lookups once next_gc is due */
	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
			/* Move the hit to the front of its chain (MRU)
			 * so frequently used hosts are found quickly. */
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			up(&nlm_host_sema);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
		goto nohost;
	memset(host, 0, sizeof(*host));

	/* h_name is just the peer's dotted-quad address */
	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version = version;
	host->h_proto = proto;
	host->h_rpcclnt = NULL;
	init_MUTEX(&host->h_sema);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	host->h_state = 0;			/* pseudo NSM state */
	host->h_nsmstate = 0;			/* real NSM state */
	host->h_server = server;
	host->h_next = nlm_hosts[hash];
	nlm_hosts[hash] = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);

	/* Table over capacity: force a GC pass on the next lookup */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	up(&nlm_host_sema);
	return host;
}
134
135 struct nlm_host *
136 nlm_find_client(void)
137 {
138 /* find a nlm_host for a client for which h_killed == 0.
139 * and return it
140 */
141 int hash;
142 down(&nlm_host_sema);
143 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
144 struct nlm_host *host, **hp;
145 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
146 if (host->h_server &&
147 host->h_killed == 0) {
148 nlm_get_host(host);
149 up(&nlm_host_sema);
150 return host;
151 }
152 }
153 }
154 up(&nlm_host_sema);
155 return NULL;
156 }
157
158
/*
 * Create the NLM RPC client for an NLM peer
 *
 * Returns the cached (or freshly created) rpc_clnt for @host, or NULL
 * on failure.  Serialized per-host by host->h_sema.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	down(&host->h_sema);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 * Note: why keep rebinding if we're on a tcp connection?
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		/* Clearing cl_port forces a fresh portmap query on the
		 * next RPC call; only done for datagram transports. */
		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
			clnt->cl_port = 0;
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
		xprt->nocong = 1;	/* No congestion control for NLM */
		xprt->resvport = 1;	/* NLM requires a reserved port */

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		/* NOTE(review): on failure rpc_create_client is assumed to
		 * dispose of the xprt itself (as it does in this kernel
		 * generation) — confirm if the RPC layer is updated. */
		if (IS_ERR(clnt))
			goto forgetit;
		clnt->cl_autobind = 1;	/* turn on pmap queries */

		host->h_rpcclnt = clnt;
	}

	up(&host->h_sema);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	up(&host->h_sema);
	return NULL;
}
213
214 /*
215 * Force a portmap lookup of the remote lockd port
216 */
217 void
218 nlm_rebind_host(struct nlm_host *host)
219 {
220 dprintk("lockd: rebind host %s\n", host->h_name);
221 if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
222 host->h_rpcclnt->cl_port = 0;
223 host->h_nextrebind = jiffies + NLM_HOST_REBIND;
224 }
225 }
226
227 /*
228 * Increment NLM host count
229 */
230 struct nlm_host * nlm_get_host(struct nlm_host *host)
231 {
232 if (host) {
233 dprintk("lockd: get host %s\n", host->h_name);
234 atomic_inc(&host->h_count);
235 host->h_expires = jiffies + NLM_HOST_EXPIRE;
236 }
237 return host;
238 }
239
240 /*
241 * Release NLM host after use
242 */
243 void nlm_release_host(struct nlm_host *host)
244 {
245 if (host != NULL) {
246 dprintk("lockd: release host %s\n", host->h_name);
247 atomic_dec(&host->h_count);
248 BUG_ON(atomic_read(&host->h_count) < 0);
249 }
250 }
251
252 /*
253 * Shut down the hosts module.
254 * Note that this routine is called only at server shutdown time.
255 */
256 void
257 nlm_shutdown_hosts(void)
258 {
259 struct nlm_host *host;
260 int i;
261
262 dprintk("lockd: shutting down host module\n");
263 down(&nlm_host_sema);
264
265 /* First, make all hosts eligible for gc */
266 dprintk("lockd: nuking all hosts...\n");
267 for (i = 0; i < NLM_HOST_NRHASH; i++) {
268 for (host = nlm_hosts[i]; host; host = host->h_next)
269 host->h_expires = jiffies - 1;
270 }
271
272 /* Then, perform a garbage collection pass */
273 nlm_gc_hosts();
274 up(&nlm_host_sema);
275
276 /* complain if any hosts are left */
277 if (nrhosts) {
278 printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
279 dprintk("lockd: %d hosts left:\n", nrhosts);
280 for (i = 0; i < NLM_HOST_NRHASH; i++) {
281 for (host = nlm_hosts[i]; host; host = host->h_next) {
282 dprintk(" %s (cnt %d use %d exp %ld)\n",
283 host->h_name, atomic_read(&host->h_count),
284 host->h_inuse, host->h_expires);
285 }
286 }
287 }
288 }
289
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_sema.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	/* Clear the in-use mark on every cached entry */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: unlink and free every entry that is unreferenced,
	 * unmarked, and past its expiry time. */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;	/* keep; advance */
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;	/* unlink from chain */
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					/* RPC calls still in flight: flag the
					 * client dead and let its last user
					 * tear it down instead. */
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			BUG_ON(!list_empty(&host->h_lockowners));
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
344
This page took 0.039922 seconds and 5 git commands to generate.