lockd: Make lockd use rpc_new_client() instead of rpc_create_client
fs/lockd/host.c
/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>


#define NLMDBG_FACILITY         NLMDBG_HOSTCACHE
#define NLM_HOST_MAX            64
#define NLM_HOST_NRHASH         32
#define NLM_ADDRHASH(addr)      (ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND         (60 * HZ)
#define NLM_HOST_EXPIRE         ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT        ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
#define NLM_HOST_ADDR(sv)       (&(sv)->s_nlmclnt->cl_xprt->addr)

static struct nlm_host *        nlm_hosts[NLM_HOST_NRHASH];
static unsigned long            next_gc;
static int                      nrhosts;
static DECLARE_MUTEX(nlm_host_sema);


static void                     nlm_gc_hosts(void);

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
        return nlm_lookup_host(0, sin, proto, version);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
        return nlm_lookup_host(1, &rqstp->rq_addr,
                               rqstp->rq_prot, rqstp->rq_vers);
}

/*
 * Common host lookup routine for server & client
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
                int proto, int version)
{
        struct nlm_host *host, **hp;
        u32             addr;
        int             hash;

        dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
                (unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

        hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

        /* Lock hash table */
        down(&nlm_host_sema);

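        /* Reap stale cache entries if a garbage-collection pass is due */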
        if (time_after_eq(jiffies, next_gc))
                nlm_gc_hosts();

        for (hp = &nlm_hosts[hash]; (host = *hp) != NULL; hp = &host->h_next) {
                if (host->h_proto != proto)
                        continue;
                if (host->h_version != version)
                        continue;
                if (host->h_server != server)
                        continue;

                if (nlm_cmp_addr(&host->h_addr, sin)) {
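                        /* Found a match: move the entry to the front of its
                         * hash chain so busy hosts are found quickly. */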
                        if (hp != nlm_hosts + hash) {
                                *hp = host->h_next;
                                host->h_next = nlm_hosts[hash];
                                nlm_hosts[hash] = host;
                        }
                        nlm_get_host(host);
                        up(&nlm_host_sema);
                        return host;
                }
        }

        /* Ooops, no host found, create it */
        dprintk("lockd: creating host entry\n");

        if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
                goto nohost;
        memset(host, 0, sizeof(*host));

        addr = sin->sin_addr.s_addr;
        sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

        host->h_addr = *sin;
        host->h_addr.sin_port = 0;      /* ouch! zeroed; the peer's actual NLM port is looked up via the portmapper */
        host->h_version = version;
        host->h_proto = proto;
        host->h_rpcclnt = NULL;
        init_MUTEX(&host->h_sema);
        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
        host->h_expires = jiffies + NLM_HOST_EXPIRE;
        atomic_set(&host->h_count, 1);
        init_waitqueue_head(&host->h_gracewait);
        host->h_state = 0;              /* pseudo NSM state */
        host->h_nsmstate = 0;           /* real NSM state */
        host->h_server = server;
        host->h_next = nlm_hosts[hash];
        nlm_hosts[hash] = host;
        INIT_LIST_HEAD(&host->h_lockowners);
        spin_lock_init(&host->h_lock);

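        /* Once the table holds more than NLM_HOST_MAX entries, force a
         * garbage-collection pass on the very next lookup. */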
        if (++nrhosts > NLM_HOST_MAX)
                next_gc = 0;

nohost:
        up(&nlm_host_sema);
        return host;
}

struct nlm_host *
nlm_find_client(void)
{
        /* Find a server-side nlm_host entry (h_server set) whose peer
         * has not been marked killed (h_killed == 0), take a reference
         * on it and return it.
         */
        int hash;
        down(&nlm_host_sema);
        for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
                struct nlm_host *host, **hp;
                for (hp = &nlm_hosts[hash]; (host = *hp) != NULL; hp = &host->h_next) {
                        if (host->h_server &&
                            host->h_killed == 0) {
                                nlm_get_host(host);
                                up(&nlm_host_sema);
                                return host;
                        }
                }
        }
        up(&nlm_host_sema);
        return NULL;
}


/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
        struct rpc_clnt *clnt;
        struct rpc_xprt *xprt;

        dprintk("lockd: nlm_bind_host(%08x)\n",
                (unsigned)ntohl(host->h_addr.sin_addr.s_addr));

        /* Lock host handle */
        down(&host->h_sema);

        /* If we've already created an RPC client, check whether
         * RPC rebind is required
         */
        if ((clnt = host->h_rpcclnt) != NULL) {
                xprt = clnt->cl_xprt;
                if (time_after_eq(jiffies, host->h_nextrebind)) {
                        rpc_force_rebind(clnt);
                        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
                        dprintk("lockd: next rebind in %ld jiffies\n",
                                host->h_nextrebind - jiffies);
                }
        } else {
                xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
                if (IS_ERR(xprt))
                        goto forgetit;

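                /* Set the retransmit parameters for NLM calls on this
                 * transport before creating the RPC client. */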
                xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
                xprt->resvport = 1;     /* NLM requires a reserved port */

                /* Existing NLM servers accept AUTH_UNIX only */
                clnt = rpc_new_client(xprt, host->h_name, &nlm_program,
                                        host->h_version, RPC_AUTH_UNIX);
                if (IS_ERR(clnt))
                        goto forgetit;
                clnt->cl_autobind = 1;  /* turn on pmap queries */
                clnt->cl_softrtry = 1;  /* All queries are soft */

                host->h_rpcclnt = clnt;
        }

        up(&host->h_sema);
        return clnt;

forgetit:
        printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
        up(&host->h_sema);
        return NULL;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
        dprintk("lockd: rebind host %s\n", host->h_name);
        if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
                rpc_force_rebind(host->h_rpcclnt);
                host->h_nextrebind = jiffies + NLM_HOST_REBIND;
        }
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
        if (host) {
                dprintk("lockd: get host %s\n", host->h_name);
                atomic_inc(&host->h_count);
                host->h_expires = jiffies + NLM_HOST_EXPIRE;
        }
        return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
        if (host != NULL) {
                dprintk("lockd: release host %s\n", host->h_name);
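                /* Just drop the reference; unused entries are reaped
                 * later by nlm_gc_hosts(), not freed here. */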
                atomic_dec(&host->h_count);
                BUG_ON(atomic_read(&host->h_count) < 0);
        }
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
        struct nlm_host *host;
        int             i;

        dprintk("lockd: shutting down host module\n");
        down(&nlm_host_sema);

        /* First, make all hosts eligible for gc */
        dprintk("lockd: nuking all hosts...\n");
        for (i = 0; i < NLM_HOST_NRHASH; i++) {
                for (host = nlm_hosts[i]; host; host = host->h_next)
                        host->h_expires = jiffies - 1;
        }

        /* Then, perform a garbage collection pass */
        nlm_gc_hosts();
        up(&nlm_host_sema);

        /* complain if any hosts are left */
        if (nrhosts) {
                printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
                dprintk("lockd: %d hosts left:\n", nrhosts);
                for (i = 0; i < NLM_HOST_NRHASH; i++) {
                        for (host = nlm_hosts[i]; host; host = host->h_next) {
                                dprintk(" %s (cnt %d use %d exp %ld)\n",
                                        host->h_name, atomic_read(&host->h_count),
                                        host->h_inuse, host->h_expires);
                        }
                }
        }
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
        struct nlm_host **q, *host;
        struct rpc_clnt *clnt;
        int             i;

        dprintk("lockd: host garbage collection\n");
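        /* Clear every host's in-use flag; nlmsvc_mark_resources() below
         * re-marks the hosts that still hold locks, blocks or shares. */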
        for (i = 0; i < NLM_HOST_NRHASH; i++) {
                for (host = nlm_hosts[i]; host; host = host->h_next)
                        host->h_inuse = 0;
        }

        /* Mark all hosts that hold locks, blocks or shares */
        nlmsvc_mark_resources();

        for (i = 0; i < NLM_HOST_NRHASH; i++) {
                q = &nlm_hosts[i];
                while ((host = *q) != NULL) {
                        if (atomic_read(&host->h_count) || host->h_inuse
                            || time_before(jiffies, host->h_expires)) {
                                dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
                                        host->h_name, atomic_read(&host->h_count),
                                        host->h_inuse, host->h_expires);
                                q = &host->h_next;
                                continue;
                        }
                        dprintk("lockd: delete host %s\n", host->h_name);
                        *q = host->h_next;
                        /* Don't unmonitor hosts that have been invalidated */
                        if (host->h_monitored && !host->h_killed)
                                nsm_unmonitor(host);
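                        /* Tear down the host's RPC client; if it is still in
                         * use, mark it dead so it is destroyed when its last
                         * user releases it. */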
                        if ((clnt = host->h_rpcclnt) != NULL) {
                                if (atomic_read(&clnt->cl_users)) {
                                        printk(KERN_WARNING
                                                "lockd: active RPC handle\n");
                                        clnt->cl_dead = 1;
                                } else {
                                        rpc_destroy_client(host->h_rpcclnt);
                                }
                        }
                        BUG_ON(!list_empty(&host->h_lockowners));
                        kfree(host);
                        nrhosts--;
                }
        }

        next_gc = jiffies + NLM_HOST_COLLECT;
}