Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/clntlock.c | |
3 | * | |
4 | * Lock handling for the client side NLM implementation | |
5 | * | |
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/types.h> | |
11 | #include <linux/time.h> | |
12 | #include <linux/nfs_fs.h> | |
13 | #include <linux/sunrpc/clnt.h> | |
14 | #include <linux/sunrpc/svc.h> | |
15 | #include <linux/lockd/lockd.h> | |
16 | #include <linux/smp_lock.h> | |
17 | ||
18 | #define NLMDBG_FACILITY NLMDBG_CLIENT | |
19 | ||
20 | /* | |
21 | * Local function prototypes | |
22 | */ | |
23 | static int reclaimer(void *ptr); | |
24 | ||
25 | /* | |
26 | * The following functions handle blocking and granting from the | |
27 | * client perspective. | |
28 | */ | |
29 | ||
/*
 * This is the representation of a blocked client lock.
 * One is queued on nlm_blocked for every lock request we sent with
 * the "block" flag set; the server's GRANTED callback finds it there.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked into nlm_blocked */
	wait_queue_head_t	b_wait;		/* where the waiter sleeps */
	struct nlm_host *	b_host;		/* server the request went to */
	struct file_lock *	b_lock;		/* local file lock being waited on */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status; NLM_LCK_BLOCKED
						 * while still waiting */
};
41 | ||
4f15e2b1 | 42 | static LIST_HEAD(nlm_blocked); |
1da177e4 LT |
43 | |
44 | /* | |
ecdbf769 | 45 | * Queue up a lock for blocking so that the GRANTED request can see it |
1da177e4 | 46 | */ |
ecdbf769 TM |
47 | int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl) |
48 | { | |
49 | struct nlm_wait *block; | |
50 | ||
51 | BUG_ON(req->a_block != NULL); | |
52 | block = kmalloc(sizeof(*block), GFP_KERNEL); | |
53 | if (block == NULL) | |
54 | return -ENOMEM; | |
55 | block->b_host = host; | |
56 | block->b_lock = fl; | |
57 | init_waitqueue_head(&block->b_wait); | |
58 | block->b_status = NLM_LCK_BLOCKED; | |
59 | ||
60 | list_add(&block->b_list, &nlm_blocked); | |
61 | req->a_block = block; | |
62 | ||
63 | return 0; | |
64 | } | |
65 | ||
66 | void nlmclnt_finish_block(struct nlm_rqst *req) | |
1da177e4 | 67 | { |
ecdbf769 TM |
68 | struct nlm_wait *block = req->a_block; |
69 | ||
70 | if (block == NULL) | |
71 | return; | |
72 | req->a_block = NULL; | |
73 | list_del(&block->b_list); | |
74 | kfree(block); | |
75 | } | |
1da177e4 | 76 | |
ecdbf769 TM |
/*
 * Block on a lock: sleep until the server's GRANTED callback flips
 * block->b_status away from NLM_LCK_BLOCKED, or until @timeout jiffies
 * elapse.
 *
 * Returns the wait_event_interruptible_timeout() result: 0 on timeout,
 * -ERESTARTSYS if interrupted by a signal, otherwise the jiffies
 * remaining.  When a callback did arrive, the granted status is copied
 * into req->a_res.status and b_status is re-armed to NLM_LCK_BLOCKED so
 * the same block can be waited on again if the request is retried.
 */
long nlmclnt_block(struct nlm_rqst *req, long timeout)
{
	struct nlm_wait	*block = req->a_block;
	long ret;

	/* A borken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (!req->a_args.block)
		return -EAGAIN;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != NLM_LCK_BLOCKED,
			timeout);

	if (block->b_status != NLM_LCK_BLOCKED) {
		req->a_res.status = block->b_status;
		/* Re-arm the block for a possible retry of the request. */
		block->b_status = NLM_LCK_BLOCKED;
	}

	return ret;
}
110 | ||
/*
 * The server lockd has called us back to tell us the lock was granted.
 * Find every queued nlm_wait that matches the (lock, address, fh)
 * triple, mark it granted, and wake its waiter.  Returns nlm_granted if
 * at least one waiter matched, nlm_lck_denied otherwise.
 */
u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
{
	const struct file_lock *fl = &lock->fl;
	const struct nfs_fh *fh = &lock->fh;
	struct nlm_wait	*block;
	u32 res = nlm_lck_denied;

	/*
	 * Look up blocked request based on arguments.
	 * Warning: must not use cookie to match it!
	 */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		struct file_lock *fl_blocked = block->b_lock;

		if (!nlm_compare_locks(fl_blocked, fl))
			continue;
		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
			continue;
		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode) ,fh) != 0)
			continue;
		/* Alright, we found a lock. Set the return status
		 * and wake up the caller.  Keep scanning: more than one
		 * waiter may match the same lock description.
		 */
		block->b_status = NLM_LCK_GRANTED;
		wake_up(&block->b_wait);
		res = nlm_granted;
	}
	return res;
}
143 | ||
144 | /* | |
145 | * The following procedures deal with the recovery of locks after a | |
146 | * server crash. | |
147 | */ | |
148 | ||
149 | /* | |
150 | * Mark the locks for reclaiming. | |
151 | * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. | |
152 | * Maintain NLM lock reclaiming lists in the nlm_host instead. | |
153 | */ | |
154 | static | |
155 | void nlmclnt_mark_reclaim(struct nlm_host *host) | |
156 | { | |
157 | struct file_lock *fl; | |
158 | struct inode *inode; | |
159 | struct list_head *tmp; | |
160 | ||
161 | list_for_each(tmp, &file_lock_list) { | |
162 | fl = list_entry(tmp, struct file_lock, fl_link); | |
163 | ||
164 | inode = fl->fl_file->f_dentry->d_inode; | |
165 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | |
166 | continue; | |
9b5b1f5b TM |
167 | if (fl->fl_u.nfs_fl.owner == NULL) |
168 | continue; | |
1da177e4 LT |
169 | if (fl->fl_u.nfs_fl.owner->host != host) |
170 | continue; | |
171 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) | |
172 | continue; | |
173 | fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; | |
174 | } | |
175 | } | |
176 | ||
177 | /* | |
178 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, | |
179 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. | |
180 | */ | |
181 | static inline | |
182 | void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) | |
183 | { | |
184 | host->h_monitored = 0; | |
185 | host->h_nsmstate = newstate; | |
186 | host->h_state++; | |
187 | host->h_nextrebind = 0; | |
188 | nlm_rebind_host(host); | |
189 | nlmclnt_mark_reclaim(host); | |
190 | dprintk("NLM: reclaiming locks for host %s", host->h_name); | |
191 | } | |
192 | ||
/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_reclaiming++) {
		/* A reclaim is already running; only refresh the reclaim
		 * state if the server has rebooted yet again. */
		if (host->h_nsmstate == newstate)
			return;
		nlmclnt_prepare_reclaim(host, newstate);
	} else {
		nlmclnt_prepare_reclaim(host, newstate);
		/* Pin the host and this module for the thread's lifetime;
		 * reclaimer() releases both when it exits. */
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
			module_put(THIS_MODULE); /* thread never started: undo pin */
	}
}
212 | ||
/*
 * Kernel thread that reclaims every lock marked NFS_LCK_RECLAIM against
 * @ptr (the nlm_host), then wakes any waiters blocked on that host so
 * they retry and see NLM_LCK_DENIED_GRACE_PERIOD.  Drops the host and
 * module references taken by nlmclnt_recovery() before exiting.
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner == NULL)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		if (signalled())
			break;
		/* nlmclnt_reclaim() may have slept, so file_lock_list may
		 * have changed under us: rescan from the top.  Already
		 * reclaimed locks are skipped via the cleared flag. */
		goto restart;
	}

	host->h_reclaiming = 0;

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	/* Drops the module reference taken in nlmclnt_recovery(). */
	module_put_and_exit(0);
}
267 | } |