Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/clntlock.c | |
3 | * | |
4 | * Lock handling for the client side NLM implementation | |
5 | * | |
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/types.h> | |
11 | #include <linux/time.h> | |
12 | #include <linux/nfs_fs.h> | |
13 | #include <linux/sunrpc/clnt.h> | |
14 | #include <linux/sunrpc/svc.h> | |
15 | #include <linux/lockd/lockd.h> | |
16 | #include <linux/smp_lock.h> | |
17 | ||
18 | #define NLMDBG_FACILITY NLMDBG_CLIENT | |
19 | ||
20 | /* | |
21 | * Local function prototypes | |
22 | */ | |
23 | static int reclaimer(void *ptr); | |
24 | ||
25 | /* | |
26 | * The following functions handle blocking and granting from the | |
27 | * client perspective. | |
28 | */ | |
29 | ||
30 | /* | |
31 | * This is the representation of a blocked client lock. | |
32 | */ | |
/*
 * One entry per process blocked on a remote lock, queued on nlm_blocked
 * while the process sleeps waiting for the server's GRANTED callback.
 * Allocated on the caller's stack in nlmclnt_block().
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list of all blocked waiters (nlm_blocked) */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;		/* server this lock request was sent to */
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status */
};
41 | ||
/* Global list of outstanding blocked lock requests (struct nlm_wait). */
static LIST_HEAD(nlm_blocked);
44 | /* | |
45 | * Block on a lock | |
46 | */ | |
47 | int | |
48 | nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp) | |
49 | { | |
50 | struct nlm_wait block, **head; | |
51 | int err; | |
52 | u32 pstate; | |
53 | ||
54 | block.b_host = host; | |
55 | block.b_lock = fl; | |
56 | init_waitqueue_head(&block.b_wait); | |
57 | block.b_status = NLM_LCK_BLOCKED; | |
4f15e2b1 | 58 | list_add(&block.b_list, &nlm_blocked); |
1da177e4 LT |
59 | |
60 | /* Remember pseudo nsm state */ | |
61 | pstate = host->h_state; | |
62 | ||
63 | /* Go to sleep waiting for GRANT callback. Some servers seem | |
64 | * to lose callbacks, however, so we're going to poll from | |
65 | * time to time just to make sure. | |
66 | * | |
67 | * For now, the retry frequency is pretty high; normally | |
68 | * a 1 minute timeout would do. See the comment before | |
69 | * nlmclnt_lock for an explanation. | |
70 | */ | |
71 | sleep_on_timeout(&block.b_wait, 30*HZ); | |
72 | ||
4f15e2b1 | 73 | list_del(&block.b_list); |
1da177e4 LT |
74 | |
75 | if (!signalled()) { | |
76 | *statp = block.b_status; | |
77 | return 0; | |
78 | } | |
79 | ||
80 | /* Okay, we were interrupted. Cancel the pending request | |
81 | * unless the server has rebooted. | |
82 | */ | |
83 | if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0) | |
84 | printk(KERN_NOTICE | |
85 | "lockd: CANCEL call failed (errno %d)\n", -err); | |
86 | ||
87 | return -ERESTARTSYS; | |
88 | } | |
89 | ||
90 | /* | |
91 | * The server lockd has called us back to tell us the lock was granted | |
92 | */ | |
93 | u32 | |
94 | nlmclnt_grant(struct nlm_lock *lock) | |
95 | { | |
96 | struct nlm_wait *block; | |
97 | ||
98 | /* | |
99 | * Look up blocked request based on arguments. | |
100 | * Warning: must not use cookie to match it! | |
101 | */ | |
4f15e2b1 | 102 | list_for_each_entry(block, &nlm_blocked, b_list) { |
1da177e4 LT |
103 | if (nlm_compare_locks(block->b_lock, &lock->fl)) |
104 | break; | |
105 | } | |
106 | ||
107 | /* Ooops, no blocked request found. */ | |
108 | if (block == NULL) | |
109 | return nlm_lck_denied; | |
110 | ||
111 | /* Alright, we found the lock. Set the return status and | |
112 | * wake up the caller. | |
113 | */ | |
114 | block->b_status = NLM_LCK_GRANTED; | |
115 | wake_up(&block->b_wait); | |
116 | ||
117 | return nlm_granted; | |
118 | } | |
119 | ||
120 | /* | |
121 | * The following procedures deal with the recovery of locks after a | |
122 | * server crash. | |
123 | */ | |
124 | ||
125 | /* | |
126 | * Mark the locks for reclaiming. | |
127 | * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. | |
128 | * Maintain NLM lock reclaiming lists in the nlm_host instead. | |
129 | */ | |
130 | static | |
131 | void nlmclnt_mark_reclaim(struct nlm_host *host) | |
132 | { | |
133 | struct file_lock *fl; | |
134 | struct inode *inode; | |
135 | struct list_head *tmp; | |
136 | ||
137 | list_for_each(tmp, &file_lock_list) { | |
138 | fl = list_entry(tmp, struct file_lock, fl_link); | |
139 | ||
140 | inode = fl->fl_file->f_dentry->d_inode; | |
141 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | |
142 | continue; | |
143 | if (fl->fl_u.nfs_fl.owner->host != host) | |
144 | continue; | |
145 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) | |
146 | continue; | |
147 | fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; | |
148 | } | |
149 | } | |
150 | ||
151 | /* | |
152 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, | |
153 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. | |
154 | */ | |
155 | static inline | |
156 | void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) | |
157 | { | |
158 | host->h_monitored = 0; | |
159 | host->h_nsmstate = newstate; | |
160 | host->h_state++; | |
161 | host->h_nextrebind = 0; | |
162 | nlm_rebind_host(host); | |
163 | nlmclnt_mark_reclaim(host); | |
164 | dprintk("NLM: reclaiming locks for host %s", host->h_name); | |
165 | } | |
166 | ||
/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_reclaiming++) {
		/* A reclaimer thread is already running for this host.
		 * If the server's NSM state is unchanged there is nothing
		 * new to do; otherwise re-mark the locks for reclaim so the
		 * running thread picks them up. */
		if (host->h_nsmstate == newstate)
			return;
		nlmclnt_prepare_reclaim(host, newstate);
	} else {
		nlmclnt_prepare_reclaim(host, newstate);
		/* Pin the host and this module for the lifetime of the
		 * reclaimer thread; reclaimer() drops both on exit. */
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		/* If the thread cannot be spawned, drop the module ref here.
		 * NOTE(review): the nlm_get_host() reference appears to leak
		 * on this failure path — confirm against nlm_release_host(). */
		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
			module_put(THIS_MODULE);
	}
}
186 | ||
/*
 * Kernel-thread entry point that re-establishes (reclaims) all locks
 * previously marked NFS_LCK_RECLAIM against a rebooted server, then wakes
 * every process blocked on that host so it can retry.
 * ptr is the nlm_host pinned by nlmclnt_recovery(); released on exit.
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);	/* reclaim can be aborted by SIGKILL only */

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		/* Skip anything that is not a marked NFS lock on this host. */
		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		/* Clear the mark before the RPC call; nlmclnt_reclaim may
		 * sleep, so the list can change under us — restart the walk
		 * from the head after every reclaimed lock. */
		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		if (signalled())
			break;
		goto restart;
	}

	host->h_reclaiming = 0;

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			/* Waiter must resubmit its request during grace. */
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}