Line | Data |
---|---|
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
18 | * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf | |
19 | * | |
20 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
21 | * CA 95054 USA or visit www.sun.com if you need additional information or | |
22 | * have any questions. | |
23 | * | |
24 | * GPL HEADER END | |
25 | */ | |
26 | /* | |
27 | * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. | |
28 | * Use is subject to license terms. | |
29 | * | |
30 | * Copyright (c) 2010, 2012, Intel Corporation. | |
31 | */ | |
32 | /* | |
33 | * This file is part of Lustre, http://www.lustre.org/ | |
34 | * Lustre is a trademark of Sun Microsystems, Inc. | |
35 | * | |
36 | * lustre/ldlm/ldlm_lock.c | |
37 | * | |
38 | * Author: Peter Braam <braam@clusterfs.com> | |
39 | * Author: Phil Schwan <phil@clusterfs.com> | |
40 | */ | |
41 | ||
42 | #define DEBUG_SUBSYSTEM S_LDLM | |
43 | ||
44 | # include <linux/libcfs/libcfs.h> | |
45 | # include <linux/lustre_intent.h> | |
46 | ||
47 | #include <obd_class.h> | |
48 | #include "ldlm_internal.h" | |
49 | ||
50 | /* lock types */ | |
51 | char *ldlm_lockname[] = { | |
52 | [0] "--", | |
53 | [LCK_EX] "EX", | |
54 | [LCK_PW] "PW", | |
55 | [LCK_PR] "PR", | |
56 | [LCK_CW] "CW", | |
57 | [LCK_CR] "CR", | |
58 | [LCK_NL] "NL", | |
59 | [LCK_GROUP] "GROUP", | |
60 | [LCK_COS] "COS" | |
61 | }; | |
62 | EXPORT_SYMBOL(ldlm_lockname); | |
63 | ||
64 | char *ldlm_typename[] = { | |
65 | [LDLM_PLAIN] "PLN", | |
66 | [LDLM_EXTENT] "EXT", | |
67 | [LDLM_FLOCK] "FLK", | |
68 | [LDLM_IBITS] "IBT", | |
69 | }; | |
70 | EXPORT_SYMBOL(ldlm_typename); | |
71 | ||
72 | static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = { | |
73 | [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_wire_to_local, | |
74 | [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_wire_to_local, | |
75 | [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_wire18_to_local, | |
76 | [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_wire_to_local, | |
77 | }; | |
78 | ||
79 | static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = { | |
80 | [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_wire_to_local, | |
81 | [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_wire_to_local, | |
82 | [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_wire21_to_local, | |
83 | [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_wire_to_local, | |
84 | }; | |
85 | ||
86 | static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { | |
87 | [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_local_to_wire, | |
88 | [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_local_to_wire, | |
89 | [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_local_to_wire, | |
90 | [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_local_to_wire, | |
91 | }; | |
92 | ||
93 | /** | |
94 | * Converts lock policy from local format to on the wire lock_desc format | |
95 | */ | |
96 | void ldlm_convert_policy_to_wire(ldlm_type_t type, | |
97 | const ldlm_policy_data_t *lpolicy, | |
98 | ldlm_wire_policy_data_t *wpolicy) | |
99 | { | |
100 | ldlm_policy_local_to_wire_t convert; | |
101 | ||
102 | convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE]; | |
103 | ||
104 | convert(lpolicy, wpolicy); | |
105 | } | |
106 | ||
107 | /** | |
108 | * Converts lock policy from on the wire lock_desc format to local format | |
109 | */ | |
110 | void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, | |
111 | const ldlm_wire_policy_data_t *wpolicy, | |
112 | ldlm_policy_data_t *lpolicy) | |
113 | { | |
114 | ldlm_policy_wire_to_local_t convert; | |
115 | int new_client; | |
116 | ||
117 | /* some badness for 2.0.0 clients, but 2.0.0 isn't supported */ |
118 | new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0; | |
119 | if (new_client) | |
120 | convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE]; | |
121 | else | |
122 | convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE]; | |
123 | ||
124 | convert(wpolicy, lpolicy); | |
125 | } | |
126 | ||
127 | char *ldlm_it2str(int it) | |
128 | { | |
129 | switch (it) { | |
130 | case IT_OPEN: | |
131 | return "open"; | |
132 | case IT_CREAT: | |
133 | return "creat"; | |
134 | case (IT_OPEN | IT_CREAT): | |
135 | return "open|creat"; | |
136 | case IT_READDIR: | |
137 | return "readdir"; | |
138 | case IT_GETATTR: | |
139 | return "getattr"; | |
140 | case IT_LOOKUP: | |
141 | return "lookup"; | |
142 | case IT_UNLINK: | |
143 | return "unlink"; | |
144 | case IT_GETXATTR: | |
145 | return "getxattr"; | |
146 | case IT_LAYOUT: | |
147 | return "layout"; | |
148 | default: | |
149 | CERROR("Unknown intent %d\n", it); | |
150 | return "UNKNOWN"; | |
151 | } | |
152 | } | |
153 | EXPORT_SYMBOL(ldlm_it2str); | |
154 | ||
155 | extern struct kmem_cache *ldlm_lock_slab; | |
156 | ||
157 | ||
158 | void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg) | |
159 | { | |
160 | ns->ns_policy = arg; | |
161 | } | |
162 | EXPORT_SYMBOL(ldlm_register_intent); | |
163 | ||
164 | /* | |
165 | * REFCOUNTED LOCK OBJECTS | |
166 | */ | |
167 | ||
168 | ||
169 | /** | |
170 | * Get a reference on a lock. | |
171 | * | |
172 | * Lock refcounts, during creation: | |
173 | * - one special one for allocation, dec'd only once in destroy | |
174 | * - one for being a lock that's in-use | |
175 | * - one for the addref associated with a new lock | |
176 | */ | |
177 | struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) | |
178 | { | |
179 | atomic_inc(&lock->l_refc); | |
180 | return lock; | |
181 | } | |
182 | EXPORT_SYMBOL(ldlm_lock_get); | |
183 | ||
184 | /** | |
185 | * Release lock reference. | |
186 | * | |
187 | * Also frees the lock if it was the last reference. |
188 | */ | |
189 | void ldlm_lock_put(struct ldlm_lock *lock) | |
190 | { | |
191 | ENTRY; | |
192 | ||
193 | LASSERT(lock->l_resource != LP_POISON); | |
194 | LASSERT(atomic_read(&lock->l_refc) > 0); | |
195 | if (atomic_dec_and_test(&lock->l_refc)) { | |
196 | struct ldlm_resource *res; | |
197 | ||
198 | LDLM_DEBUG(lock, | |
199 | "final lock_put on destroyed lock, freeing it."); | |
200 | ||
201 | res = lock->l_resource; | |
202 | LASSERT(lock->l_flags & LDLM_FL_DESTROYED); |
203 | LASSERT(list_empty(&lock->l_res_link)); |
204 | LASSERT(list_empty(&lock->l_pending_chain)); | |
205 | ||
206 | lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats, | |
207 | LDLM_NSS_LOCKS); | |
208 | lu_ref_del(&res->lr_reference, "lock", lock); | |
209 | ldlm_resource_putref(res); | |
210 | lock->l_resource = NULL; | |
211 | if (lock->l_export) { | |
212 | class_export_lock_put(lock->l_export, lock); | |
213 | lock->l_export = NULL; | |
214 | } | |
215 | ||
216 | if (lock->l_lvb_data != NULL) | |
217 | OBD_FREE(lock->l_lvb_data, lock->l_lvb_len); | |
218 | ||
219 | ldlm_interval_free(ldlm_interval_detach(lock)); | |
220 | lu_ref_fini(&lock->l_reference); | |
221 | OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle); | |
222 | } | |
223 | ||
224 | EXIT; | |
225 | } | |
226 | EXPORT_SYMBOL(ldlm_lock_put); | |
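/*
 * Illustrative usage sketch (not from the original source): the
 * LDLM_LOCK_GET()/LDLM_LOCK_PUT() macros used throughout this file wrap
 * the two functions above; a caller that must keep a lock pointer valid
 * across a region where the last reference could otherwise go away does
 * roughly:
 *
 *	struct ldlm_lock *lock = LDLM_LOCK_GET(some_lock);
 *	... use lock->l_resource, lock->l_flags, ...
 *	LDLM_LOCK_PUT(lock);
 *
 * The final put on a lock already marked LDLM_FL_DESTROYED releases the
 * resource reference, the namespace stats counter and the slab object,
 * as ldlm_lock_put() above shows.
 */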
227 | ||
228 | /** | |
229 | * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked. | |
230 | */ | |
231 | int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) | |
232 | { | |
233 | int rc = 0; | |
234 | if (!list_empty(&lock->l_lru)) { | |
235 | struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); | |
236 | ||
237 | LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); | |
238 | list_del_init(&lock->l_lru); | |
239 | if (lock->l_flags & LDLM_FL_SKIPPED) | |
240 | lock->l_flags &= ~LDLM_FL_SKIPPED; | |
241 | LASSERT(ns->ns_nr_unused > 0); | |
242 | ns->ns_nr_unused--; | |
243 | rc = 1; | |
244 | } | |
245 | return rc; | |
246 | } | |
247 | ||
248 | /** | |
249 | * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first. | |
250 | */ | |
251 | int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) | |
252 | { | |
253 | struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); | |
254 | int rc; | |
255 | ||
256 | ENTRY; | |
257 | if (lock->l_flags & LDLM_FL_NS_SRV) { |
258 | LASSERT(list_empty(&lock->l_lru)); |
259 | RETURN(0); | |
260 | } | |
261 | ||
262 | spin_lock(&ns->ns_lock); | |
263 | rc = ldlm_lock_remove_from_lru_nolock(lock); | |
264 | spin_unlock(&ns->ns_lock); | |
265 | EXIT; | |
266 | return rc; | |
267 | } | |
268 | ||
269 | /** | |
270 | * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked. | |
271 | */ | |
272 | void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) | |
273 | { | |
274 | struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); | |
275 | ||
276 | lock->l_last_used = cfs_time_current(); | |
277 | LASSERT(list_empty(&lock->l_lru)); | |
278 | LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); | |
279 | list_add_tail(&lock->l_lru, &ns->ns_unused_list); | |
280 | LASSERT(ns->ns_nr_unused >= 0); | |
281 | ns->ns_nr_unused++; | |
282 | } | |
283 | ||
284 | /** | |
285 | * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks | |
286 | * first. | |
287 | */ | |
288 | void ldlm_lock_add_to_lru(struct ldlm_lock *lock) | |
289 | { | |
290 | struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); | |
291 | ||
292 | ENTRY; | |
293 | spin_lock(&ns->ns_lock); | |
294 | ldlm_lock_add_to_lru_nolock(lock); | |
295 | spin_unlock(&ns->ns_lock); | |
296 | EXIT; | |
297 | } | |
298 | ||
299 | /** | |
300 | * Moves LDLM lock \a lock that is already in namespace LRU to the tail of | |
301 | * the LRU. Performs necessary LRU locking | |
302 | */ | |
303 | void ldlm_lock_touch_in_lru(struct ldlm_lock *lock) | |
304 | { | |
305 | struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); | |
306 | ||
307 | ENTRY; | |
308 | if (lock->l_flags & LDLM_FL_NS_SRV) { |
309 | LASSERT(list_empty(&lock->l_lru)); |
310 | EXIT; | |
311 | return; | |
312 | } | |
313 | ||
314 | spin_lock(&ns->ns_lock); | |
315 | if (!list_empty(&lock->l_lru)) { | |
316 | ldlm_lock_remove_from_lru_nolock(lock); | |
317 | ldlm_lock_add_to_lru_nolock(lock); | |
318 | } | |
319 | spin_unlock(&ns->ns_lock); | |
320 | EXIT; | |
321 | } | |
322 | ||
323 | /** | |
324 | * Helper to destroy a locked lock. | |
325 | * | |
326 | * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock | |
327 | * Must be called with l_lock and lr_lock held. | |
328 | * | |
329 | * Does not actually free the lock data, but rather marks the lock as | |
330 | * destroyed by setting the LDLM_FL_DESTROYED flag. Destroys a |
331 | * handle->lock association too, so that the lock can no longer be found | |
332 | * and removes the lock from LRU list. Actual lock freeing occurs when | |
333 | * last lock reference goes away. | |
334 | * | |
335 | * Original comment (of some historical value): | |
336 | * This used to have a 'strict' flag, which recovery would use to mark an | |
337 | * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I | |
338 | * shall explain why it's gone: with the new hash table scheme, once you call | |
339 | * ldlm_lock_destroy, you can never drop your final references on this lock. | |
340 | * Because it's not in the hash table anymore. -phil | |
341 | */ | |
342 | int ldlm_lock_destroy_internal(struct ldlm_lock *lock) | |
343 | { | |
344 | ENTRY; | |
345 | ||
346 | if (lock->l_readers || lock->l_writers) { | |
347 | LDLM_ERROR(lock, "lock still has references"); | |
348 | LBUG(); | |
349 | } | |
350 | ||
351 | if (!list_empty(&lock->l_res_link)) { | |
352 | LDLM_ERROR(lock, "lock still on resource"); | |
353 | LBUG(); | |
354 | } | |
355 | ||
356 | if (lock->l_flags & LDLM_FL_DESTROYED) { |
357 | LASSERT(list_empty(&lock->l_lru)); |
358 | EXIT; | |
359 | return 0; | |
360 | } | |
361 | lock->l_flags |= LDLM_FL_DESTROYED; |
362 | |
363 | if (lock->l_export && lock->l_export->exp_lock_hash) { | |
364 | /* NB: it's safe to call cfs_hash_del() even if lock isn't |
365 | * in exp_lock_hash. */ | |
366 | /* In the function below, .hs_keycmp resolves to | |
367 | * ldlm_export_lock_keycmp() */ | |
368 | /* coverity[overrun-buffer-val] */ | |
369 | cfs_hash_del(lock->l_export->exp_lock_hash, | |
370 | &lock->l_remote_handle, &lock->l_exp_hash); | |
371 | } | |
372 | ||
373 | ldlm_lock_remove_from_lru(lock); | |
374 | class_handle_unhash(&lock->l_handle); | |
375 | ||
376 | #if 0 | |
377 | /* Wake anyone waiting for this lock */ | |
378 | /* FIXME: I should probably add yet another flag, instead of using | |
379 | * l_export to only call this on clients */ | |
380 | if (lock->l_export) | |
381 | class_export_put(lock->l_export); | |
382 | lock->l_export = NULL; | |
383 | if (lock->l_export && lock->l_completion_ast) | |
384 | lock->l_completion_ast(lock, 0); | |
385 | #endif | |
386 | EXIT; | |
387 | return 1; | |
388 | } | |
389 | ||
390 | /** | |
391 | * Destroys a LDLM lock \a lock. Performs necessary locking first. | |
392 | */ | |
393 | void ldlm_lock_destroy(struct ldlm_lock *lock) | |
394 | { | |
395 | int first; | |
396 | ENTRY; | |
397 | lock_res_and_lock(lock); | |
398 | first = ldlm_lock_destroy_internal(lock); | |
399 | unlock_res_and_lock(lock); | |
400 | ||
401 | /* drop reference from hashtable only for first destroy */ | |
402 | if (first) { | |
403 | lu_ref_del(&lock->l_reference, "hash", lock); | |
404 | LDLM_LOCK_RELEASE(lock); | |
405 | } | |
406 | EXIT; | |
407 | } | |
408 | ||
409 | /** | |
410 | * Destroys a LDLM lock \a lock that is already locked. | |
411 | */ | |
412 | void ldlm_lock_destroy_nolock(struct ldlm_lock *lock) | |
413 | { | |
414 | int first; | |
415 | ENTRY; | |
416 | first = ldlm_lock_destroy_internal(lock); | |
417 | /* drop reference from hashtable only for first destroy */ | |
418 | if (first) { | |
419 | lu_ref_del(&lock->l_reference, "hash", lock); | |
420 | LDLM_LOCK_RELEASE(lock); | |
421 | } | |
422 | EXIT; | |
423 | } | |
424 | ||
425 | /* this is called by portals_handle2object with the handle lock taken */ | |
426 | static void lock_handle_addref(void *lock) | |
427 | { | |
428 | LDLM_LOCK_GET((struct ldlm_lock *)lock); | |
429 | } | |
430 | ||
431 | static void lock_handle_free(void *lock, int size) | |
432 | { | |
433 | LASSERT(size == sizeof(struct ldlm_lock)); | |
434 | OBD_SLAB_FREE(lock, ldlm_lock_slab, size); | |
435 | } | |
436 | ||
437 | struct portals_handle_ops lock_handle_ops = { | |
438 | .hop_addref = lock_handle_addref, | |
439 | .hop_free = lock_handle_free, | |
440 | }; | |
441 | ||
442 | /** | |
443 | * | |
444 | * Allocate and initialize new lock structure. | |
445 | * | |
446 | * usage: pass in a resource on which you have done ldlm_resource_get | |
447 | * new lock will take over the refcount. | |
448 | * returns: lock with refcount 2 - one for current caller and one for remote | |
449 | */ | |
450 | static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) | |
451 | { | |
452 | struct ldlm_lock *lock; | |
453 | ENTRY; | |
454 | ||
455 | if (resource == NULL) | |
456 | LBUG(); | |
457 | ||
458 | OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO); | |
459 | if (lock == NULL) | |
460 | RETURN(NULL); | |
461 | ||
462 | spin_lock_init(&lock->l_lock); | |
463 | lock->l_resource = resource; | |
464 | lu_ref_add(&resource->lr_reference, "lock", lock); | |
465 | ||
466 | atomic_set(&lock->l_refc, 2); | |
467 | INIT_LIST_HEAD(&lock->l_res_link); | |
468 | INIT_LIST_HEAD(&lock->l_lru); | |
469 | INIT_LIST_HEAD(&lock->l_pending_chain); | |
470 | INIT_LIST_HEAD(&lock->l_bl_ast); | |
471 | INIT_LIST_HEAD(&lock->l_cp_ast); | |
472 | INIT_LIST_HEAD(&lock->l_rk_ast); | |
473 | init_waitqueue_head(&lock->l_waitq); | |
474 | lock->l_blocking_lock = NULL; | |
475 | INIT_LIST_HEAD(&lock->l_sl_mode); | |
476 | INIT_LIST_HEAD(&lock->l_sl_policy); | |
477 | INIT_HLIST_NODE(&lock->l_exp_hash); | |
478 | INIT_HLIST_NODE(&lock->l_exp_flock_hash); | |
479 | ||
480 | lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats, | |
481 | LDLM_NSS_LOCKS); | |
482 | INIT_LIST_HEAD(&lock->l_handle.h_link); | |
483 | class_handle_hash(&lock->l_handle, &lock_handle_ops); | |
484 | ||
485 | lu_ref_init(&lock->l_reference); | |
486 | lu_ref_add(&lock->l_reference, "hash", lock); | |
487 | lock->l_callback_timeout = 0; | |
488 | ||
489 | #if LUSTRE_TRACKS_LOCK_EXP_REFS | |
490 | INIT_LIST_HEAD(&lock->l_exp_refs_link); | |
491 | lock->l_exp_refs_nr = 0; | |
492 | lock->l_exp_refs_target = NULL; | |
493 | #endif | |
494 | INIT_LIST_HEAD(&lock->l_exp_list); | |
495 | ||
496 | RETURN(lock); | |
497 | } | |
498 | ||
499 | /** | |
500 | * Moves LDLM lock \a lock to another resource. | |
501 | * This is used on the client when the server returns a different lock than requested |
502 | * (typically as a result of intent operation) | |
503 | */ | |
504 | int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, | |
505 | const struct ldlm_res_id *new_resid) | |
506 | { | |
507 | struct ldlm_resource *oldres = lock->l_resource; | |
508 | struct ldlm_resource *newres; | |
509 | int type; | |
510 | ENTRY; | |
511 | ||
512 | LASSERT(ns_is_client(ns)); | |
513 | ||
514 | lock_res_and_lock(lock); | |
515 | if (memcmp(new_resid, &lock->l_resource->lr_name, | |
516 | sizeof(lock->l_resource->lr_name)) == 0) { | |
517 | /* Nothing to do */ | |
518 | unlock_res_and_lock(lock); | |
519 | RETURN(0); | |
520 | } | |
521 | ||
522 | LASSERT(new_resid->name[0] != 0); | |
523 | ||
524 | /* This function assumes that the lock isn't on any lists */ | |
525 | LASSERT(list_empty(&lock->l_res_link)); | |
526 | ||
527 | type = oldres->lr_type; | |
528 | unlock_res_and_lock(lock); | |
529 | ||
530 | newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); | |
531 | if (newres == NULL) | |
532 | RETURN(-ENOMEM); | |
533 | ||
534 | lu_ref_add(&newres->lr_reference, "lock", lock); | |
535 | /* | |
536 | * To flip the lock from the old to the new resource, lock, oldres and | |
537 | * newres have to be locked. Resource spin-locks are nested within | |
538 | * lock->l_lock, and are taken in the memory address order to avoid | |
539 | * dead-locks. | |
540 | */ | |
541 | spin_lock(&lock->l_lock); | |
542 | oldres = lock->l_resource; | |
543 | if (oldres < newres) { | |
544 | lock_res(oldres); | |
545 | lock_res_nested(newres, LRT_NEW); | |
546 | } else { | |
547 | lock_res(newres); | |
548 | lock_res_nested(oldres, LRT_NEW); | |
549 | } | |
550 | LASSERT(memcmp(new_resid, &oldres->lr_name, | |
551 | sizeof oldres->lr_name) != 0); | |
552 | lock->l_resource = newres; | |
553 | unlock_res(oldres); | |
554 | unlock_res_and_lock(lock); | |
555 | ||
556 | /* ...and the flowers are still standing! */ | |
557 | lu_ref_del(&oldres->lr_reference, "lock", lock); | |
558 | ldlm_resource_putref(oldres); | |
559 | ||
560 | RETURN(0); | |
561 | } | |
562 | EXPORT_SYMBOL(ldlm_lock_change_resource); | |
563 | ||
564 | /** \defgroup ldlm_handles LDLM HANDLES | |
565 | * Ways to get hold of locks without any addresses. | |
566 | * @{ | |
567 | */ | |
568 | ||
569 | /** | |
570 | * Fills in handle for LDLM lock \a lock into supplied \a lockh | |
571 | * Does not take any references. | |
572 | */ | |
573 | void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh) | |
574 | { | |
575 | lockh->cookie = lock->l_handle.h_cookie; | |
576 | } | |
577 | EXPORT_SYMBOL(ldlm_lock2handle); | |
578 | ||
579 | /** | |
580 | * Obtain a lock reference by handle. | |
581 | * | |
582 | * if \a flags: atomically get the lock and set the flags. | |
583 | * Return NULL if flag already set | |
584 | */ | |
585 | struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, | |
586 | __u64 flags) | |
587 | { | |
588 | struct ldlm_lock *lock; | |
589 | ENTRY; | |
590 | ||
591 | LASSERT(handle); | |
592 | ||
593 | lock = class_handle2object(handle->cookie); | |
594 | if (lock == NULL) | |
595 | RETURN(NULL); | |
596 | ||
597 | /* It's unlikely but possible that someone marked the lock as | |
598 | * destroyed after we did handle2object on it */ | |
599 | if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { |
600 | lu_ref_add(&lock->l_reference, "handle", current); |
601 | RETURN(lock); | |
602 | } | |
603 | ||
604 | lock_res_and_lock(lock); | |
605 | ||
606 | LASSERT(lock->l_resource != NULL); | |
607 | ||
608 | lu_ref_add_atomic(&lock->l_reference, "handle", current); | |
609 | if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { |
610 | unlock_res_and_lock(lock); |
611 | CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); | |
612 | LDLM_LOCK_PUT(lock); | |
613 | RETURN(NULL); | |
614 | } | |
615 | ||
616 | if (flags && (lock->l_flags & flags)) { | |
617 | unlock_res_and_lock(lock); | |
618 | LDLM_LOCK_PUT(lock); | |
619 | RETURN(NULL); | |
620 | } | |
621 | ||
622 | if (flags) | |
623 | lock->l_flags |= flags; | |
624 | ||
625 | unlock_res_and_lock(lock); | |
626 | RETURN(lock); | |
627 | } | |
628 | EXPORT_SYMBOL(__ldlm_handle2lock); | |
629 | /** @} ldlm_handles */ | |
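/*
 * Illustrative usage sketch (not from the original source): a handle is
 * an opaque cookie that stays valid where a raw pointer cannot be
 * trusted, e.g. across the wire or across a possible lock destroy.
 * Typical round trip, assuming the caller already holds a referenced
 * lock "lock":
 *
 *	struct lustre_handle lockh;
 *	struct ldlm_lock *lock2;
 *
 *	ldlm_lock2handle(lock, &lockh);		(pointer -> cookie, no ref)
 *	...
 *	lock2 = ldlm_handle2lock(&lockh);	(cookie -> referenced lock)
 *	if (lock2 != NULL) {
 *		... lock2 had not been destroyed at lookup time ...
 *		LDLM_LOCK_PUT(lock2);
 *	}
 *
 * ldlm_handle2lock() is the flags == 0 wrapper around
 * __ldlm_handle2lock() above.
 */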
630 | ||
631 | /** | |
632 | * Fill in "on the wire" representation for given LDLM lock into supplied | |
633 | * lock descriptor \a desc structure. | |
634 | */ | |
635 | void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) | |
636 | { | |
637 | struct obd_export *exp = lock->l_export ?: lock->l_conn_export; | |
638 | ||
639 | /* INODEBITS_INTEROP: If the other side does not support | |
640 | * inodebits, reply with a plain lock descriptor. */ | |
641 | if ((lock->l_resource->lr_type == LDLM_IBITS) && | |
642 | (exp && !(exp_connect_flags(exp) & OBD_CONNECT_IBITS))) { | |
643 | /* Make sure all the right bits are set in this lock we | |
644 | are going to pass to client */ | |
645 | LASSERTF(lock->l_policy_data.l_inodebits.bits == | |
646 | (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | | |
647 | MDS_INODELOCK_LAYOUT), | |
648 | "Inappropriate inode lock bits during " | |
649 | "conversion " LPU64 "\n", | |
650 | lock->l_policy_data.l_inodebits.bits); | |
651 | ||
652 | ldlm_res2desc(lock->l_resource, &desc->l_resource); | |
653 | desc->l_resource.lr_type = LDLM_PLAIN; | |
654 | ||
655 | /* Convert "new" lock mode to something old client can | |
656 | understand */ | |
657 | if ((lock->l_req_mode == LCK_CR) || | |
658 | (lock->l_req_mode == LCK_CW)) | |
659 | desc->l_req_mode = LCK_PR; | |
660 | else | |
661 | desc->l_req_mode = lock->l_req_mode; | |
662 | if ((lock->l_granted_mode == LCK_CR) || | |
663 | (lock->l_granted_mode == LCK_CW)) { | |
664 | desc->l_granted_mode = LCK_PR; | |
665 | } else { | |
666 | /* We never grant PW/EX locks to clients */ | |
667 | LASSERT((lock->l_granted_mode != LCK_PW) && | |
668 | (lock->l_granted_mode != LCK_EX)); | |
669 | desc->l_granted_mode = lock->l_granted_mode; | |
670 | } | |
671 | ||
672 | /* We do not copy policy here, because there is no | |
673 | policy for plain locks */ | |
674 | } else { | |
675 | ldlm_res2desc(lock->l_resource, &desc->l_resource); | |
676 | desc->l_req_mode = lock->l_req_mode; | |
677 | desc->l_granted_mode = lock->l_granted_mode; | |
678 | ldlm_convert_policy_to_wire(lock->l_resource->lr_type, | |
679 | &lock->l_policy_data, | |
680 | &desc->l_policy_data); | |
681 | } | |
682 | } | |
683 | EXPORT_SYMBOL(ldlm_lock2desc); | |
684 | ||
685 | /** | |
686 | * Add a lock to list of conflicting locks to send AST to. | |
687 | * | |
688 | * Only add if we have not sent a blocking AST to the lock yet. | |
689 | */ | |
690 | void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, | |
691 | struct list_head *work_list) | |
692 | { | |
693 | if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) { | |
694 | LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); | |
695 | lock->l_flags |= LDLM_FL_AST_SENT; | |
696 | /* If the enqueuing client said so, tell the AST recipient to | |
697 | * discard dirty data, rather than writing back. */ | |
698 | if (new->l_flags & LDLM_FL_AST_DISCARD_DATA) |
699 | lock->l_flags |= LDLM_FL_DISCARD_DATA; |
700 | LASSERT(list_empty(&lock->l_bl_ast)); | |
701 | list_add(&lock->l_bl_ast, work_list); | |
702 | LDLM_LOCK_GET(lock); | |
703 | LASSERT(lock->l_blocking_lock == NULL); | |
704 | lock->l_blocking_lock = LDLM_LOCK_GET(new); | |
705 | } | |
706 | } | |
707 | ||
708 | /** | |
709 | * Add a lock to list of just granted locks to send completion AST to. | |
710 | */ | |
711 | void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list) | |
712 | { | |
713 | if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) { | |
714 | lock->l_flags |= LDLM_FL_CP_REQD; | |
715 | LDLM_DEBUG(lock, "lock granted; sending completion AST."); | |
716 | LASSERT(list_empty(&lock->l_cp_ast)); | |
717 | list_add(&lock->l_cp_ast, work_list); | |
718 | LDLM_LOCK_GET(lock); | |
719 | } | |
720 | } | |
721 | ||
722 | /** | |
723 | * Aggregator function to add AST work items into a list. Determines | |
724 | * what sort of an AST work needs to be done and calls the proper | |
725 | * adding function. | |
726 | * Must be called with lr_lock held. | |
727 | */ | |
728 | void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, | |
729 | struct list_head *work_list) | |
730 | { | |
731 | ENTRY; | |
732 | check_res_locked(lock->l_resource); | |
733 | if (new) | |
734 | ldlm_add_bl_work_item(lock, new, work_list); | |
735 | else | |
736 | ldlm_add_cp_work_item(lock, work_list); | |
737 | EXIT; | |
738 | } | |
739 | ||
740 | /** | |
741 | * Add specified reader/writer reference to LDLM lock with handle \a lockh. | |
742 | * r/w reference type is determined by \a mode | |
743 | * Calls ldlm_lock_addref_internal. | |
744 | */ | |
745 | void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode) | |
746 | { | |
747 | struct ldlm_lock *lock; | |
748 | ||
749 | lock = ldlm_handle2lock(lockh); | |
750 | LASSERT(lock != NULL); | |
751 | ldlm_lock_addref_internal(lock, mode); | |
752 | LDLM_LOCK_PUT(lock); | |
753 | } | |
754 | EXPORT_SYMBOL(ldlm_lock_addref); | |
755 | ||
756 | /** | |
757 | * Helper function. | |
758 | * Add specified reader/writer reference to LDLM lock \a lock. | |
759 | * r/w reference type is determined by \a mode | |
760 | * Removes lock from LRU if it is there. | |
761 | * Assumes the LDLM lock is already locked. | |
762 | */ | |
763 | void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) | |
764 | { | |
765 | ldlm_lock_remove_from_lru(lock); | |
766 | if (mode & (LCK_NL | LCK_CR | LCK_PR)) { | |
767 | lock->l_readers++; | |
768 | lu_ref_add_atomic(&lock->l_reference, "reader", lock); | |
769 | } | |
770 | if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) { | |
771 | lock->l_writers++; | |
772 | lu_ref_add_atomic(&lock->l_reference, "writer", lock); | |
773 | } | |
774 | LDLM_LOCK_GET(lock); | |
775 | lu_ref_add_atomic(&lock->l_reference, "user", lock); | |
776 | LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]); | |
777 | } | |
778 | ||
779 | /** | |
780 | * Attempts to add reader/writer reference to a lock with handle \a lockh, and | |
781 | * fails if lock is already LDLM_FL_CBPENDING or destroyed. | |
782 | * | |
783 | * \retval 0 success, lock was addref-ed | |
784 | * | |
785 | * \retval -EAGAIN lock is being canceled. | |
786 | */ | |
787 | int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) | |
788 | { | |
789 | struct ldlm_lock *lock; | |
790 | int result; | |
791 | ||
792 | result = -EAGAIN; | |
793 | lock = ldlm_handle2lock(lockh); | |
794 | if (lock != NULL) { | |
795 | lock_res_and_lock(lock); | |
796 | if (lock->l_readers != 0 || lock->l_writers != 0 || | |
797 | !(lock->l_flags & LDLM_FL_CBPENDING)) { | |
798 | ldlm_lock_addref_internal_nolock(lock, mode); | |
799 | result = 0; | |
800 | } | |
801 | unlock_res_and_lock(lock); | |
802 | LDLM_LOCK_PUT(lock); | |
803 | } | |
804 | return result; | |
805 | } | |
806 | EXPORT_SYMBOL(ldlm_lock_addref_try); | |
807 | ||
808 | /** | |
809 | * Add specified reader/writer reference to LDLM lock \a lock. | |
810 | * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work. | |
811 | * Only called for local locks. | |
812 | */ | |
813 | void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) | |
814 | { | |
815 | lock_res_and_lock(lock); | |
816 | ldlm_lock_addref_internal_nolock(lock, mode); | |
817 | unlock_res_and_lock(lock); | |
818 | } | |
819 | ||
820 | /** | |
821 | * Removes reader/writer reference for LDLM lock \a lock. | |
822 | * Assumes LDLM lock is already locked. | |
823 | * only called in ldlm_flock_destroy and for local locks. | |
824 | * Does NOT add lock to LRU if no r/w references left to accommodate flock locks |
825 | * that cannot be placed in LRU. | |
826 | */ | |
827 | void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) | |
828 | { | |
829 | LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); | |
830 | if (mode & (LCK_NL | LCK_CR | LCK_PR)) { | |
831 | LASSERT(lock->l_readers > 0); | |
832 | lu_ref_del(&lock->l_reference, "reader", lock); | |
833 | lock->l_readers--; | |
834 | } | |
835 | if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) { | |
836 | LASSERT(lock->l_writers > 0); | |
837 | lu_ref_del(&lock->l_reference, "writer", lock); | |
838 | lock->l_writers--; | |
839 | } | |
840 | ||
841 | lu_ref_del(&lock->l_reference, "user", lock); | |
842 | LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */ | |
843 | } | |
844 | ||
845 | /** | |
846 | * Removes reader/writer reference for LDLM lock \a lock. | |
847 | * Locks LDLM lock first. | |
848 | * If the lock is determined to be client lock on a client and r/w refcount | |
849 | * drops to zero and the lock is not blocked, the lock is added to LRU lock | |
850 | * on the namespace. | |
851 | * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called. | |
852 | */ | |
853 | void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) | |
854 | { | |
855 | struct ldlm_namespace *ns; | |
856 | ENTRY; | |
857 | ||
858 | lock_res_and_lock(lock); | |
859 | ||
860 | ns = ldlm_lock_to_ns(lock); | |
861 | ||
862 | ldlm_lock_decref_internal_nolock(lock, mode); | |
863 | ||
863 | ||
864 | if (lock->l_flags & LDLM_FL_LOCAL && |
865 | !lock->l_readers && !lock->l_writers) { | |
866 | /* If this is a local lock on a server namespace and this was | |
867 | * the last reference, cancel the lock. */ | |
868 | CDEBUG(D_INFO, "forcing cancel of local lock\n"); | |
869 | lock->l_flags |= LDLM_FL_CBPENDING; | |
870 | } | |
871 | ||
872 | if (!lock->l_readers && !lock->l_writers && | |
873 | (lock->l_flags & LDLM_FL_CBPENDING)) { | |
874 | /* If we received a blocking AST and this was the last reference, |
875 | * run the callback. */ | |
876 | if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export) |
877 | CERROR("FL_CBPENDING set on non-local lock--just a " |
878 | "warning\n"); | |
879 | ||
880 | LDLM_DEBUG(lock, "final decref done on cbpending lock"); | |
881 | ||
882 | LDLM_LOCK_GET(lock); /* dropped by bl thread */ | |
883 | ldlm_lock_remove_from_lru(lock); | |
884 | unlock_res_and_lock(lock); | |
885 | ||
886 | if (lock->l_flags & LDLM_FL_FAIL_LOC) | |
887 | OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); | |
888 | ||
889 | if ((lock->l_flags & LDLM_FL_ATOMIC_CB) || | |
890 | ldlm_bl_to_thread_lock(ns, NULL, lock) != 0) | |
891 | ldlm_handle_bl_callback(ns, NULL, lock); | |
892 | } else if (ns_is_client(ns) && | |
893 | !lock->l_readers && !lock->l_writers && | |
894 | !(lock->l_flags & LDLM_FL_NO_LRU) && | |
895 | !(lock->l_flags & LDLM_FL_BL_AST)) { | |
896 | ||
897 | LDLM_DEBUG(lock, "add lock into lru list"); | |
898 | ||
899 | /* If this is a client-side namespace and this was the last | |
900 | * reference, put it on the LRU. */ | |
901 | ldlm_lock_add_to_lru(lock); | |
902 | unlock_res_and_lock(lock); | |
903 | ||
904 | if (lock->l_flags & LDLM_FL_FAIL_LOC) | |
905 | OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); | |
906 | ||
907 | /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE | |
908 | * are not supported by the server, otherwise, it is done on | |
909 | * enqueue. */ | |
910 | if (!exp_connect_cancelset(lock->l_conn_export) && | |
911 | !ns_connect_lru_resize(ns)) | |
912 | ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0); | |
913 | } else { | |
914 | LDLM_DEBUG(lock, "do not add lock into lru list"); | |
915 | unlock_res_and_lock(lock); | |
916 | } | |
917 | ||
918 | EXIT; | |
919 | } | |
920 | ||
921 | /** | |
922 | * Decrease reader/writer refcount for LDLM lock with handle \a lockh | |
923 | */ | |
924 | void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode) | |
925 | { | |
926 | struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); | |
927 | LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie); | |
928 | ldlm_lock_decref_internal(lock, mode); | |
929 | LDLM_LOCK_PUT(lock); | |
930 | } | |
931 | EXPORT_SYMBOL(ldlm_lock_decref); | |
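/*
 * Illustrative usage sketch (not from the original source): reader and
 * writer references taken through the handle API are balanced with the
 * same mode, e.g. for an existing handle "lockh":
 *
 *	ldlm_lock_addref(&lockh, LCK_PR);	(pins the lock, off the LRU)
 *	... do work covered by the PR lock ...
 *	ldlm_lock_decref(&lockh, LCK_PR);	(unpins it again)
 *
 * As ldlm_lock_decref_internal() above shows, the last decref either
 * hands the lock to the blocking-AST path (when LDLM_FL_CBPENDING is
 * set) or, on a client namespace, parks it in the LRU for later reuse
 * or cancellation.
 */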
932 | ||
933 | /** | |
934 | * Decrease reader/writer refcount for LDLM lock with handle | |
935 | * \a lockh and mark it for subsequent cancellation once r/w refcount | |
936 | * drops to zero instead of putting into LRU. | |
937 | * | |
938 | * Typical usage is for GROUP locks which we cannot allow to be cached. | |
939 | */ | |
940 | void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) | |
941 | { | |
942 | struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); | |
943 | ENTRY; | |
944 | ||
945 | LASSERT(lock != NULL); | |
946 | ||
947 | LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); | |
948 | lock_res_and_lock(lock); | |
949 | lock->l_flags |= LDLM_FL_CBPENDING; | |
950 | unlock_res_and_lock(lock); | |
951 | ldlm_lock_decref_internal(lock, mode); | |
952 | LDLM_LOCK_PUT(lock); | |
953 | } | |
954 | EXPORT_SYMBOL(ldlm_lock_decref_and_cancel); | |
955 | ||
956 | struct sl_insert_point { | |
957 | struct list_head *res_link; | |
958 | struct list_head *mode_link; | |
959 | struct list_head *policy_link; | |
960 | }; | |
961 | ||
962 | /** | |
963 | * Finds a position to insert the new lock into granted lock list. | |
964 | * | |
965 | * Used for locks eligible for skiplist optimization. | |
966 | * | |
967 | * Parameters: | |
968 | * queue [input]: the granted list where search acts on; | |
969 | * req [input]: the lock whose position to be located; | |
970 | * prev [output]: positions within 3 lists to insert @req to | |
971 | * Return Value: | |
972 | * filled @prev | |
973 | * NOTE: called by | |
974 | * - ldlm_grant_lock_with_skiplist | |
975 | */ | |
976 | static void search_granted_lock(struct list_head *queue, | |
977 | struct ldlm_lock *req, | |
978 | struct sl_insert_point *prev) | |
979 | { | |
980 | struct list_head *tmp; | |
981 | struct ldlm_lock *lock, *mode_end, *policy_end; | |
982 | ENTRY; | |
983 | ||
984 | list_for_each(tmp, queue) { | |
985 | lock = list_entry(tmp, struct ldlm_lock, l_res_link); | |
986 | ||
987 | mode_end = list_entry(lock->l_sl_mode.prev, | |
988 | struct ldlm_lock, l_sl_mode); | |
989 | ||
990 | if (lock->l_req_mode != req->l_req_mode) { | |
991 | /* jump to last lock of mode group */ | |
992 | tmp = &mode_end->l_res_link; | |
993 | continue; | |
994 | } | |
995 | ||
996 | /* suitable mode group is found */ | |
997 | if (lock->l_resource->lr_type == LDLM_PLAIN) { | |
998 | /* insert point is last lock of the mode group */ | |
999 | prev->res_link = &mode_end->l_res_link; | |
1000 | prev->mode_link = &mode_end->l_sl_mode; | |
1001 | prev->policy_link = &req->l_sl_policy; | |
1002 | EXIT; | |
1003 | return; | |
1004 | } else if (lock->l_resource->lr_type == LDLM_IBITS) { | |
1005 | for (;;) { | |
1006 | policy_end = | |
1007 | list_entry(lock->l_sl_policy.prev, | |
1008 | struct ldlm_lock, | |
1009 | l_sl_policy); | |
1010 | ||
1011 | if (lock->l_policy_data.l_inodebits.bits == | |
1012 | req->l_policy_data.l_inodebits.bits) { | |
1013 | /* insert point is last lock of | |
1014 | * the policy group */ | |
1015 | prev->res_link = | |
1016 | &policy_end->l_res_link; | |
1017 | prev->mode_link = | |
1018 | &policy_end->l_sl_mode; | |
1019 | prev->policy_link = | |
1020 | &policy_end->l_sl_policy; | |
1021 | EXIT; | |
1022 | return; | |
1023 | } | |
1024 | ||
1025 | if (policy_end == mode_end) | |
1026 | /* done with mode group */ | |
1027 | break; | |
1028 | ||
1029 | /* go to next policy group within mode group */ | |
1030 | tmp = policy_end->l_res_link.next; | |
1031 | lock = list_entry(tmp, struct ldlm_lock, | |
1032 | l_res_link); | |
1033 | } /* loop over policy groups within the mode group */ | |
1034 | ||
1035 | /* insert point is last lock of the mode group, | |
1036 | * new policy group is started */ | |
1037 | prev->res_link = &mode_end->l_res_link; | |
1038 | prev->mode_link = &mode_end->l_sl_mode; | |
1039 | prev->policy_link = &req->l_sl_policy; | |
1040 | EXIT; | |
1041 | return; | |
1042 | } else { | |
1043 | LDLM_ERROR(lock,"is not LDLM_PLAIN or LDLM_IBITS lock"); | |
1044 | LBUG(); | |
1045 | } | |
1046 | } | |
1047 | ||
1048 | /* insert point is last lock on the queue, | |
1049 | * new mode group and new policy group are started */ | |
1050 | prev->res_link = queue->prev; | |
1051 | prev->mode_link = &req->l_sl_mode; | |
1052 | prev->policy_link = &req->l_sl_policy; | |
1053 | EXIT; | |
1054 | return; | |
1055 | } | |
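/*
 * Illustrative sketch (not from the original source) of the list layout
 * search_granted_lock() relies on: granted locks are kept grouped by
 * mode, and IBITS locks are further grouped by identical inodebits
 * within a mode group, e.g.:
 *
 *	lr_granted:  [PR][PR][PR]  [PW]  [EX][EX]
 *
 * l_sl_mode links the first and last lock of each mode group, and
 * l_sl_policy the first and last lock of each policy group, so the scan
 * can hop from group end to group end instead of visiting every lock.
 * The insert point returned in sl_insert_point is always the tail of
 * the matching group, or the queue tail when a new group is started.
 */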
1056 | ||
1057 | /** | |
1058 | * Add a lock into resource granted list after a position described by | |
1059 | * \a prev. | |
1060 | */ | |
1061 | static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, | |
1062 | struct sl_insert_point *prev) | |
1063 | { | |
1064 | struct ldlm_resource *res = lock->l_resource; | |
1065 | ENTRY; | |
1066 | ||
1067 | check_res_locked(res); | |
1068 | ||
1069 | ldlm_resource_dump(D_INFO, res); | |
1070 | LDLM_DEBUG(lock, "About to add lock:"); | |
1071 | ||
1072 | if (lock->l_flags & LDLM_FL_DESTROYED) { |
1073 | CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); |
1074 | return; | |
1075 | } | |
1076 | ||
1077 | LASSERT(list_empty(&lock->l_res_link)); | |
1078 | LASSERT(list_empty(&lock->l_sl_mode)); | |
1079 | LASSERT(list_empty(&lock->l_sl_policy)); | |
1080 | ||
1081 | /* | |
1082 | * lock->link == prev->link means lock is first starting the group. | |
1083 | * Don't re-add to itself to suppress kernel warnings. | |
1084 | */ | |
1085 | if (&lock->l_res_link != prev->res_link) | |
1086 | list_add(&lock->l_res_link, prev->res_link); | |
1087 | if (&lock->l_sl_mode != prev->mode_link) | |
1088 | list_add(&lock->l_sl_mode, prev->mode_link); | |
1089 | if (&lock->l_sl_policy != prev->policy_link) | |
1090 | list_add(&lock->l_sl_policy, prev->policy_link); | |
1091 | ||
1092 | EXIT; | |
1093 | } | |
1094 | ||
1095 | /** | |
1096 | * Add a lock to granted list on a resource maintaining skiplist | |
1097 | * correctness. | |
1098 | */ | |
1099 | static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) | |
1100 | { | |
1101 | struct sl_insert_point prev; | |
1102 | ENTRY; | |
1103 | ||
1104 | LASSERT(lock->l_req_mode == lock->l_granted_mode); | |
1105 | ||
1106 | search_granted_lock(&lock->l_resource->lr_granted, lock, &prev); | |
1107 | ldlm_granted_list_add_lock(lock, &prev); | |
1108 | EXIT; | |
1109 | } | |
1110 | ||
1111 | /** | |
1112 | * Perform lock granting bookkeeping. | |
1113 | * | |
1114 | * Includes putting the lock into granted list and updating lock mode. | |
1115 | * NOTE: called by | |
1116 | * - ldlm_lock_enqueue | |
1117 | * - ldlm_reprocess_queue | |
1118 | * - ldlm_lock_convert | |
1119 | * | |
1120 | * must be called with lr_lock held | |
1121 | */ | |
1122 | void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) | |
1123 | { | |
1124 | struct ldlm_resource *res = lock->l_resource; | |
1125 | ENTRY; | |
1126 | ||
1127 | check_res_locked(res); | |
1128 | ||
1129 | lock->l_granted_mode = lock->l_req_mode; | |
1130 | if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) | |
1131 | ldlm_grant_lock_with_skiplist(lock); | |
1132 | else if (res->lr_type == LDLM_EXTENT) | |
1133 | ldlm_extent_add_lock(res, lock); | |
1134 | else | |
1135 | ldlm_resource_add_lock(res, &res->lr_granted, lock); | |
1136 | ||
1137 | if (lock->l_granted_mode < res->lr_most_restr) | |
1138 | res->lr_most_restr = lock->l_granted_mode; | |
1139 | ||
1140 | if (work_list && lock->l_completion_ast != NULL) | |
1141 | ldlm_add_ast_work_item(lock, NULL, work_list); | |
1142 | ||
1143 | ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); | |
1144 | EXIT; | |
1145 | } | |
1146 | ||
1147 | /** | |
1148 | * Search for a lock with given properties in a queue. | |
1149 | * | |
1150 | * \retval a referenced lock or NULL. See the flag descriptions below, in the | |
1151 | * comment above ldlm_lock_match | |
1152 | */ | |
1153 | static struct ldlm_lock *search_queue(struct list_head *queue, | |
1154 | ldlm_mode_t *mode, | |
1155 | ldlm_policy_data_t *policy, | |
1156 | struct ldlm_lock *old_lock, | |
1157 | __u64 flags, int unref) | |
1158 | { | |
1159 | struct ldlm_lock *lock; | |
1160 | struct list_head *tmp; | |
1161 | ||
1162 | list_for_each(tmp, queue) { | |
1163 | ldlm_mode_t match; | |
1164 | ||
1165 | lock = list_entry(tmp, struct ldlm_lock, l_res_link); | |
1166 | ||
1167 | if (lock == old_lock) | |
1168 | break; | |
1169 | ||
1170 | /* llite sometimes wants to match locks that will be | |
1171 | * canceled when their users drop, but we allow it to match | |
1172 | * if it passes in CBPENDING and the lock still has users. | |
1173 | * this is generally only going to be used by children | |
1174 | * whose parents already hold a lock so forward progress | |
1175 | * can still happen. */ | |
1176 | if (lock->l_flags & LDLM_FL_CBPENDING && | |
1177 | !(flags & LDLM_FL_CBPENDING)) | |
1178 | continue; | |
1179 | if (!unref && lock->l_flags & LDLM_FL_CBPENDING && | |
1180 | lock->l_readers == 0 && lock->l_writers == 0) | |
1181 | continue; | |
1182 | ||
1183 | if (!(lock->l_req_mode & *mode)) | |
1184 | continue; | |
1185 | match = lock->l_req_mode; | |
1186 | ||
1187 | if (lock->l_resource->lr_type == LDLM_EXTENT && | |
1188 | (lock->l_policy_data.l_extent.start > | |
1189 | policy->l_extent.start || | |
1190 | lock->l_policy_data.l_extent.end < policy->l_extent.end)) | |
1191 | continue; | |
1192 | ||
1193 | if (unlikely(match == LCK_GROUP) && | |
1194 | lock->l_resource->lr_type == LDLM_EXTENT && | |
1195 | lock->l_policy_data.l_extent.gid != policy->l_extent.gid) | |
1196 | continue; | |
1197 | ||
1198 | /* We match if we have existing lock with same or wider set | |
1199 | of bits. */ | |
1200 | if (lock->l_resource->lr_type == LDLM_IBITS && | |
1201 | ((lock->l_policy_data.l_inodebits.bits & | |
1202 | policy->l_inodebits.bits) != | |
1203 | policy->l_inodebits.bits)) | |
1204 | continue; | |
1205 | ||
1206 | if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK)) |
1207 | continue; |
1208 | ||
1209 | if ((flags & LDLM_FL_LOCAL_ONLY) && | |
1210 | !(lock->l_flags & LDLM_FL_LOCAL)) | |
1211 | continue; | |
1212 | ||
1213 | if (flags & LDLM_FL_TEST_LOCK) { | |
1214 | LDLM_LOCK_GET(lock); | |
1215 | ldlm_lock_touch_in_lru(lock); | |
1216 | } else { | |
1217 | ldlm_lock_addref_internal_nolock(lock, match); | |
1218 | } | |
1219 | *mode = match; | |
1220 | return lock; | |
1221 | } | |
1222 | ||
1223 | return NULL; | |
1224 | } | |
1225 | ||
1226 | void ldlm_lock_fail_match_locked(struct ldlm_lock *lock) | |
1227 | { | |
1228 | if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) { |
1229 | lock->l_flags |= LDLM_FL_FAIL_NOTIFIED; |
1230 | wake_up_all(&lock->l_waitq); |
1231 | } | |
1232 | } | |
1233 | EXPORT_SYMBOL(ldlm_lock_fail_match_locked); | |
1234 | ||
1235 | void ldlm_lock_fail_match(struct ldlm_lock *lock) | |
1236 | { | |
1237 | lock_res_and_lock(lock); | |
1238 | ldlm_lock_fail_match_locked(lock); | |
1239 | unlock_res_and_lock(lock); | |
1240 | } | |
1241 | EXPORT_SYMBOL(ldlm_lock_fail_match); | |
1242 | ||
1243 | /** | |
1244 | * Mark lock as "matchable" by OST. | |
1245 | * | |
1246 | * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB | |
1247 | * is not yet valid. | |
1248 | * Assumes LDLM lock is already locked. | |
1249 | */ | |
1250 | void ldlm_lock_allow_match_locked(struct ldlm_lock *lock) | |
1251 | { | |
1252 | lock->l_flags |= LDLM_FL_LVB_READY; | |
1253 | wake_up_all(&lock->l_waitq); | |
1254 | } | |
1255 | EXPORT_SYMBOL(ldlm_lock_allow_match_locked); | |
1256 | ||
1257 | /** | |
1258 | * Mark lock as "matchable" by OST. | |
1259 | * Locks the lock and then \see ldlm_lock_allow_match_locked | |
1260 | */ | |
1261 | void ldlm_lock_allow_match(struct ldlm_lock *lock) | |
1262 | { | |
1263 | lock_res_and_lock(lock); | |
1264 | ldlm_lock_allow_match_locked(lock); | |
1265 | unlock_res_and_lock(lock); | |
1266 | } | |
1267 | EXPORT_SYMBOL(ldlm_lock_allow_match); | |
1268 | ||
1269 | /** | |
1270 | * Attempt to find a lock with specified properties. | |
1271 | * | |
1272 | * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is | |
1273 | * set in \a flags | |
1274 | * | |
1275 | * Can be called in two ways: | |
1276 | * | |
1277 | * If 'ns' is NULL, then lockh describes an existing lock that we want to look | |
1278 | * for a duplicate of. | |
1279 | * | |
1280 | * Otherwise, all of the fields must be filled in, to match against. | |
1281 | * | |
1282 | * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the | |
1283 | * server (ie, connh is NULL) | |
1284 | * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted | |
1285 | * list will be considered | |
1286 | * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked | |
1287 | * to be canceled can still be matched as long as they still have reader | |
1288 | * or writer references |
1289 | * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock, | |
1290 | * just tell us if we would have matched. | |
1291 | * | |
1292 | * \retval 1 if it finds an already-existing lock that is compatible; in this | |
1293 | * case, lockh is filled in with a addref()ed lock | |
1294 | * | |
1295 | * We also check security context, and if that fails we simply return 0 (to | |
1296 | * keep caller code unchanged), the context failure will be discovered by | |
1297 | * caller sometime later. | |
1298 | */ | |
1299 | ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, | |
1300 | const struct ldlm_res_id *res_id, ldlm_type_t type, | |
1301 | ldlm_policy_data_t *policy, ldlm_mode_t mode, | |
1302 | struct lustre_handle *lockh, int unref) | |
1303 | { | |
1304 | struct ldlm_resource *res; | |
1305 | struct ldlm_lock *lock, *old_lock = NULL; | |
1306 | int rc = 0; | |
1307 | ENTRY; | |
1308 | ||
1309 | if (ns == NULL) { | |
1310 | old_lock = ldlm_handle2lock(lockh); | |
1311 | LASSERT(old_lock); | |
1312 | ||
1313 | ns = ldlm_lock_to_ns(old_lock); | |
1314 | res_id = &old_lock->l_resource->lr_name; | |
1315 | type = old_lock->l_resource->lr_type; | |
1316 | mode = old_lock->l_req_mode; | |
1317 | } | |
1318 | ||
1319 | res = ldlm_resource_get(ns, NULL, res_id, type, 0); | |
1320 | if (res == NULL) { | |
1321 | LASSERT(old_lock == NULL); | |
1322 | RETURN(0); | |
1323 | } | |
1324 | ||
1325 | LDLM_RESOURCE_ADDREF(res); | |
1326 | lock_res(res); | |
1327 | ||
1328 | lock = search_queue(&res->lr_granted, &mode, policy, old_lock, | |
1329 | flags, unref); | |
1330 | if (lock != NULL) | |
1331 | GOTO(out, rc = 1); | |
1332 | if (flags & LDLM_FL_BLOCK_GRANTED) | |
1333 | GOTO(out, rc = 0); | |
1334 | lock = search_queue(&res->lr_converting, &mode, policy, old_lock, | |
1335 | flags, unref); | |
1336 | if (lock != NULL) | |
1337 | GOTO(out, rc = 1); | |
1338 | lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, | |
1339 | flags, unref); | |
1340 | if (lock != NULL) | |
1341 | GOTO(out, rc = 1); | |
1342 | ||
1343 | EXIT; | |
1344 | out: | |
1345 | unlock_res(res); | |
1346 | LDLM_RESOURCE_DELREF(res); | |
1347 | ldlm_resource_putref(res); | |
1348 | ||
1349 | if (lock) { | |
1350 | ldlm_lock2handle(lock, lockh); | |
1351 | if ((flags & LDLM_FL_LVB_READY) && | |
1352 | (!(lock->l_flags & LDLM_FL_LVB_READY))) { | |
1353 | __u64 wait_flags = LDLM_FL_LVB_READY | |
1354 | LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED; |
1355 | struct l_wait_info lwi; |
1356 | if (lock->l_completion_ast) { | |
1357 | int err = lock->l_completion_ast(lock, | |
1358 | LDLM_FL_WAIT_NOREPROC, | |
1359 | NULL); | |
1360 | if (err) { | |
1361 | if (flags & LDLM_FL_TEST_LOCK) | |
1362 | LDLM_LOCK_RELEASE(lock); | |
1363 | else | |
1364 | ldlm_lock_decref_internal(lock, | |
1365 | mode); | |
1366 | rc = 0; | |
1367 | goto out2; | |
1368 | } | |
1369 | } | |
1370 | ||
1371 | lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout), | |
1372 | NULL, LWI_ON_SIGNAL_NOOP, NULL); | |
1373 | ||
1374 | /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */ | |
1375 | l_wait_event(lock->l_waitq, | |
1376 | lock->l_flags & wait_flags, |
1377 | &lwi); |
1378 | if (!(lock->l_flags & LDLM_FL_LVB_READY)) { | |
1379 | if (flags & LDLM_FL_TEST_LOCK) | |
1380 | LDLM_LOCK_RELEASE(lock); | |
1381 | else | |
1382 | ldlm_lock_decref_internal(lock, mode); | |
1383 | rc = 0; | |
1384 | } | |
1385 | } | |
1386 | } | |
1387 | out2: | |
1388 | if (rc) { | |
1389 | LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")", | |
1390 | (type == LDLM_PLAIN || type == LDLM_IBITS) ? | |
1391 | res_id->name[2] : policy->l_extent.start, | |
1392 | (type == LDLM_PLAIN || type == LDLM_IBITS) ? | |
1393 | res_id->name[3] : policy->l_extent.end); | |
1394 | ||
1395 | /* check user's security context */ | |
1396 | if (lock->l_conn_export && | |
1397 | sptlrpc_import_check_ctx( | |
1398 | class_exp2cliimp(lock->l_conn_export))) { | |
1399 | if (!(flags & LDLM_FL_TEST_LOCK)) | |
1400 | ldlm_lock_decref_internal(lock, mode); | |
1401 | rc = 0; | |
1402 | } | |
1403 | ||
1404 | if (flags & LDLM_FL_TEST_LOCK) | |
1405 | LDLM_LOCK_RELEASE(lock); | |
1406 | ||
1407 | } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/ | |
1408 | LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res " | |
1409 | LPU64"/"LPU64" ("LPU64" "LPU64")", ns, | |
1410 | type, mode, res_id->name[0], res_id->name[1], | |
1411 | (type == LDLM_PLAIN || type == LDLM_IBITS) ? | |
1412 | res_id->name[2] :policy->l_extent.start, | |
1413 | (type == LDLM_PLAIN || type == LDLM_IBITS) ? | |
1414 | res_id->name[3] : policy->l_extent.end); | |
1415 | } | |
1416 | if (old_lock) | |
1417 | LDLM_LOCK_PUT(old_lock); | |
1418 | ||
1419 | return rc ? mode : 0; | |
1420 | } | |
1421 | EXPORT_SYMBOL(ldlm_lock_match); | |
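/*
 * Illustrative usage sketch (not from the original source): a typical
 * client-side probe for an already granted extent lock looks roughly
 * like this (the namespace "ns", resource id "res_id" and the extent
 * are assumptions of the example):
 *
 *	ldlm_policy_data_t policy = { .l_extent = { .start = 0,
 *						    .end = OBD_OBJECT_EOF } };
 *	struct lustre_handle lockh;
 *	ldlm_mode_t mode;
 *
 *	mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, &res_id, LDLM_EXTENT,
 *			       &policy, LCK_PR | LCK_PW, &lockh, 0);
 *	if (mode != 0) {
 *		... use the cached lock; it now carries an r/w reference ...
 *		ldlm_lock_decref(&lockh, mode);
 *	}
 *
 * The return value is the matched mode (several modes may be OR-ed in
 * the request, since search_queue() matches on l_req_mode & *mode), or
 * 0 when nothing suitable was found.
 */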
1422 | ||
1423 | ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, | |
1424 | __u64 *bits) | |
1425 | { | |
1426 | struct ldlm_lock *lock; | |
1427 | ldlm_mode_t mode = 0; | |
1428 | ENTRY; | |
1429 | ||
1430 | lock = ldlm_handle2lock(lockh); | |
1431 | if (lock != NULL) { | |
1432 | lock_res_and_lock(lock); | |
1433 | if (lock->l_flags & LDLM_FL_GONE_MASK) |
1434 | GOTO(out, mode); |
1435 | ||
1436 | if (lock->l_flags & LDLM_FL_CBPENDING && | |
1437 | lock->l_readers == 0 && lock->l_writers == 0) | |
1438 | GOTO(out, mode); | |
1439 | ||
1440 | if (bits) | |
1441 | *bits = lock->l_policy_data.l_inodebits.bits; | |
1442 | mode = lock->l_granted_mode; | |
1443 | ldlm_lock_addref_internal_nolock(lock, mode); | |
1444 | } | |
1445 | ||
1446 | EXIT; | |
1447 | ||
1448 | out: | |
1449 | if (lock != NULL) { | |
1450 | unlock_res_and_lock(lock); | |
1451 | LDLM_LOCK_PUT(lock); | |
1452 | } | |
1453 | return mode; | |
1454 | } | |
1455 | EXPORT_SYMBOL(ldlm_revalidate_lock_handle); | |
1456 | ||
1457 | /** The caller must guarantee that the buffer is large enough. */ | |
1458 | int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, | |
1459 | enum req_location loc, void *data, int size) | |
1460 | { | |
1461 | void *lvb; | |
1462 | ENTRY; | |
1463 | ||
1464 | LASSERT(data != NULL); | |
1465 | LASSERT(size >= 0); | |
1466 | ||
1467 | switch (lock->l_lvb_type) { | |
1468 | case LVB_T_OST: | |
1469 | if (size == sizeof(struct ost_lvb)) { | |
1470 | if (loc == RCL_CLIENT) | |
1471 | lvb = req_capsule_client_swab_get(pill, | |
1472 | &RMF_DLM_LVB, | |
1473 | lustre_swab_ost_lvb); | |
1474 | else | |
1475 | lvb = req_capsule_server_swab_get(pill, | |
1476 | &RMF_DLM_LVB, | |
1477 | lustre_swab_ost_lvb); | |
1478 | if (unlikely(lvb == NULL)) { | |
1479 | LDLM_ERROR(lock, "no LVB"); | |
1480 | RETURN(-EPROTO); | |
1481 | } | |
1482 | ||
1483 | memcpy(data, lvb, size); | |
1484 | } else if (size == sizeof(struct ost_lvb_v1)) { | |
1485 | struct ost_lvb *olvb = data; | |
1486 | ||
1487 | if (loc == RCL_CLIENT) | |
1488 | lvb = req_capsule_client_swab_get(pill, | |
1489 | &RMF_DLM_LVB, | |
1490 | lustre_swab_ost_lvb_v1); | |
1491 | else | |
1492 | lvb = req_capsule_server_sized_swab_get(pill, | |
1493 | &RMF_DLM_LVB, size, | |
1494 | lustre_swab_ost_lvb_v1); | |
1495 | if (unlikely(lvb == NULL)) { | |
1496 | LDLM_ERROR(lock, "no LVB"); | |
1497 | RETURN(-EPROTO); | |
1498 | } | |
1499 | ||
1500 | memcpy(data, lvb, size); | |
1501 | olvb->lvb_mtime_ns = 0; | |
1502 | olvb->lvb_atime_ns = 0; | |
1503 | olvb->lvb_ctime_ns = 0; | |
1504 | } else { | |
1505 | LDLM_ERROR(lock, "Replied unexpected ost LVB size %d", | |
1506 | size); | |
1507 | RETURN(-EINVAL); | |
1508 | } | |
1509 | break; | |
1510 | case LVB_T_LQUOTA: | |
1511 | if (size == sizeof(struct lquota_lvb)) { | |
1512 | if (loc == RCL_CLIENT) | |
1513 | lvb = req_capsule_client_swab_get(pill, | |
1514 | &RMF_DLM_LVB, | |
1515 | lustre_swab_lquota_lvb); | |
1516 | else | |
1517 | lvb = req_capsule_server_swab_get(pill, | |
1518 | &RMF_DLM_LVB, | |
1519 | lustre_swab_lquota_lvb); | |
1520 | if (unlikely(lvb == NULL)) { | |
1521 | LDLM_ERROR(lock, "no LVB"); | |
1522 | RETURN(-EPROTO); | |
1523 | } | |
1524 | ||
1525 | memcpy(data, lvb, size); | |
1526 | } else { | |
1527 | LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d", | |
1528 | size); | |
1529 | RETURN(-EINVAL); | |
1530 | } | |
1531 | break; | |
1532 | case LVB_T_LAYOUT: | |
1533 | if (size == 0) | |
1534 | break; | |
1535 | ||
1536 | if (loc == RCL_CLIENT) | |
1537 | lvb = req_capsule_client_get(pill, &RMF_DLM_LVB); | |
1538 | else | |
1539 | lvb = req_capsule_server_get(pill, &RMF_DLM_LVB); | |
1540 | if (unlikely(lvb == NULL)) { | |
1541 | LDLM_ERROR(lock, "no LVB"); | |
1542 | RETURN(-EPROTO); | |
1543 | } | |
1544 | ||
1545 | memcpy(data, lvb, size); | |
1546 | break; | |
1547 | default: | |
1548 | LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type); | |
1549 | dump_stack(); |
1550 | RETURN(-EINVAL); |
1551 | } | |
1552 | ||
1553 | RETURN(0); | |
1554 | } | |
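/*
 * Illustrative usage sketch (not from the original source): the "large
 * enough" contract above means the destination buffer covers the full
 * struct while "size" is the length actually carried by the peer; e.g.
 * a client unpacking the OST LVB from an enqueue reply ("req" and
 * "lvb_len" are assumptions of the example):
 *
 *	struct ost_lvb lvb;
 *	rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
 *			   &lvb, lvb_len);
 *
 * When the peer only sent the smaller struct ost_lvb_v1, the missing
 * nanosecond fields of the destination are zeroed, as the LVB_T_OST
 * branch shows.
 */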
1555 | ||
1556 | /** | |
1557 | * Create and fill in new LDLM lock with specified properties. | |
1558 | * Returns a referenced lock | |
1559 | */ | |
1560 | struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, | |
1561 | const struct ldlm_res_id *res_id, | |
1562 | ldlm_type_t type, | |
1563 | ldlm_mode_t mode, | |
1564 | const struct ldlm_callback_suite *cbs, | |
1565 | void *data, __u32 lvb_len, | |
1566 | enum lvb_type lvb_type) | |
1567 | { | |
1568 | struct ldlm_lock *lock; | |
1569 | struct ldlm_resource *res; | |
1570 | ENTRY; | |
1571 | ||
1572 | res = ldlm_resource_get(ns, NULL, res_id, type, 1); | |
1573 | if (res == NULL) | |
1574 | RETURN(NULL); | |
1575 | ||
1576 | lock = ldlm_lock_new(res); | |
1577 | ||
1578 | if (lock == NULL) | |
1579 | RETURN(NULL); | |
1580 | ||
1581 | lock->l_req_mode = mode; | |
1582 | lock->l_ast_data = data; | |
1583 | lock->l_pid = current_pid(); | |
1584 | if (ns_is_server(ns)) |
1585 | lock->l_flags |= LDLM_FL_NS_SRV; |
1586 | if (cbs) { |
1587 | lock->l_blocking_ast = cbs->lcs_blocking; | |
1588 | lock->l_completion_ast = cbs->lcs_completion; | |
1589 | lock->l_glimpse_ast = cbs->lcs_glimpse; | |
d7e09d03 PT |
1590 | } |
1591 | ||
1592 | lock->l_tree_node = NULL; | |
1593 | /* if this is an extent lock, allocate the interval tree node */ | |
1594 | if (type == LDLM_EXTENT) { | |
1595 | if (ldlm_interval_alloc(lock) == NULL) | |
1596 | GOTO(out, 0); | |
1597 | } | |
1598 | ||
1599 | if (lvb_len) { | |
1600 | lock->l_lvb_len = lvb_len; | |
1601 | OBD_ALLOC(lock->l_lvb_data, lvb_len); | |
1602 | if (lock->l_lvb_data == NULL) | |
1603 | GOTO(out, 0); | |
1604 | } | |
1605 | ||
1606 | lock->l_lvb_type = lvb_type; | |
1607 | if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) | |
1608 | GOTO(out, 0); | |
1609 | ||
1610 | RETURN(lock); | |
1611 | ||
1612 | out: | |
1613 | ldlm_lock_destroy(lock); | |
1614 | LDLM_LOCK_RELEASE(lock); | |
1615 | return NULL; | |
1616 | } | |
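/*
 * Illustrative usage sketch (not part of the original file): create a
 * client-side protected-read extent lock with no callbacks and no LVB.
 * The namespace 'ns' and resource id 'res_id' are assumed to come from
 * the caller; the helper name is hypothetical.
 */
static struct ldlm_lock *example_create_pr_extent_lock(struct ldlm_namespace *ns,
							const struct ldlm_res_id *res_id)
{
	/* NULL callback suite, no AST data, zero-length LVB of type LVB_T_NONE */
	return ldlm_lock_create(ns, res_id, LDLM_EXTENT, LCK_PR,
				NULL, NULL, 0, LVB_T_NONE);
}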
1617 | ||
1618 | /** | |
1619 | * Enqueue (request) a lock. | |
1620 | * | |
1621 | * Does not block. As a result of enqueue the lock will be put on | |
1622 | * the granted or waiting list. | |
1623 | * | |
1624 | * If the namespace has an intent policy set and the lock has the | |
1625 | * LDLM_FL_HAS_INTENT flag set, skip all the enqueueing and delegate lock | |
1626 | * processing to the intent policy function. | |
1627 | */ | |
1628 | ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, | |
1629 | struct ldlm_lock **lockp, | |
1630 | void *cookie, __u64 *flags) | |
1631 | { | |
1632 | struct ldlm_lock *lock = *lockp; | |
1633 | struct ldlm_resource *res = lock->l_resource; | |
1634 | int local = ns_is_client(ldlm_res_to_ns(res)); | |
1635 | ldlm_error_t rc = ELDLM_OK; | |
1636 | struct ldlm_interval *node = NULL; | |
1637 | ENTRY; | |
1638 | ||
1639 | lock->l_last_activity = cfs_time_current_sec(); | |
1640 | /* policies are not executed on the client or during replay */ | |
1641 | if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT | |
1642 | && !local && ns->ns_policy) { | |
1643 | rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags, | |
1644 | NULL); | |
1645 | if (rc == ELDLM_LOCK_REPLACED) { | |
1646 | /* The lock that was returned has already been granted, | |
1647 | * and placed into lockp. If it's not the same as the | |
1648 | * one we passed in, then destroy the old one and our | |
1649 | * work here is done. */ | |
1650 | if (lock != *lockp) { | |
1651 | ldlm_lock_destroy(lock); | |
1652 | LDLM_LOCK_RELEASE(lock); | |
1653 | } | |
1654 | *flags |= LDLM_FL_LOCK_CHANGED; | |
1655 | RETURN(0); | |
1656 | } else if (rc != ELDLM_OK || | |
1657 | (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) { | |
1658 | ldlm_lock_destroy(lock); | |
1659 | RETURN(rc); | |
1660 | } | |
1661 | } | |
1662 | ||
1663 | /* A replaying lock might already be in the granted list, so | |
1664 | * unlinking it would free its interval node. We have to allocate | |
1665 | * the interval node early, otherwise we cannot regrant this lock | |
1666 | * in the future. - jay */ | |
1667 | if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT) | |
1668 | OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO); | |
1669 | ||
1670 | lock_res_and_lock(lock); | |
1671 | if (local && lock->l_req_mode == lock->l_granted_mode) { | |
1672 | /* The server returned a blocked lock, but it was granted | |
1673 | * before we got a chance to actually enqueue it. We don't | |
1674 | * need to do anything else. */ | |
1675 | *flags &= ~(LDLM_FL_BLOCK_GRANTED | | |
1676 | LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT); | |
1677 | GOTO(out, ELDLM_OK); | |
1678 | } | |
1679 | ||
1680 | ldlm_resource_unlink_lock(lock); | |
1681 | if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) { | |
1682 | if (node == NULL) { | |
1683 | ldlm_lock_destroy_nolock(lock); | |
1684 | GOTO(out, rc = -ENOMEM); | |
1685 | } | |
1686 | ||
1687 | INIT_LIST_HEAD(&node->li_group); | |
1688 | ldlm_interval_attach(node, lock); | |
1689 | node = NULL; | |
1690 | } | |
1691 | ||
1692 | /* Some flags from the enqueue want to make it into the AST, via the | |
1693 | * lock's l_flags. */ | |
f2145eae | 1694 | lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; |
d7e09d03 PT |
1695 | |
1696 | /* This distinction between local lock trees is very important; a client | |
1697 | * namespace only has information about locks taken by that client, and | |
1698 | * thus doesn't have enough information to decide for itself if it can | |
1699 | * be granted (below). In this case, we do exactly what the server | |
1700 | * tells us to do, as dictated by the 'flags'. | |
1701 | * | |
1702 | * We do exactly the same thing during recovery, when the server is | |
1703 | * more or less trusting the clients not to lie. | |
1704 | * | |
1705 | * FIXME (bug 268): Detect obvious lies by checking compatibility in | |
1706 | * granted/converting queues. */ | |
1707 | if (local) { | |
1708 | if (*flags & LDLM_FL_BLOCK_CONV) | |
1709 | ldlm_resource_add_lock(res, &res->lr_converting, lock); | |
1710 | else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED)) | |
1711 | ldlm_resource_add_lock(res, &res->lr_waiting, lock); | |
1712 | else | |
1713 | ldlm_grant_lock(lock, NULL); | |
1714 | GOTO(out, ELDLM_OK); | |
1715 | } else { | |
1716 | CERROR("This is client-side-only module, cannot handle " | |
1717 | "LDLM_NAMESPACE_SERVER resource type lock.\n"); | |
1718 | LBUG(); | |
1719 | } | |
1720 | ||
1721 | out: | |
1722 | unlock_res_and_lock(lock); | |
1723 | if (node) | |
1724 | OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node)); | |
1725 | return rc; | |
1726 | } | |
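/*
 * Illustrative only (not in the original source): create and then enqueue
 * a lock on a client namespace.  No intent policy is involved here, so the
 * cookie is NULL, and lock reference management is omitted for brevity;
 * the helper name and error mapping are hypothetical.
 */
static int example_create_and_enqueue(struct ldlm_namespace *ns,
				      const struct ldlm_res_id *res_id)
{
	struct ldlm_lock *lock;
	__u64 flags = 0;
	ldlm_error_t err;

	lock = ldlm_lock_create(ns, res_id, LDLM_PLAIN, LCK_PR,
				NULL, NULL, 0, LVB_T_NONE);
	if (lock == NULL)
		return -ENOMEM;

	err = ldlm_lock_enqueue(ns, &lock, NULL, &flags);

	/* real callers translate ldlm_error_t more carefully */
	return err == ELDLM_OK ? 0 : -EIO;
}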
1727 | ||
1728 | ||
1729 | /** | |
1730 | * Process a call to blocking AST callback for a lock in ast_work list | |
1731 | */ | |
1732 | static int | |
1733 | ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) | |
1734 | { | |
1735 | struct ldlm_cb_set_arg *arg = opaq; | |
1736 | struct ldlm_lock_desc d; | |
1737 | int rc; | |
1738 | struct ldlm_lock *lock; | |
1739 | ENTRY; | |
1740 | ||
1741 | if (list_empty(arg->list)) | |
1742 | RETURN(-ENOENT); | |
1743 | ||
1744 | lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast); | |
1745 | ||
1746 | /* nobody should touch l_bl_ast */ | |
1747 | lock_res_and_lock(lock); | |
1748 | list_del_init(&lock->l_bl_ast); | |
1749 | ||
1750 | LASSERT(lock->l_flags & LDLM_FL_AST_SENT); | |
1751 | LASSERT(lock->l_bl_ast_run == 0); | |
1752 | LASSERT(lock->l_blocking_lock); | |
1753 | lock->l_bl_ast_run++; | |
1754 | unlock_res_and_lock(lock); | |
1755 | ||
1756 | ldlm_lock2desc(lock->l_blocking_lock, &d); | |
1757 | ||
1758 | rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING); | |
1759 | LDLM_LOCK_RELEASE(lock->l_blocking_lock); | |
1760 | lock->l_blocking_lock = NULL; | |
1761 | LDLM_LOCK_RELEASE(lock); | |
1762 | ||
1763 | RETURN(rc); | |
1764 | } | |
1765 | ||
1766 | /** | |
1767 | * Process a call to completion AST callback for a lock in ast_work list | |
1768 | */ | |
1769 | static int | |
1770 | ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) | |
1771 | { | |
1772 | struct ldlm_cb_set_arg *arg = opaq; | |
1773 | int rc = 0; | |
1774 | struct ldlm_lock *lock; | |
1775 | ldlm_completion_callback completion_callback; | |
1776 | ENTRY; | |
1777 | ||
1778 | if (list_empty(arg->list)) | |
1779 | RETURN(-ENOENT); | |
1780 | ||
1781 | lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast); | |
1782 | ||
1783 | /* It's possible to receive a completion AST before we've set | |
1784 | * the l_completion_ast pointer: either because the AST arrived | |
1785 | * before the reply, or simply because there's a small race | |
1786 | * window between receiving the reply and finishing the local | |
1787 | * enqueue. (bug 842) | |
1788 | * | |
1789 | * This can't happen with the blocking_ast, however, because we | |
1790 | * will never call the local blocking_ast until we drop our | |
1791 | * reader/writer reference, which we won't do until we get the | |
1792 | * reply and finish enqueueing. */ | |
1793 | ||
1794 | /* nobody should touch l_cp_ast */ | |
1795 | lock_res_and_lock(lock); | |
1796 | list_del_init(&lock->l_cp_ast); | |
1797 | LASSERT(lock->l_flags & LDLM_FL_CP_REQD); | |
1798 | /* save l_completion_ast since it can be changed by | |
1799 | * mds_intent_policy(), see bug 14225 */ | |
1800 | completion_callback = lock->l_completion_ast; | |
1801 | lock->l_flags &= ~LDLM_FL_CP_REQD; | |
1802 | unlock_res_and_lock(lock); | |
1803 | ||
1804 | if (completion_callback != NULL) | |
1805 | rc = completion_callback(lock, 0, (void *)arg); | |
1806 | LDLM_LOCK_RELEASE(lock); | |
1807 | ||
1808 | RETURN(rc); | |
1809 | } | |
1810 | ||
1811 | /** | |
1812 | * Process a call to revocation AST callback for a lock in ast_work list | |
1813 | */ | |
1814 | static int | |
1815 | ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) | |
1816 | { | |
1817 | struct ldlm_cb_set_arg *arg = opaq; | |
1818 | struct ldlm_lock_desc desc; | |
1819 | int rc; | |
1820 | struct ldlm_lock *lock; | |
1821 | ENTRY; | |
1822 | ||
1823 | if (list_empty(arg->list)) | |
1824 | RETURN(-ENOENT); | |
1825 | ||
1826 | lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast); | |
1827 | list_del_init(&lock->l_rk_ast); | |
1828 | ||
1829 | /* the desc just pretends the lock is exclusive */ | |
1830 | ldlm_lock2desc(lock, &desc); | |
1831 | desc.l_req_mode = LCK_EX; | |
1832 | desc.l_granted_mode = 0; | |
1833 | ||
1834 | rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING); | |
1835 | LDLM_LOCK_RELEASE(lock); | |
1836 | ||
1837 | RETURN(rc); | |
1838 | } | |
1839 | ||
1840 | /** | |
1841 | * Process a call to glimpse AST callback for a lock in ast_work list | |
1842 | */ | |
1843 | int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) | |
1844 | { | |
1845 | struct ldlm_cb_set_arg *arg = opaq; | |
1846 | struct ldlm_glimpse_work *gl_work; | |
1847 | struct ldlm_lock *lock; | |
1848 | int rc = 0; | |
1849 | ENTRY; | |
1850 | ||
1851 | if (list_empty(arg->list)) | |
1852 | RETURN(-ENOENT); | |
1853 | ||
1854 | gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work, | |
1855 | gl_list); | |
1856 | list_del_init(&gl_work->gl_list); | |
1857 | ||
1858 | lock = gl_work->gl_lock; | |
1859 | ||
1860 | /* transfer the glimpse descriptor to ldlm_cb_set_arg */ | |
1861 | arg->gl_desc = gl_work->gl_desc; | |
1862 | ||
1863 | /* invoke the actual glimpse callback */ | |
1864 | if (lock->l_glimpse_ast(lock, (void*)arg) == 0) | |
1865 | rc = 1; | |
1866 | ||
1867 | LDLM_LOCK_RELEASE(lock); | |
1868 | ||
1869 | if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0) | |
1870 | OBD_FREE_PTR(gl_work); | |
1871 | ||
1872 | RETURN(rc); | |
1873 | } | |
1874 | ||
1875 | /** | |
1876 | * Process a list of locks that need ASTs sent. | |
1877 | * | |
1878 | * Used on the server to send multiple ASTs together instead of sending | |
1879 | * them one by one. | |
1880 | */ | |
1881 | int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, | |
1882 | ldlm_desc_ast_t ast_type) | |
1883 | { | |
1884 | struct ldlm_cb_set_arg *arg; | |
1885 | set_producer_func work_ast_lock; | |
1886 | int rc; | |
1887 | ||
1888 | if (list_empty(rpc_list)) | |
1889 | RETURN(0); | |
1890 | ||
1891 | OBD_ALLOC_PTR(arg); | |
1892 | if (arg == NULL) | |
1893 | RETURN(-ENOMEM); | |
1894 | ||
1895 | atomic_set(&arg->restart, 0); | |
1896 | arg->list = rpc_list; | |
1897 | ||
1898 | switch (ast_type) { | |
1899 | case LDLM_WORK_BL_AST: | |
1900 | arg->type = LDLM_BL_CALLBACK; | |
1901 | work_ast_lock = ldlm_work_bl_ast_lock; | |
1902 | break; | |
1903 | case LDLM_WORK_CP_AST: | |
1904 | arg->type = LDLM_CP_CALLBACK; | |
1905 | work_ast_lock = ldlm_work_cp_ast_lock; | |
1906 | break; | |
1907 | case LDLM_WORK_REVOKE_AST: | |
1908 | arg->type = LDLM_BL_CALLBACK; | |
1909 | work_ast_lock = ldlm_work_revoke_ast_lock; | |
1910 | break; | |
1911 | case LDLM_WORK_GL_AST: | |
1912 | arg->type = LDLM_GL_CALLBACK; | |
1913 | work_ast_lock = ldlm_work_gl_ast_lock; | |
1914 | break; | |
1915 | default: | |
1916 | LBUG(); | |
1917 | } | |
1918 | ||
1919 | /* We create a ptlrpc request set with the flow control extension. | |
1920 | * This request set will use the work_ast_lock function to produce new | |
1921 | * requests and will send a new request each time one completes, in order | |
1922 | * to keep the number of requests in flight at ns_max_parallel_ast. */ | |
1923 | arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX, | |
1924 | work_ast_lock, arg); | |
1925 | if (arg->set == NULL) | |
1926 | GOTO(out, rc = -ENOMEM); | |
1927 | ||
1928 | ptlrpc_set_wait(arg->set); | |
1929 | ptlrpc_set_destroy(arg->set); | |
1930 | ||
1931 | rc = atomic_read(&arg->restart) ? -ERESTART : 0; | |
1932 | GOTO(out, rc); | |
1933 | out: | |
1934 | OBD_FREE_PTR(arg); | |
1935 | return rc; | |
1936 | } | |
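/*
 * Hedged sketch (not part of the original file): grant a lock, queuing its
 * completion AST on a local list, then push all queued ASTs out in one
 * batch.  -ERESTART indicates some ASTs still need to be resent after
 * reprocessing; the helper name is hypothetical.
 */
static int example_grant_and_send_cp_ast(struct ldlm_namespace *ns,
					 struct ldlm_lock *lock)
{
	LIST_HEAD(rpc_list);

	lock_res_and_lock(lock);
	ldlm_resource_unlink_lock(lock);
	ldlm_grant_lock(lock, &rpc_list);
	unlock_res_and_lock(lock);

	return ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
}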
1937 | ||
1938 | static int reprocess_one_queue(struct ldlm_resource *res, void *closure) | |
1939 | { | |
1940 | ldlm_reprocess_all(res); | |
1941 | return LDLM_ITER_CONTINUE; | |
1942 | } | |
1943 | ||
1944 | static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd, | |
1945 | struct hlist_node *hnode, void *arg) | |
1946 | { | |
1947 | struct ldlm_resource *res = cfs_hash_object(hs, hnode); | |
1948 | int rc; | |
1949 | ||
1950 | rc = reprocess_one_queue(res, arg); | |
1951 | ||
1952 | return rc == LDLM_ITER_STOP; | |
1953 | } | |
1954 | ||
1955 | /** | |
1956 | * Iterate through all resources on a namespace attempting to grant waiting | |
1957 | * locks. | |
1958 | */ | |
1959 | void ldlm_reprocess_all_ns(struct ldlm_namespace *ns) | |
1960 | { | |
1961 | ENTRY; | |
1962 | ||
1963 | if (ns != NULL) { | |
1964 | cfs_hash_for_each_nolock(ns->ns_rs_hash, | |
1965 | ldlm_reprocess_res, NULL); | |
1966 | } | |
1967 | EXIT; | |
1968 | } | |
1969 | EXPORT_SYMBOL(ldlm_reprocess_all_ns); | |
1970 | ||
1971 | /** | |
1972 | * Try to grant all waiting locks on a resource. | |
1973 | * | |
1974 | * Calls ldlm_reprocess_queue on converting and waiting queues. | |
1975 | * | |
1976 | * Typically called after some resource locks are cancelled to see | |
1977 | * if anything could be granted as a result of the cancellation. | |
1978 | */ | |
1979 | void ldlm_reprocess_all(struct ldlm_resource *res) | |
1980 | { | |
1981 | LIST_HEAD(rpc_list); | |
1982 | ||
1983 | ENTRY; | |
1984 | if (!ns_is_client(ldlm_res_to_ns(res))) { | |
1985 | CERROR("This is client-side-only module, cannot handle " | |
1986 | "LDLM_NAMESPACE_SERVER resource type lock.\n"); | |
1987 | LBUG(); | |
1988 | } | |
1989 | EXIT; | |
1990 | } | |
1991 | ||
1992 | /** | |
1993 | * Helper function to call blocking AST for LDLM lock \a lock in a | |
1994 | * "cancelling" mode. | |
1995 | */ | |
1996 | void ldlm_cancel_callback(struct ldlm_lock *lock) | |
1997 | { | |
1998 | check_res_locked(lock->l_resource); | |
1999 | if (!(lock->l_flags & LDLM_FL_CANCEL)) { | |
2000 | lock->l_flags |= LDLM_FL_CANCEL; | |
2001 | if (lock->l_blocking_ast) { | |
2002 | unlock_res_and_lock(lock); | |
2003 | lock->l_blocking_ast(lock, NULL, lock->l_ast_data, | |
2004 | LDLM_CB_CANCELING); | |
2005 | lock_res_and_lock(lock); | |
2006 | } else { | |
2007 | LDLM_DEBUG(lock, "no blocking ast"); | |
2008 | } | |
2009 | } | |
2010 | lock->l_flags |= LDLM_FL_BL_DONE; | |
2011 | } | |
2012 | ||
2013 | /** | |
2014 | * Remove skiplist-enabled LDLM lock \a req from the granted list | |
2015 | */ | |
2016 | void ldlm_unlink_lock_skiplist(struct ldlm_lock *req) | |
2017 | { | |
2018 | if (req->l_resource->lr_type != LDLM_PLAIN && | |
2019 | req->l_resource->lr_type != LDLM_IBITS) | |
2020 | return; | |
2021 | ||
2022 | list_del_init(&req->l_sl_policy); | |
2023 | list_del_init(&req->l_sl_mode); | |
2024 | } | |
2025 | ||
2026 | /** | |
2027 | * Attempts to cancel LDLM lock \a lock that has no reader/writer references. | |
2028 | */ | |
2029 | void ldlm_lock_cancel(struct ldlm_lock *lock) | |
2030 | { | |
2031 | struct ldlm_resource *res; | |
2032 | struct ldlm_namespace *ns; | |
2033 | ENTRY; | |
2034 | ||
2035 | lock_res_and_lock(lock); | |
2036 | ||
2037 | res = lock->l_resource; | |
2038 | ns = ldlm_res_to_ns(res); | |
2039 | ||
2040 | /* Please do not, no matter how tempting, remove this LBUG without | |
2041 | * talking to me first. -phik */ | |
2042 | if (lock->l_readers || lock->l_writers) { | |
2043 | LDLM_ERROR(lock, "lock still has references"); | |
2044 | LBUG(); | |
2045 | } | |
2046 | ||
f2145eae | 2047 | if (lock->l_flags & LDLM_FL_WAITED) |
d7e09d03 PT |
2048 | ldlm_del_waiting_lock(lock); |
2049 | ||
2050 | /* Releases cancel callback. */ | |
2051 | ldlm_cancel_callback(lock); | |
2052 | ||
2053 | /* Yes, second time, just in case it was added again while we were | |
f2145eae BK |
2054 | * running with no res lock in ldlm_cancel_callback */ |
2055 | if (lock->l_flags & LDLM_FL_WAITED) | |
d7e09d03 PT |
2056 | ldlm_del_waiting_lock(lock); |
2057 | ||
2058 | ldlm_resource_unlink_lock(lock); | |
2059 | ldlm_lock_destroy_nolock(lock); | |
2060 | ||
2061 | if (lock->l_granted_mode == lock->l_req_mode) | |
2062 | ldlm_pool_del(&ns->ns_pool, lock); | |
2063 | ||
2064 | /* Make sure we will not be called again for the same lock, which is | |
2065 | * possible if we do not zero out lock->l_granted_mode */ | |
2066 | lock->l_granted_mode = LCK_MINMODE; | |
2067 | unlock_res_and_lock(lock); | |
2068 | ||
2069 | EXIT; | |
2070 | } | |
2071 | EXPORT_SYMBOL(ldlm_lock_cancel); | |
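/*
 * Hedged sketch (not in the original file): cancel a lock located by its
 * handle.  ldlm_lock_cancel() asserts that all reader/writer references
 * have already been dropped, so the caller must release them first.
 */
static void example_cancel_by_handle(struct lustre_handle *lockh)
{
	struct ldlm_lock *lock = ldlm_handle2lock(lockh);

	if (lock == NULL)
		return;

	ldlm_lock_cancel(lock);
	LDLM_LOCK_PUT(lock);	/* drop the reference taken by ldlm_handle2lock() */
}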
2072 | ||
2073 | /** | |
2074 | * Set opaque data into the lock that only makes sense to upper layer. | |
2075 | */ | |
2076 | int ldlm_lock_set_data(struct lustre_handle *lockh, void *data) | |
2077 | { | |
2078 | struct ldlm_lock *lock = ldlm_handle2lock(lockh); | |
2079 | int rc = -EINVAL; | |
2080 | ENTRY; | |
2081 | ||
2082 | if (lock) { | |
2083 | if (lock->l_ast_data == NULL) | |
2084 | lock->l_ast_data = data; | |
2085 | if (lock->l_ast_data == data) | |
2086 | rc = 0; | |
2087 | LDLM_LOCK_PUT(lock); | |
2088 | } | |
2089 | RETURN(rc); | |
2090 | } | |
2091 | EXPORT_SYMBOL(ldlm_lock_set_data); | |
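/*
 * Hedged sketch (not from the original source): a typical caller attaches
 * an upper-layer object (an inode here, purely as an example) to a lock it
 * holds by handle; a non-zero return means the lock already carries
 * different ast_data.
 */
static int example_attach_ast_data(struct lustre_handle *lockh,
				   struct inode *inode)
{
	/* returns 0 if l_ast_data was unset or already equals 'inode' */
	return ldlm_lock_set_data(lockh, inode);
}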
2092 | ||
2093 | struct export_cl_data { | |
2094 | struct obd_export *ecl_exp; | |
2095 | int ecl_loop; | |
2096 | }; | |
2097 | ||
2098 | /** | |
2099 | * Iterator function for ldlm_cancel_locks_for_export. | |
2100 | * Cancels passed locks. | |
2101 | */ | |
2102 | int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd, | |
2103 | struct hlist_node *hnode, void *data) | |
2104 | ||
2105 | { | |
2106 | struct export_cl_data *ecl = (struct export_cl_data *)data; | |
2107 | struct obd_export *exp = ecl->ecl_exp; | |
2108 | struct ldlm_lock *lock = cfs_hash_object(hs, hnode); | |
2109 | struct ldlm_resource *res; | |
2110 | ||
2111 | res = ldlm_resource_getref(lock->l_resource); | |
2112 | LDLM_LOCK_GET(lock); | |
2113 | ||
2114 | LDLM_DEBUG(lock, "export %p", exp); | |
2115 | ldlm_res_lvbo_update(res, NULL, 1); | |
2116 | ldlm_lock_cancel(lock); | |
2117 | ldlm_reprocess_all(res); | |
2118 | ldlm_resource_putref(res); | |
2119 | LDLM_LOCK_RELEASE(lock); | |
2120 | ||
2121 | ecl->ecl_loop++; | |
2122 | if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) { | |
2123 | CDEBUG(D_INFO, | |
2124 | "Cancel lock %p for export %p (loop %d), still have " | |
2125 | "%d locks left on hash table.\n", | |
2126 | lock, exp, ecl->ecl_loop, | |
2127 | atomic_read(&hs->hs_count)); | |
2128 | } | |
2129 | ||
2130 | return 0; | |
2131 | } | |
2132 | ||
2133 | /** | |
2134 | * Cancel all locks for a given export. | |
2135 | * | |
2136 | * Typically called on client disconnection/eviction | |
2137 | */ | |
2138 | void ldlm_cancel_locks_for_export(struct obd_export *exp) | |
2139 | { | |
2140 | struct export_cl_data ecl = { | |
2141 | .ecl_exp = exp, | |
2142 | .ecl_loop = 0, | |
2143 | }; | |
2144 | ||
2145 | cfs_hash_for_each_empty(exp->exp_lock_hash, | |
2146 | ldlm_cancel_locks_for_export_cb, &ecl); | |
2147 | } | |
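/*
 * Hedged sketch (not from the original file): typical call site when a
 * client is evicted or disconnects; every lock hashed on the export is
 * cancelled via the iterator above.
 */
static void example_evict_export(struct obd_export *exp)
{
	ldlm_cancel_locks_for_export(exp);
}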
2148 | ||
2149 | /** | |
2150 | * Downgrade an exclusive lock. | |
2151 | * | |
2152 | * A fast variant of ldlm_lock_convert for conversion of exclusive | |
2153 | * locks. The conversion is always successful. | |
2154 | * Used by Commit on Sharing (COS) code. | |
2155 | * | |
2156 | * \param lock A lock to convert | |
2157 | * \param new_mode new lock mode | |
2158 | */ | |
2159 | void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode) | |
2160 | { | |
2161 | ENTRY; | |
2162 | ||
2163 | LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX)); | |
2164 | LASSERT(new_mode == LCK_COS); | |
2165 | ||
2166 | lock_res_and_lock(lock); | |
2167 | ldlm_resource_unlink_lock(lock); | |
2168 | /* | |
2169 | * Remove the lock from pool as it will be added again in | |
2170 | * ldlm_grant_lock() called below. | |
2171 | */ | |
2172 | ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock); | |
2173 | ||
2174 | lock->l_req_mode = new_mode; | |
2175 | ldlm_grant_lock(lock, NULL); | |
2176 | unlock_res_and_lock(lock); | |
2177 | ldlm_reprocess_all(lock->l_resource); | |
2178 | ||
2179 | EXIT; | |
2180 | } | |
2181 | EXPORT_SYMBOL(ldlm_lock_downgrade); | |
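/*
 * Hedged sketch (not part of the original file): the Commit-on-Sharing
 * path keeps a write lock granted but weakens it, so other COS locks can
 * be granted alongside it.
 */
static void example_downgrade_to_cos(struct ldlm_lock *lock)
{
	/* only EX/PW locks may be downgraded, and only to LCK_COS */
	ldlm_lock_downgrade(lock, LCK_COS);
}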
2182 | ||
2183 | /** | |
2184 | * Attempt to convert already granted lock to a different mode. | |
2185 | * | |
2186 | * While lock conversion is not currently used, future client-side | |
2187 | * optimizations could take advantage of it to avoid discarding cached | |
2188 | * pages on a file. | |
2189 | */ | |
2190 | struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, | |
2191 | __u32 *flags) | |
2192 | { | |
2193 | LIST_HEAD(rpc_list); | |
2194 | struct ldlm_resource *res; | |
2195 | struct ldlm_namespace *ns; | |
2196 | int granted = 0; | |
2197 | struct ldlm_interval *node; | |
2198 | ENTRY; | |
2199 | ||
2200 | /* Just return if mode is unchanged. */ | |
2201 | if (new_mode == lock->l_granted_mode) { | |
2202 | *flags |= LDLM_FL_BLOCK_GRANTED; | |
2203 | RETURN(lock->l_resource); | |
2204 | } | |
2205 | ||
2206 | /* I can't check the type of the lock here because its bitlock is | |
2207 | * not held here, so do the allocation blindly. -jay */ | |
2208 | OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO); | |
2d58de78 LW |
2209 | if (node == NULL) |
2210 | /* Actually, this causes EDEADLOCK to be returned */ | |
d7e09d03 PT |
2211 | RETURN(NULL); |
2212 | ||
2213 | LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR), | |
2214 | "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode); | |
2215 | ||
2216 | lock_res_and_lock(lock); | |
2217 | ||
2218 | res = lock->l_resource; | |
2219 | ns = ldlm_res_to_ns(res); | |
2220 | ||
2221 | lock->l_req_mode = new_mode; | |
2222 | if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) { | |
2223 | ldlm_resource_unlink_lock(lock); | |
2224 | } else { | |
2225 | ldlm_resource_unlink_lock(lock); | |
2226 | if (res->lr_type == LDLM_EXTENT) { | |
2227 | /* FIXME: ugly code, I have to attach the lock to an | |
2228 | * interval node again since it may be granted again | |
2229 | * soon */ | |
2230 | INIT_LIST_HEAD(&node->li_group); | |
2231 | ldlm_interval_attach(node, lock); | |
2232 | node = NULL; | |
2233 | } | |
2234 | } | |
2235 | ||
2236 | /* | |
2237 | * Remove old lock from the pool before adding the lock with new | |
2238 | * mode below in ->policy() | |
2239 | */ | |
2240 | ldlm_pool_del(&ns->ns_pool, lock); | |
2241 | ||
2242 | /* If this is a local resource, put it on the appropriate list. */ | |
2243 | if (ns_is_client(ldlm_res_to_ns(res))) { | |
2244 | if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) { | |
2245 | ldlm_resource_add_lock(res, &res->lr_converting, lock); | |
2246 | } else { | |
2247 | /* This should never happen, because of the way the | |
2248 | * server handles conversions. */ | |
2249 | LDLM_ERROR(lock, "Erroneous flags %x on local lock\n", | |
2250 | *flags); | |
2251 | LBUG(); | |
2252 | ||
2253 | ldlm_grant_lock(lock, &rpc_list); | |
2254 | granted = 1; | |
2255 | /* FIXME: completion handling not with lr_lock held ! */ | |
2256 | if (lock->l_completion_ast) | |
2257 | lock->l_completion_ast(lock, 0, NULL); | |
2258 | } | |
2259 | } else { | |
2260 | CERROR("This is client-side-only module, cannot handle " | |
2261 | "LDLM_NAMESPACE_SERVER resource type lock.\n"); | |
2262 | LBUG(); | |
2263 | } | |
2264 | unlock_res_and_lock(lock); | |
2265 | ||
2266 | if (granted) | |
2267 | ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST); | |
2268 | if (node) | |
2269 | OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node)); | |
2270 | RETURN(res); | |
2271 | } | |
2272 | EXPORT_SYMBOL(ldlm_lock_convert); | |
2273 | ||
2274 | /** | |
2275 | * Print lock with lock handle \a lockh description into debug log. | |
2276 | * | |
2277 | * Used when printing all locks on a resource for debug purposes. | |
2278 | */ | |
2279 | void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh) | |
2280 | { | |
2281 | struct ldlm_lock *lock; | |
2282 | ||
2283 | if (!((libcfs_debug | D_ERROR) & level)) | |
2284 | return; | |
2285 | ||
2286 | lock = ldlm_handle2lock(lockh); | |
2287 | if (lock == NULL) | |
2288 | return; | |
2289 | ||
2290 | LDLM_DEBUG_LIMIT(level, lock, "###"); | |
2291 | ||
2292 | LDLM_LOCK_PUT(lock); | |
2293 | } | |
2294 | EXPORT_SYMBOL(ldlm_lock_dump_handle); | |
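/*
 * Hedged sketch (not in the original source): dump a lock at the DLM
 * trace level; nothing is printed unless D_DLMTRACE (or D_ERROR) is
 * enabled in libcfs_debug.
 */
static void example_dump_lock(struct lustre_handle *lockh)
{
	ldlm_lock_dump_handle(D_DLMTRACE, lockh);
}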
2295 | ||
2296 | /** | |
2297 | * Print lock information with custom message into debug log. | |
2298 | * Helper function. | |
2299 | */ | |
2300 | void _ldlm_lock_debug(struct ldlm_lock *lock, | |
2301 | struct libcfs_debug_msg_data *msgdata, | |
2302 | const char *fmt, ...) | |
2303 | { | |
2304 | va_list args; | |
2305 | struct obd_export *exp = lock->l_export; | |
2306 | struct ldlm_resource *resource = lock->l_resource; | |
2307 | char *nid = "local"; | |
2308 | ||
2309 | va_start(args, fmt); | |
2310 | ||
2311 | if (exp && exp->exp_connection) { | |
2312 | nid = libcfs_nid2str(exp->exp_connection->c_peer.nid); | |
2313 | } else if (exp && exp->exp_obd != NULL) { | |
2314 | struct obd_import *imp = exp->exp_obd->u.cli.cl_import; | |
2315 | nid = libcfs_nid2str(imp->imp_connection->c_peer.nid); | |
2316 | } | |
2317 | ||
2318 | if (resource == NULL) { | |
2319 | libcfs_debug_vmsg2(msgdata, fmt, args, | |
2320 | " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s " | |
2321 | "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" nid: %s " | |
2322 | "remote: "LPX64" expref: %d pid: %u timeout: %lu " | |
2323 | "lvb_type: %d\n", | |
2324 | lock, | |
2325 | lock->l_handle.h_cookie, atomic_read(&lock->l_refc), | |
2326 | lock->l_readers, lock->l_writers, | |
2327 | ldlm_lockname[lock->l_granted_mode], | |
2328 | ldlm_lockname[lock->l_req_mode], | |
2329 | lock->l_flags, nid, lock->l_remote_handle.cookie, | |
2330 | exp ? atomic_read(&exp->exp_refcount) : -99, | |
2331 | lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type); | |
2332 | va_end(args); | |
2333 | return; | |
2334 | } | |
2335 | ||
2336 | switch (resource->lr_type) { | |
2337 | case LDLM_EXTENT: | |
2338 | libcfs_debug_vmsg2(msgdata, fmt, args, | |
ce74f92d AD |
2339 | " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s " |
2340 | "res: "DLDLMRES" rrc: %d type: %s ["LPU64"->"LPU64"] " | |
2341 | "(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: " | |
2342 | LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n", | |
2343 | ldlm_lock_to_ns_name(lock), lock, | |
2344 | lock->l_handle.h_cookie, atomic_read(&lock->l_refc), | |
2345 | lock->l_readers, lock->l_writers, | |
2346 | ldlm_lockname[lock->l_granted_mode], | |
2347 | ldlm_lockname[lock->l_req_mode], | |
2348 | PLDLMRES(resource), | |
2349 | atomic_read(&resource->lr_refcount), | |
2350 | ldlm_typename[resource->lr_type], | |
2351 | lock->l_policy_data.l_extent.start, | |
2352 | lock->l_policy_data.l_extent.end, | |
2353 | lock->l_req_extent.start, lock->l_req_extent.end, | |
2354 | lock->l_flags, nid, lock->l_remote_handle.cookie, | |
2355 | exp ? atomic_read(&exp->exp_refcount) : -99, | |
2356 | lock->l_pid, lock->l_callback_timeout, | |
2357 | lock->l_lvb_type); | |
d7e09d03 PT |
2358 | break; |
2359 | ||
2360 | case LDLM_FLOCK: | |
2361 | libcfs_debug_vmsg2(msgdata, fmt, args, | |
ce74f92d AD |
2362 | " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s " |
2363 | "res: "DLDLMRES" rrc: %d type: %s pid: %d " | |
2364 | "["LPU64"->"LPU64"] flags: "LPX64" nid: %s " | |
2365 | "remote: "LPX64" expref: %d pid: %u timeout: %lu\n", | |
2366 | ldlm_lock_to_ns_name(lock), lock, | |
2367 | lock->l_handle.h_cookie, atomic_read(&lock->l_refc), | |
2368 | lock->l_readers, lock->l_writers, | |
2369 | ldlm_lockname[lock->l_granted_mode], | |
2370 | ldlm_lockname[lock->l_req_mode], | |
2371 | PLDLMRES(resource), | |
2372 | atomic_read(&resource->lr_refcount), | |
2373 | ldlm_typename[resource->lr_type], | |
2374 | lock->l_policy_data.l_flock.pid, | |
2375 | lock->l_policy_data.l_flock.start, | |
2376 | lock->l_policy_data.l_flock.end, | |
2377 | lock->l_flags, nid, lock->l_remote_handle.cookie, | |
2378 | exp ? atomic_read(&exp->exp_refcount) : -99, | |
2379 | lock->l_pid, lock->l_callback_timeout); | |
d7e09d03 PT |
2380 | break; |
2381 | ||
2382 | case LDLM_IBITS: | |
2383 | libcfs_debug_vmsg2(msgdata, fmt, args, | |
ce74f92d AD |
2384 | " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s " |
2385 | "res: "DLDLMRES" bits "LPX64" rrc: %d type: %s " | |
2386 | "flags: "LPX64" nid: %s remote: "LPX64" expref: %d " | |
2387 | "pid: %u timeout: %lu lvb_type: %d\n", | |
2388 | ldlm_lock_to_ns_name(lock), | |
2389 | lock, lock->l_handle.h_cookie, | |
2390 | atomic_read(&lock->l_refc), | |
2391 | lock->l_readers, lock->l_writers, | |
2392 | ldlm_lockname[lock->l_granted_mode], | |
2393 | ldlm_lockname[lock->l_req_mode], | |
2394 | PLDLMRES(resource), | |
2395 | lock->l_policy_data.l_inodebits.bits, | |
2396 | atomic_read(&resource->lr_refcount), | |
2397 | ldlm_typename[resource->lr_type], | |
2398 | lock->l_flags, nid, lock->l_remote_handle.cookie, | |
2399 | exp ? atomic_read(&exp->exp_refcount) : -99, | |
2400 | lock->l_pid, lock->l_callback_timeout, | |
2401 | lock->l_lvb_type); | |
d7e09d03 PT |
2402 | break; |
2403 | ||
2404 | default: | |
2405 | libcfs_debug_vmsg2(msgdata, fmt, args, | |
ce74f92d AD |
2406 | " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s " |
2407 | "res: "DLDLMRES" rrc: %d type: %s flags: "LPX64" " | |
2408 | "nid: %s remote: "LPX64" expref: %d pid: %u " | |
2409 | "timeout: %lu lvb_type: %d\n", | |
2410 | ldlm_lock_to_ns_name(lock), | |
2411 | lock, lock->l_handle.h_cookie, | |
2412 | atomic_read(&lock->l_refc), | |
2413 | lock->l_readers, lock->l_writers, | |
2414 | ldlm_lockname[lock->l_granted_mode], | |
2415 | ldlm_lockname[lock->l_req_mode], | |
2416 | PLDLMRES(resource), | |
2417 | atomic_read(&resource->lr_refcount), | |
2418 | ldlm_typename[resource->lr_type], | |
2419 | lock->l_flags, nid, lock->l_remote_handle.cookie, | |
2420 | exp ? atomic_read(&exp->exp_refcount) : -99, | |
2421 | lock->l_pid, lock->l_callback_timeout, | |
2422 | lock->l_lvb_type); | |
d7e09d03 PT |
2423 | break; |
2424 | } | |
2425 | va_end(args); | |
2426 | } | |
2427 | EXPORT_SYMBOL(_ldlm_lock_debug); |