/* commit d7e09d03 */
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
18 | * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf | |
19 | * | |
20 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
21 | * CA 95054 USA or visit www.sun.com if you need additional information or | |
22 | * have any questions. | |
23 | * | |
24 | * GPL HEADER END | |
25 | */ | |
26 | /* | |
27 | * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. | |
28 | * Use is subject to license terms. | |
29 | * | |
30 | * Copyright (c) 2011, 2012, Intel Corporation. | |
31 | */ | |
32 | /* | |
33 | * This file is part of Lustre, http://www.lustre.org/ | |
34 | * Lustre is a trademark of Sun Microsystems, Inc. | |
35 | * | |
36 | * Client Extent Lock. | |
37 | * | |
38 | * Author: Nikita Danilov <nikita.danilov@sun.com> | |
39 | */ | |
40 | ||
41 | #define DEBUG_SUBSYSTEM S_CLASS | |
42 | ||
43 | #include <obd_class.h> | |
44 | #include <obd_support.h> | |
45 | #include <lustre_fid.h> | |
46 | #include <linux/list.h> | |
47 | #include <cl_object.h> | |
48 | #include "cl_internal.h" | |
49 | ||
50 | /** Lock class of cl_lock::cll_guard */ | |
51 | static struct lock_class_key cl_lock_guard_class; | |
52 | static struct kmem_cache *cl_lock_kmem; | |
53 | ||
/*
 * Slab-cache descriptor table for cl_lock allocations, terminated by a
 * NULL ->ckd_cache entry.  Presumably registered/unregistered via
 * lu_kmem_init()/lu_kmem_fini() in module setup code — confirm at caller.
 */
static struct lu_kmem_descr cl_lock_caches[] = {
	{
		.ckd_cache = &cl_lock_kmem,
		.ckd_name  = "cl_lock_kmem",
		.ckd_size  = sizeof (struct cl_lock)
	},
	{
		.ckd_cache = NULL
	}
};
64 | ||
/*
 * Lock statistics are compiled out: these counter-update macros expand to
 * nothing.  The call sites are kept throughout this file so that the stats
 * can be re-enabled without touching the logic.
 */
#define CS_LOCK_INC(o, item)
#define CS_LOCK_DEC(o, item)
#define CS_LOCKSTATE_INC(o, state)
#define CS_LOCKSTATE_DEC(o, state)
69 | ||
70 | /** | |
71 | * Basic lock invariant that is maintained at all times. Caller either has a | |
72 | * reference to \a lock, or somehow assures that \a lock cannot be freed. | |
73 | * | |
74 | * \see cl_lock_invariant() | |
75 | */ | |
76 | static int cl_lock_invariant_trusted(const struct lu_env *env, | |
77 | const struct cl_lock *lock) | |
78 | { | |
79 | return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) && | |
80 | atomic_read(&lock->cll_ref) >= lock->cll_holds && | |
81 | lock->cll_holds >= lock->cll_users && | |
82 | lock->cll_holds >= 0 && | |
83 | lock->cll_users >= 0 && | |
84 | lock->cll_depth >= 0; | |
85 | } | |
86 | ||
87 | /** | |
88 | * Stronger lock invariant, checking that caller has a reference on a lock. | |
89 | * | |
90 | * \see cl_lock_invariant_trusted() | |
91 | */ | |
92 | static int cl_lock_invariant(const struct lu_env *env, | |
93 | const struct cl_lock *lock) | |
94 | { | |
95 | int result; | |
96 | ||
97 | result = atomic_read(&lock->cll_ref) > 0 && | |
98 | cl_lock_invariant_trusted(env, lock); | |
99 | if (!result && env != NULL) | |
100 | CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken"); | |
101 | return result; | |
102 | } | |
103 | ||
104 | /** | |
105 | * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock. | |
106 | */ | |
107 | static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock) | |
108 | { | |
109 | return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting; | |
110 | } | |
111 | ||
112 | /** | |
113 | * Returns a set of counters for this lock, depending on a lock nesting. | |
114 | */ | |
115 | static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env, | |
116 | const struct cl_lock *lock) | |
117 | { | |
118 | struct cl_thread_info *info; | |
119 | enum clt_nesting_level nesting; | |
120 | ||
121 | info = cl_env_info(env); | |
122 | nesting = cl_lock_nesting(lock); | |
123 | LASSERT(nesting < ARRAY_SIZE(info->clt_counters)); | |
124 | return &info->clt_counters[nesting]; | |
125 | } | |
126 | ||
127 | static void cl_lock_trace0(int level, const struct lu_env *env, | |
128 | const char *prefix, const struct cl_lock *lock, | |
129 | const char *func, const int line) | |
130 | { | |
131 | struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj); | |
132 | CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)" | |
133 | "(%p/%d/%d) at %s():%d\n", | |
134 | prefix, lock, atomic_read(&lock->cll_ref), | |
135 | lock->cll_guarder, lock->cll_depth, | |
136 | lock->cll_state, lock->cll_error, lock->cll_holds, | |
137 | lock->cll_users, lock->cll_flags, | |
138 | env, h->coh_nesting, cl_lock_nr_mutexed(env), | |
139 | func, line); | |
140 | } | |
141 | #define cl_lock_trace(level, env, prefix, lock) \ | |
142 | cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__) | |
143 | ||
/* Caller's return address, used as a source cookie for refs/debug traces. */
#define RETIP ((unsigned long)__builtin_return_address(0))

#ifdef CONFIG_LOCKDEP
static struct lock_class_key cl_lock_key;

/* Register a new cl_lock with lockdep under the shared "EXT" class. */
static void cl_lock_lockdep_init(struct cl_lock *lock)
{
	lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

/*
 * Records lock acquisition with lockdep and bumps the per-thread
 * acquired-locks counter.  \a enqflags is currently unused here.
 */
static void cl_lock_lockdep_acquire(const struct lu_env *env,
				    struct cl_lock *lock, __u32 enqflags)
{
	cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
	lock_map_acquire(&lock->dep_map);
}

/* Inverse of cl_lock_lockdep_acquire(). */
static void cl_lock_lockdep_release(const struct lu_env *env,
				    struct cl_lock *lock)
{
	cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
	lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

/* No-op stubs when lockdep is not configured. */
static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
				    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
				    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */
180 | ||
181 | /** | |
182 | * Adds lock slice to the compound lock. | |
183 | * | |
184 | * This is called by cl_object_operations::coo_lock_init() methods to add a | |
185 | * per-layer state to the lock. New state is added at the end of | |
186 | * cl_lock::cll_layers list, that is, it is at the bottom of the stack. | |
187 | * | |
188 | * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add() | |
189 | */ | |
190 | void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, | |
191 | struct cl_object *obj, | |
192 | const struct cl_lock_operations *ops) | |
193 | { | |
194 | ENTRY; | |
195 | slice->cls_lock = lock; | |
196 | list_add_tail(&slice->cls_linkage, &lock->cll_layers); | |
197 | slice->cls_obj = obj; | |
198 | slice->cls_ops = ops; | |
199 | EXIT; | |
200 | } | |
201 | EXPORT_SYMBOL(cl_lock_slice_add); | |
202 | ||
203 | /** | |
204 | * Returns true iff a lock with the mode \a has provides at least the same | |
205 | * guarantees as a lock with the mode \a need. | |
206 | */ | |
207 | int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need) | |
208 | { | |
209 | LINVRNT(need == CLM_READ || need == CLM_WRITE || | |
210 | need == CLM_PHANTOM || need == CLM_GROUP); | |
211 | LINVRNT(has == CLM_READ || has == CLM_WRITE || | |
212 | has == CLM_PHANTOM || has == CLM_GROUP); | |
213 | CLASSERT(CLM_PHANTOM < CLM_READ); | |
214 | CLASSERT(CLM_READ < CLM_WRITE); | |
215 | CLASSERT(CLM_WRITE < CLM_GROUP); | |
216 | ||
217 | if (has != CLM_GROUP) | |
218 | return need <= has; | |
219 | else | |
220 | return need == has; | |
221 | } | |
222 | EXPORT_SYMBOL(cl_lock_mode_match); | |
223 | ||
224 | /** | |
225 | * Returns true iff extent portions of lock descriptions match. | |
226 | */ | |
227 | int cl_lock_ext_match(const struct cl_lock_descr *has, | |
228 | const struct cl_lock_descr *need) | |
229 | { | |
230 | return | |
231 | has->cld_start <= need->cld_start && | |
232 | has->cld_end >= need->cld_end && | |
233 | cl_lock_mode_match(has->cld_mode, need->cld_mode) && | |
234 | (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid); | |
235 | } | |
236 | EXPORT_SYMBOL(cl_lock_ext_match); | |
237 | ||
238 | /** | |
239 | * Returns true iff a lock with the description \a has provides at least the | |
240 | * same guarantees as a lock with the description \a need. | |
241 | */ | |
242 | int cl_lock_descr_match(const struct cl_lock_descr *has, | |
243 | const struct cl_lock_descr *need) | |
244 | { | |
245 | return | |
246 | cl_object_same(has->cld_obj, need->cld_obj) && | |
247 | cl_lock_ext_match(has, need); | |
248 | } | |
249 | EXPORT_SYMBOL(cl_lock_descr_match); | |
250 | ||
/*
 * Finally releases all resources held by \a lock: runs clo_fini() on every
 * layer slice, drops the reference on the underlying object, destroys the
 * lock's mutex and returns the lock to the slab cache.  May sleep.
 *
 * The lock must not be mutexed (its mutex is destroyed here).
 */
static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_object *obj = lock->cll_descr.cld_obj;

	LINVRNT(!cl_lock_is_mutexed(lock));

	ENTRY;
	cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
	might_sleep();
	/* Pop and finalize layer slices from the front of the list. */
	while (!list_empty(&lock->cll_layers)) {
		struct cl_lock_slice *slice;

		slice = list_entry(lock->cll_layers.next,
				   struct cl_lock_slice, cls_linkage);
		list_del_init(lock->cll_layers.next);
		slice->cls_ops->clo_fini(env, slice);
	}
	CS_LOCK_DEC(obj, total);
	CS_LOCKSTATE_DEC(obj, lock->cll_state);
	/* Drop the object reference taken in cl_lock_alloc(). */
	lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
	cl_object_put(env, obj);
	lu_ref_fini(&lock->cll_reference);
	lu_ref_fini(&lock->cll_holders);
	mutex_destroy(&lock->cll_guard);
	OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
	EXIT;
}
278 | ||
279 | /** | |
280 | * Releases a reference on a lock. | |
281 | * | |
282 | * When last reference is released, lock is returned to the cache, unless it | |
283 | * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed | |
284 | * immediately. | |
285 | * | |
286 | * \see cl_object_put(), cl_page_put() | |
287 | */ | |
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_object *obj;

	LINVRNT(cl_lock_invariant(env, lock));
	ENTRY;
	obj = lock->cll_descr.cld_obj;
	LINVRNT(obj != NULL);

	CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);

	if (atomic_dec_and_test(&lock->cll_ref)) {
		/* Last reference: a lock in CLS_FREEING is destroyed now;
		 * otherwise it stays cached on the object's lock list. */
		if (lock->cll_state == CLS_FREEING) {
			LASSERT(list_empty(&lock->cll_linkage));
			cl_lock_free(env, lock);
		}
		/*
		 * NOTE(review): at this point \a lock (and possibly \a obj)
		 * may already have been freed by cl_lock_free() above.
		 * Harmless today because CS_LOCK_DEC expands to nothing, but
		 * re-enabling the stats macros would require moving this
		 * before cl_lock_free() — confirm before changing.
		 */
		CS_LOCK_DEC(obj, busy);
	}
	EXIT;
}
EXPORT_SYMBOL(cl_lock_put);
310 | ||
311 | /** | |
312 | * Acquires an additional reference to a lock. | |
313 | * | |
314 | * This can be called only by caller already possessing a reference to \a | |
315 | * lock. | |
316 | * | |
317 | * \see cl_object_get(), cl_page_get() | |
318 | */ | |
void cl_lock_get(struct cl_lock *lock)
{
	LINVRNT(cl_lock_invariant(NULL, lock));
	/* Trace the pre-increment reference count. */
	CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);
	atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
327 | ||
328 | /** | |
329 | * Acquires a reference to a lock. | |
330 | * | |
331 | * This is much like cl_lock_get(), except that this function can be used to | |
332 | * acquire initial reference to the cached lock. Caller has to deal with all | |
333 | * possible races. Use with care! | |
334 | * | |
335 | * \see cl_page_get_trust() | |
336 | */ | |
void cl_lock_get_trust(struct cl_lock *lock)
{
	CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);
	/* A 0 -> 1 transition means the lock leaves the idle cached state. */
	if (atomic_inc_return(&lock->cll_ref) == 1)
		CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
345 | ||
346 | /** | |
347 | * Helper function destroying the lock that wasn't completely initialized. | |
348 | * | |
349 | * Other threads can acquire references to the top-lock through its | |
350 | * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately. | |
351 | */ | |
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
	/* Cancel and delete under the lock mutex, then drop the caller's
	 * reference; the lock is freed once the last reference goes away. */
	cl_lock_mutex_get(env, lock);
	cl_lock_cancel(env, lock);
	cl_lock_delete(env, lock);
	cl_lock_mutex_put(env, lock);
	cl_lock_put(env, lock);
}
360 | ||
/*
 * Allocates and initializes a new cl_lock for \a obj / \a descr, invoking
 * cl_object_operations::coo_lock_init() on every layer of the object stack
 * to build the slice list.
 *
 * Returns the new lock with a single reference on success, or an ERR_PTR()
 * on allocation or per-layer initialization failure.
 */
static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
				     struct cl_object *obj,
				     const struct cl_io *io,
				     const struct cl_lock_descr *descr)
{
	struct cl_lock *lock;
	struct lu_object_header *head;

	ENTRY;
	OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
	if (lock != NULL) {
		atomic_set(&lock->cll_ref, 1);
		lock->cll_descr = *descr;
		lock->cll_state = CLS_NEW;
		/* Pin the object for the lifetime of the lock; dropped in
		 * cl_lock_free(). */
		cl_object_get(obj);
		lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
						      "cl_lock", lock);
		INIT_LIST_HEAD(&lock->cll_layers);
		INIT_LIST_HEAD(&lock->cll_linkage);
		INIT_LIST_HEAD(&lock->cll_inclosure);
		lu_ref_init(&lock->cll_reference);
		lu_ref_init(&lock->cll_holders);
		mutex_init(&lock->cll_guard);
		lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
		init_waitqueue_head(&lock->cll_wq);
		head = obj->co_lu.lo_header;
		CS_LOCKSTATE_INC(obj, CLS_NEW);
		CS_LOCK_INC(obj, total);
		CS_LOCK_INC(obj, create);
		cl_lock_lockdep_init(lock);
		/* NB: \a obj is reused as the per-layer cursor below. */
		list_for_each_entry(obj, &head->loh_layers,
				    co_lu.lo_linkage) {
			int err;

			err = obj->co_ops->coo_lock_init(env, obj, lock, io);
			if (err != 0) {
				/* Tear down the partially built lock. */
				cl_lock_finish(env, lock);
				lock = ERR_PTR(err);
				break;
			}
		}
	} else
		lock = ERR_PTR(-ENOMEM);
	RETURN(lock);
}
406 | ||
407 | /** | |
408 | * Transfer the lock into INTRANSIT state and return the original state. | |
409 | * | |
410 | * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED | |
411 | * \post state: CLS_INTRANSIT | |
412 | * \see CLS_INTRANSIT | |
413 | */ | |
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
				     struct cl_lock *lock)
{
	enum cl_lock_state state = lock->cll_state;

	LASSERT(cl_lock_is_mutexed(lock));
	LASSERT(state != CLS_INTRANSIT);
	LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
		 "Malformed lock state %d.\n", state);

	cl_lock_state_set(env, lock, CLS_INTRANSIT);
	/* Record the owning thread so cl_lock_is_intransit() can exempt it,
	 * and take a hold so the lock cannot go away while in transit. */
	lock->cll_intransit_owner = current;
	cl_lock_hold_add(env, lock, "intransit", current);
	return state;
}
EXPORT_SYMBOL(cl_lock_intransit);
430 | ||
431 | /** | |
432 | * Exit the intransit state and restore the lock state to the original state | |
433 | */ | |
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
		       enum cl_lock_state state)
{
	LASSERT(cl_lock_is_mutexed(lock));
	LASSERT(lock->cll_state == CLS_INTRANSIT);
	LASSERT(state != CLS_INTRANSIT);
	/* Only the thread that put the lock in transit may take it out. */
	LASSERT(lock->cll_intransit_owner == current);

	lock->cll_intransit_owner = NULL;
	cl_lock_state_set(env, lock, state);
	/* Drop the hold taken by cl_lock_intransit(). */
	cl_lock_unhold(env, lock, "intransit", current);
}
EXPORT_SYMBOL(cl_lock_extransit);
447 | ||
448 | /** | |
449 | * Checking whether the lock is intransit state | |
450 | */ | |
451 | int cl_lock_is_intransit(struct cl_lock *lock) | |
452 | { | |
453 | LASSERT(cl_lock_is_mutexed(lock)); | |
454 | return lock->cll_state == CLS_INTRANSIT && | |
455 | lock->cll_intransit_owner != current; | |
456 | } | |
457 | EXPORT_SYMBOL(cl_lock_is_intransit); | |
458 | /** | |
459 | * Returns true iff lock is "suitable" for given io. E.g., locks acquired by | |
460 | * truncate and O_APPEND cannot be reused for read/non-append-write, as they | |
461 | * cover multiple stripes and can trigger cascading timeouts. | |
462 | */ | |
463 | static int cl_lock_fits_into(const struct lu_env *env, | |
464 | const struct cl_lock *lock, | |
465 | const struct cl_lock_descr *need, | |
466 | const struct cl_io *io) | |
467 | { | |
468 | const struct cl_lock_slice *slice; | |
469 | ||
470 | LINVRNT(cl_lock_invariant_trusted(env, lock)); | |
471 | ENTRY; | |
472 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
473 | if (slice->cls_ops->clo_fits_into != NULL && | |
474 | !slice->cls_ops->clo_fits_into(env, slice, need, io)) | |
475 | RETURN(0); | |
476 | } | |
477 | RETURN(1); | |
478 | } | |
479 | ||
480 | static struct cl_lock *cl_lock_lookup(const struct lu_env *env, | |
481 | struct cl_object *obj, | |
482 | const struct cl_io *io, | |
483 | const struct cl_lock_descr *need) | |
484 | { | |
485 | struct cl_lock *lock; | |
486 | struct cl_object_header *head; | |
487 | ||
488 | ENTRY; | |
489 | ||
490 | head = cl_object_header(obj); | |
491 | LINVRNT(spin_is_locked(&head->coh_lock_guard)); | |
492 | CS_LOCK_INC(obj, lookup); | |
493 | list_for_each_entry(lock, &head->coh_locks, cll_linkage) { | |
494 | int matched; | |
495 | ||
496 | matched = cl_lock_ext_match(&lock->cll_descr, need) && | |
497 | lock->cll_state < CLS_FREEING && | |
498 | lock->cll_error == 0 && | |
499 | !(lock->cll_flags & CLF_CANCELLED) && | |
500 | cl_lock_fits_into(env, lock, need, io); | |
501 | CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n", | |
502 | PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need), | |
503 | matched); | |
504 | if (matched) { | |
505 | cl_lock_get_trust(lock); | |
506 | CS_LOCK_INC(obj, hit); | |
507 | RETURN(lock); | |
508 | } | |
509 | } | |
510 | RETURN(NULL); | |
511 | } | |
512 | ||
513 | /** | |
514 | * Returns a lock matching description \a need. | |
515 | * | |
516 | * This is the main entry point into the cl_lock caching interface. First, a | |
517 | * cache (implemented as a per-object linked list) is consulted. If lock is | |
518 | * found there, it is returned immediately. Otherwise new lock is allocated | |
519 | * and returned. In any case, additional reference to lock is acquired. | |
520 | * | |
521 | * \see cl_object_find(), cl_page_find() | |
522 | */ | |
static struct cl_lock *cl_lock_find(const struct lu_env *env,
				    const struct cl_io *io,
				    const struct cl_lock_descr *need)
{
	struct cl_object_header *head;
	struct cl_object *obj;
	struct cl_lock *lock;

	ENTRY;

	obj = need->cld_obj;
	head = cl_object_header(obj);

	/* Fast path: consult the per-object lock cache. */
	spin_lock(&head->coh_lock_guard);
	lock = cl_lock_lookup(env, obj, io, need);
	spin_unlock(&head->coh_lock_guard);

	if (lock == NULL) {
		/* Allocate outside the spinlock (allocation may sleep),
		 * then re-check for a concurrently inserted lock. */
		lock = cl_lock_alloc(env, obj, io, need);
		if (!IS_ERR(lock)) {
			struct cl_lock *ghost;

			spin_lock(&head->coh_lock_guard);
			ghost = cl_lock_lookup(env, obj, io, need);
			if (ghost == NULL) {
				list_add_tail(&lock->cll_linkage,
					      &head->coh_locks);
				spin_unlock(&head->coh_lock_guard);
				CS_LOCK_INC(obj, busy);
			} else {
				/* Lost the race: discard ours, use ghost. */
				spin_unlock(&head->coh_lock_guard);
				/*
				 * Other threads can acquire references to the
				 * top-lock through its sub-locks. Hence, it
				 * cannot be cl_lock_free()-ed immediately.
				 */
				cl_lock_finish(env, lock);
				lock = ghost;
			}
		}
	}
	RETURN(lock);
}
566 | ||
567 | /** | |
568 | * Returns existing lock matching given description. This is similar to | |
569 | * cl_lock_find() except that no new lock is created, and returned lock is | |
570 | * guaranteed to be in enum cl_lock_state::CLS_HELD state. | |
571 | */ | |
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
			     const struct cl_lock_descr *need,
			     const char *scope, const void *source)
{
	struct cl_object_header *head;
	struct cl_object *obj;
	struct cl_lock *lock;

	obj = need->cld_obj;
	head = cl_object_header(obj);

	/* Retry until a stable (neither INTRANSIT nor FREEING) match is
	 * found, or the cache has none. */
	do {
		spin_lock(&head->coh_lock_guard);
		lock = cl_lock_lookup(env, obj, io, need);
		spin_unlock(&head->coh_lock_guard);
		if (lock == NULL)
			return NULL;

		cl_lock_mutex_get(env, lock);
		if (lock->cll_state == CLS_INTRANSIT)
			/* Don't care return value. */
			cl_lock_state_wait(env, lock);
		if (lock->cll_state == CLS_FREEING) {
			cl_lock_mutex_put(env, lock);
			cl_lock_put(env, lock);
			lock = NULL;
		}
	} while (lock == NULL);

	/* Try to bring the lock into CLS_HELD for the caller. */
	cl_lock_hold_add(env, lock, scope, source);
	cl_lock_user_add(env, lock);
	if (lock->cll_state == CLS_CACHED)
		cl_use_try(env, lock, 1);
	if (lock->cll_state == CLS_HELD) {
		cl_lock_mutex_put(env, lock);
		cl_lock_lockdep_acquire(env, lock, 0);
		cl_lock_put(env, lock);
	} else {
		/* Could not reach CLS_HELD: undo everything and report no
		 * matching lock. */
		cl_unuse_try(env, lock);
		cl_lock_unhold(env, lock, scope, source);
		cl_lock_mutex_put(env, lock);
		cl_lock_put(env, lock);
		lock = NULL;
	}

	return lock;
}
EXPORT_SYMBOL(cl_lock_peek);
620 | ||
621 | /** | |
622 | * Returns a slice within a lock, corresponding to the given layer in the | |
623 | * device stack. | |
624 | * | |
625 | * \see cl_page_at() | |
626 | */ | |
627 | const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, | |
628 | const struct lu_device_type *dtype) | |
629 | { | |
630 | const struct cl_lock_slice *slice; | |
631 | ||
632 | LINVRNT(cl_lock_invariant_trusted(NULL, lock)); | |
633 | ENTRY; | |
634 | ||
635 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
636 | if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype) | |
637 | RETURN(slice); | |
638 | } | |
639 | RETURN(NULL); | |
640 | } | |
641 | EXPORT_SYMBOL(cl_lock_at); | |
642 | ||
/* Common bookkeeping after the lock mutex has been (re)acquired: bump the
 * recursion depth and the per-thread locked-mutex counter. */
static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_thread_counters *counters;

	counters = cl_lock_counters(env, lock);
	lock->cll_depth++;
	counters->ctc_nr_locks_locked++;
	lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
	cl_lock_trace(D_TRACE, env, "got mutex", lock);
}
653 | ||
654 | /** | |
655 | * Locks cl_lock object. | |
656 | * | |
657 | * This is used to manipulate cl_lock fields, and to serialize state | |
658 | * transitions in the lock state machine. | |
659 | * | |
660 | * \post cl_lock_is_mutexed(lock) | |
661 | * | |
662 | * \see cl_lock_mutex_put() | |
663 | */ | |
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_invariant(env, lock));

	if (lock->cll_guarder == current) {
		/* Recursive acquisition by the owner: only the depth is
		 * bumped (in cl_lock_mutex_tail() below). */
		LINVRNT(cl_lock_is_mutexed(lock));
		LINVRNT(lock->cll_depth > 0);
	} else {
		struct cl_object_header *hdr;
		struct cl_thread_info *info;
		int i;

		LINVRNT(lock->cll_guarder != current);
		hdr = cl_object_header(lock->cll_descr.cld_obj);
		/*
		 * Check that mutices are taken in the bottom-to-top order.
		 */
		info = cl_env_info(env);
		for (i = 0; i < hdr->coh_nesting; ++i)
			LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
		mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
		lock->cll_guarder = current;
		LINVRNT(lock->cll_depth == 0);
	}
	cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
691 | ||
692 | /** | |
693 | * Try-locks cl_lock object. | |
694 | * | |
695 | * \retval 0 \a lock was successfully locked | |
696 | * | |
697 | * \retval -EBUSY \a lock cannot be locked right now | |
698 | * | |
699 | * \post ergo(result == 0, cl_lock_is_mutexed(lock)) | |
700 | * | |
701 | * \see cl_lock_mutex_get() | |
702 | */ | |
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
	int result;

	LINVRNT(cl_lock_invariant_trusted(env, lock));
	ENTRY;

	result = 0;
	if (lock->cll_guarder == current) {
		/* Already owned by this thread: recursive acquisition. */
		LINVRNT(lock->cll_depth > 0);
		cl_lock_mutex_tail(env, lock);
	} else if (mutex_trylock(&lock->cll_guard)) {
		LINVRNT(lock->cll_depth == 0);
		lock->cll_guarder = current;
		cl_lock_mutex_tail(env, lock);
	} else
		result = -EBUSY;
	RETURN(result);
}
EXPORT_SYMBOL(cl_lock_mutex_try);
723 | ||
/**
 * Unlocks cl_lock object.
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_get()
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_thread_counters *counters;

	LINVRNT(cl_lock_invariant(env, lock));
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(lock->cll_guarder == current);
	LINVRNT(lock->cll_depth > 0);

	counters = cl_lock_counters(env, lock);
	LINVRNT(counters->ctc_nr_locks_locked > 0);

	cl_lock_trace(D_TRACE, env, "put mutex", lock);
	lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
	counters->ctc_nr_locks_locked--;
	/* Release the underlying mutex only on the outermost unlock. */
	if (--lock->cll_depth == 0) {
		lock->cll_guarder = NULL;
		mutex_unlock(&lock->cll_guard);
	}
}
EXPORT_SYMBOL(cl_lock_mutex_put);
752 | ||
753 | /** | |
754 | * Returns true iff lock's mutex is owned by the current thread. | |
755 | */ | |
756 | int cl_lock_is_mutexed(struct cl_lock *lock) | |
757 | { | |
758 | return lock->cll_guarder == current; | |
759 | } | |
760 | EXPORT_SYMBOL(cl_lock_is_mutexed); | |
761 | ||
762 | /** | |
763 | * Returns number of cl_lock mutices held by the current thread (environment). | |
764 | */ | |
765 | int cl_lock_nr_mutexed(const struct lu_env *env) | |
766 | { | |
767 | struct cl_thread_info *info; | |
768 | int i; | |
769 | int locked; | |
770 | ||
771 | /* | |
772 | * NOTE: if summation across all nesting levels (currently 2) proves | |
773 | * too expensive, a summary counter can be added to | |
774 | * struct cl_thread_info. | |
775 | */ | |
776 | info = cl_env_info(env); | |
777 | for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i) | |
778 | locked += info->clt_counters[i].ctc_nr_locks_locked; | |
779 | return locked; | |
780 | } | |
781 | EXPORT_SYMBOL(cl_lock_nr_mutexed); | |
782 | ||
/* Runs clo_cancel() on all layers (in reverse list order), at most once
 * per lock: CLF_CANCELLED makes cancellation idempotent. */
static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	ENTRY;
	if (!(lock->cll_flags & CLF_CANCELLED)) {
		const struct cl_lock_slice *slice;

		lock->cll_flags |= CLF_CANCELLED;
		list_for_each_entry_reverse(slice, &lock->cll_layers,
					    cls_linkage) {
			if (slice->cls_ops->clo_cancel != NULL)
				slice->cls_ops->clo_cancel(env, slice);
		}
	}
	EXIT;
}
800 | ||
/*
 * Moves the lock into CLS_FREEING, unlinks it from the per-object lock
 * list, and lets every layer (in reverse list order) delete its private
 * state.  Idempotent: a lock already at or past CLS_FREEING is untouched.
 */
static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_object_header *head;
	const struct cl_lock_slice *slice;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	ENTRY;
	if (lock->cll_state < CLS_FREEING) {
		LASSERT(lock->cll_state != CLS_INTRANSIT);
		cl_lock_state_set(env, lock, CLS_FREEING);

		head = cl_object_header(lock->cll_descr.cld_obj);

		spin_lock(&head->coh_lock_guard);
		list_del_init(&lock->cll_linkage);
		spin_unlock(&head->coh_lock_guard);

		/*
		 * From now on, no new references to this lock can be acquired
		 * by cl_lock_lookup().
		 */
		list_for_each_entry_reverse(slice, &lock->cll_layers,
					    cls_linkage) {
			if (slice->cls_ops->clo_delete != NULL)
				slice->cls_ops->clo_delete(env, slice);
		}
		/*
		 * From now on, no new references to this lock can be acquired
		 * by layer-specific means (like a pointer from struct
		 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
		 * lov).
		 *
		 * Lock will be finally freed in cl_lock_put() when last of
		 * existing references goes away.
		 */
	}
	EXIT;
}
841 | ||
842 | /** | |
843 | * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a | |
844 | * top-lock (nesting == 0) accounts for this modification in the per-thread | |
845 | * debugging counters. Sub-lock holds can be released by a thread different | |
846 | * from one that acquired it. | |
847 | */ | |
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
			     int delta)
{
	struct cl_thread_counters *counters;
	enum clt_nesting_level nesting;

	lock->cll_holds += delta;
	nesting = cl_lock_nesting(lock);
	/* Per-thread debugging accounting applies to top-locks only. */
	if (nesting == CNL_TOP) {
		counters = &cl_env_info(env)->clt_counters[CNL_TOP];
		counters->ctc_nr_held += delta;
		LASSERT(counters->ctc_nr_held >= 0);
	}
}
862 | ||
863 | /** | |
864 | * Mod(ifie)s cl_lock::cll_users counter for a given lock. See | |
865 | * cl_lock_hold_mod() for the explanation of the debugging code. | |
866 | */ | |
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
			     int delta)
{
	struct cl_thread_counters *counters;
	enum clt_nesting_level nesting;

	lock->cll_users += delta;
	nesting = cl_lock_nesting(lock);
	/* Per-thread debugging accounting applies to top-locks only. */
	if (nesting == CNL_TOP) {
		counters = &cl_env_info(env)->clt_counters[CNL_TOP];
		counters->ctc_nr_used += delta;
		LASSERT(counters->ctc_nr_used >= 0);
	}
}
881 | ||
/*
 * Releases one hold on \a lock (identified by \a scope / \a source).  When
 * the last hold goes away, pending cancel/delete work is executed, and
 * phantom, group, or not-yet-cached locks are doomed for destruction.
 *
 * \pre cl_lock_is_mutexed(lock) && lock->cll_holds > 0
 */
void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
			  const char *scope, const void *source)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_holds > 0);

	ENTRY;
	cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
	lu_ref_del(&lock->cll_holders, scope, source);
	cl_lock_hold_mod(env, lock, -1);
	if (lock->cll_holds == 0) {
		CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
		if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
		    lock->cll_descr.cld_mode == CLM_GROUP ||
		    lock->cll_state != CLS_CACHED)
			/*
			 * If lock is still phantom or grouplock when user is
			 * done with it---destroy the lock.
			 */
			lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
		if (lock->cll_flags & CLF_CANCELPEND) {
			lock->cll_flags &= ~CLF_CANCELPEND;
			cl_lock_cancel0(env, lock);
		}
		if (lock->cll_flags & CLF_DOOMED) {
			/* no longer doomed: it's dead... Jim. */
			lock->cll_flags &= ~CLF_DOOMED;
			cl_lock_delete0(env, lock);
		}
	}
	EXIT;
}
EXPORT_SYMBOL(cl_lock_hold_release);
916 | ||
917 | /** | |
918 | * Waits until lock state is changed. | |
919 | * | |
920 | * This function is called with cl_lock mutex locked, atomically releases | |
921 | * mutex and goes to sleep, waiting for a lock state change (signaled by | |
922 | * cl_lock_signal()), and re-acquires the mutex before return. | |
923 | * | |
924 | * This function is used to wait until lock state machine makes some progress | |
925 | * and to emulate synchronous operations on top of asynchronous lock | |
926 | * interface. | |
927 | * | |
928 | * \retval -EINTR wait was interrupted | |
929 | * | |
930 | * \retval 0 wait wasn't interrupted | |
931 | * | |
932 | * \pre cl_lock_is_mutexed(lock) | |
933 | * | |
934 | * \see cl_lock_signal() | |
935 | */ | |
936 | int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) | |
937 | { | |
938 | wait_queue_t waiter; | |
939 | sigset_t blocked; | |
940 | int result; | |
941 | ||
942 | ENTRY; | |
943 | LINVRNT(cl_lock_is_mutexed(lock)); | |
944 | LINVRNT(cl_lock_invariant(env, lock)); | |
945 | LASSERT(lock->cll_depth == 1); | |
946 | LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */ | |
947 | ||
948 | cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock); | |
949 | result = lock->cll_error; | |
950 | if (result == 0) { | |
951 | /* To avoid being interrupted by the 'non-fatal' signals | |
952 | * (SIGCHLD, for instance), we'd block them temporarily. | |
953 | * LU-305 */ | |
954 | blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); | |
955 | ||
956 | init_waitqueue_entry_current(&waiter); | |
957 | add_wait_queue(&lock->cll_wq, &waiter); | |
958 | set_current_state(TASK_INTERRUPTIBLE); | |
959 | cl_lock_mutex_put(env, lock); | |
960 | ||
961 | LASSERT(cl_lock_nr_mutexed(env) == 0); | |
962 | ||
963 | /* Returning ERESTARTSYS instead of EINTR so syscalls | |
964 | * can be restarted if signals are pending here */ | |
965 | result = -ERESTARTSYS; | |
966 | if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) { | |
967 | waitq_wait(&waiter, TASK_INTERRUPTIBLE); | |
968 | if (!cfs_signal_pending()) | |
969 | result = 0; | |
970 | } | |
971 | ||
972 | cl_lock_mutex_get(env, lock); | |
973 | set_current_state(TASK_RUNNING); | |
974 | remove_wait_queue(&lock->cll_wq, &waiter); | |
975 | ||
976 | /* Restore old blocked signals */ | |
977 | cfs_restore_sigs(blocked); | |
978 | } | |
979 | RETURN(result); | |
980 | } | |
981 | EXPORT_SYMBOL(cl_lock_state_wait); | |
982 | ||
983 | static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, | |
984 | enum cl_lock_state state) | |
985 | { | |
986 | const struct cl_lock_slice *slice; | |
987 | ||
988 | ENTRY; | |
989 | LINVRNT(cl_lock_is_mutexed(lock)); | |
990 | LINVRNT(cl_lock_invariant(env, lock)); | |
991 | ||
992 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) | |
993 | if (slice->cls_ops->clo_state != NULL) | |
994 | slice->cls_ops->clo_state(env, slice, state); | |
995 | wake_up_all(&lock->cll_wq); | |
996 | EXIT; | |
997 | } | |
998 | ||
999 | /** | |
1000 | * Notifies waiters that lock state changed. | |
1001 | * | |
1002 | * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all | |
1003 | * layers about state change by calling cl_lock_operations::clo_state() | |
1004 | * top-to-bottom. | |
1005 | */ | |
1006 | void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock) | |
1007 | { | |
1008 | ENTRY; | |
1009 | cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock); | |
1010 | cl_lock_state_signal(env, lock, lock->cll_state); | |
1011 | EXIT; | |
1012 | } | |
1013 | EXPORT_SYMBOL(cl_lock_signal); | |
1014 | ||
1015 | /** | |
1016 | * Changes lock state. | |
1017 | * | |
1018 | * This function is invoked to notify layers that lock state changed, possible | |
1019 | * as a result of an asynchronous event such as call-back reception. | |
1020 | * | |
1021 | * \post lock->cll_state == state | |
1022 | * | |
1023 | * \see cl_lock_operations::clo_state() | |
1024 | */ | |
1025 | void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, | |
1026 | enum cl_lock_state state) | |
1027 | { | |
1028 | ENTRY; | |
1029 | LASSERT(lock->cll_state <= state || | |
1030 | (lock->cll_state == CLS_CACHED && | |
1031 | (state == CLS_HELD || /* lock found in cache */ | |
1032 | state == CLS_NEW || /* sub-lock canceled */ | |
1033 | state == CLS_INTRANSIT)) || | |
1034 | /* lock is in transit state */ | |
1035 | lock->cll_state == CLS_INTRANSIT); | |
1036 | ||
1037 | if (lock->cll_state != state) { | |
1038 | CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state); | |
1039 | CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state); | |
1040 | ||
1041 | cl_lock_state_signal(env, lock, state); | |
1042 | lock->cll_state = state; | |
1043 | } | |
1044 | EXIT; | |
1045 | } | |
1046 | EXPORT_SYMBOL(cl_lock_state_set); | |
1047 | ||
1048 | static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) | |
1049 | { | |
1050 | const struct cl_lock_slice *slice; | |
1051 | int result; | |
1052 | ||
1053 | do { | |
1054 | result = 0; | |
1055 | ||
1056 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1057 | LINVRNT(cl_lock_invariant(env, lock)); | |
1058 | LASSERT(lock->cll_state == CLS_INTRANSIT); | |
1059 | ||
1060 | result = -ENOSYS; | |
1061 | list_for_each_entry_reverse(slice, &lock->cll_layers, | |
1062 | cls_linkage) { | |
1063 | if (slice->cls_ops->clo_unuse != NULL) { | |
1064 | result = slice->cls_ops->clo_unuse(env, slice); | |
1065 | if (result != 0) | |
1066 | break; | |
1067 | } | |
1068 | } | |
1069 | LASSERT(result != -ENOSYS); | |
1070 | } while (result == CLO_REPEAT); | |
1071 | ||
1072 | return result; | |
1073 | } | |
1074 | ||
1075 | /** | |
1076 | * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling | |
1077 | * cl_lock_operations::clo_use() top-to-bottom to notify layers. | |
1078 | * @atomic = 1, it must unuse the lock to recovery the lock to keep the | |
1079 | * use process atomic | |
1080 | */ | |
1081 | int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) | |
1082 | { | |
1083 | const struct cl_lock_slice *slice; | |
1084 | int result; | |
1085 | enum cl_lock_state state; | |
1086 | ||
1087 | ENTRY; | |
1088 | cl_lock_trace(D_DLMTRACE, env, "use lock", lock); | |
1089 | ||
1090 | LASSERT(lock->cll_state == CLS_CACHED); | |
1091 | if (lock->cll_error) | |
1092 | RETURN(lock->cll_error); | |
1093 | ||
1094 | result = -ENOSYS; | |
1095 | state = cl_lock_intransit(env, lock); | |
1096 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
1097 | if (slice->cls_ops->clo_use != NULL) { | |
1098 | result = slice->cls_ops->clo_use(env, slice); | |
1099 | if (result != 0) | |
1100 | break; | |
1101 | } | |
1102 | } | |
1103 | LASSERT(result != -ENOSYS); | |
1104 | ||
1105 | LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n", | |
1106 | lock->cll_state); | |
1107 | ||
1108 | if (result == 0) { | |
1109 | state = CLS_HELD; | |
1110 | } else { | |
1111 | if (result == -ESTALE) { | |
1112 | /* | |
1113 | * ESTALE means sublock being cancelled | |
1114 | * at this time, and set lock state to | |
1115 | * be NEW here and ask the caller to repeat. | |
1116 | */ | |
1117 | state = CLS_NEW; | |
1118 | result = CLO_REPEAT; | |
1119 | } | |
1120 | ||
1121 | /* @atomic means back-off-on-failure. */ | |
1122 | if (atomic) { | |
1123 | int rc; | |
1124 | rc = cl_unuse_try_internal(env, lock); | |
1125 | /* Vet the results. */ | |
1126 | if (rc < 0 && result > 0) | |
1127 | result = rc; | |
1128 | } | |
1129 | ||
1130 | } | |
1131 | cl_lock_extransit(env, lock, state); | |
1132 | RETURN(result); | |
1133 | } | |
1134 | EXPORT_SYMBOL(cl_use_try); | |
1135 | ||
1136 | /** | |
1137 | * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers | |
1138 | * top-to-bottom. | |
1139 | */ | |
1140 | static int cl_enqueue_kick(const struct lu_env *env, | |
1141 | struct cl_lock *lock, | |
1142 | struct cl_io *io, __u32 flags) | |
1143 | { | |
1144 | int result; | |
1145 | const struct cl_lock_slice *slice; | |
1146 | ||
1147 | ENTRY; | |
1148 | result = -ENOSYS; | |
1149 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
1150 | if (slice->cls_ops->clo_enqueue != NULL) { | |
1151 | result = slice->cls_ops->clo_enqueue(env, | |
1152 | slice, io, flags); | |
1153 | if (result != 0) | |
1154 | break; | |
1155 | } | |
1156 | } | |
1157 | LASSERT(result != -ENOSYS); | |
1158 | RETURN(result); | |
1159 | } | |
1160 | ||
1161 | /** | |
1162 | * Tries to enqueue a lock. | |
1163 | * | |
1164 | * This function is called repeatedly by cl_enqueue() until either lock is | |
1165 | * enqueued, or error occurs. This function does not block waiting for | |
1166 | * networking communication to complete. | |
1167 | * | |
1168 | * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED || | |
1169 | * lock->cll_state == CLS_HELD) | |
1170 | * | |
1171 | * \see cl_enqueue() cl_lock_operations::clo_enqueue() | |
1172 | * \see cl_lock_state::CLS_ENQUEUED | |
1173 | */ | |
1174 | int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, | |
1175 | struct cl_io *io, __u32 flags) | |
1176 | { | |
1177 | int result; | |
1178 | ||
1179 | ENTRY; | |
1180 | cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock); | |
1181 | do { | |
1182 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1183 | ||
1184 | result = lock->cll_error; | |
1185 | if (result != 0) | |
1186 | break; | |
1187 | ||
1188 | switch (lock->cll_state) { | |
1189 | case CLS_NEW: | |
1190 | cl_lock_state_set(env, lock, CLS_QUEUING); | |
1191 | /* fall-through */ | |
1192 | case CLS_QUEUING: | |
1193 | /* kick layers. */ | |
1194 | result = cl_enqueue_kick(env, lock, io, flags); | |
1195 | /* For AGL case, the cl_lock::cll_state may | |
1196 | * become CLS_HELD already. */ | |
1197 | if (result == 0 && lock->cll_state == CLS_QUEUING) | |
1198 | cl_lock_state_set(env, lock, CLS_ENQUEUED); | |
1199 | break; | |
1200 | case CLS_INTRANSIT: | |
1201 | LASSERT(cl_lock_is_intransit(lock)); | |
1202 | result = CLO_WAIT; | |
1203 | break; | |
1204 | case CLS_CACHED: | |
1205 | /* yank lock from the cache. */ | |
1206 | result = cl_use_try(env, lock, 0); | |
1207 | break; | |
1208 | case CLS_ENQUEUED: | |
1209 | case CLS_HELD: | |
1210 | result = 0; | |
1211 | break; | |
1212 | default: | |
1213 | case CLS_FREEING: | |
1214 | /* | |
1215 | * impossible, only held locks with increased | |
1216 | * ->cll_holds can be enqueued, and they cannot be | |
1217 | * freed. | |
1218 | */ | |
1219 | LBUG(); | |
1220 | } | |
1221 | } while (result == CLO_REPEAT); | |
1222 | RETURN(result); | |
1223 | } | |
1224 | EXPORT_SYMBOL(cl_enqueue_try); | |
1225 | ||
1226 | /** | |
1227 | * Cancel the conflicting lock found during previous enqueue. | |
1228 | * | |
1229 | * \retval 0 conflicting lock has been canceled. | |
1230 | * \retval -ve error code. | |
1231 | */ | |
1232 | int cl_lock_enqueue_wait(const struct lu_env *env, | |
1233 | struct cl_lock *lock, | |
1234 | int keep_mutex) | |
1235 | { | |
1236 | struct cl_lock *conflict; | |
1237 | int rc = 0; | |
1238 | ENTRY; | |
1239 | ||
1240 | LASSERT(cl_lock_is_mutexed(lock)); | |
1241 | LASSERT(lock->cll_state == CLS_QUEUING); | |
1242 | LASSERT(lock->cll_conflict != NULL); | |
1243 | ||
1244 | conflict = lock->cll_conflict; | |
1245 | lock->cll_conflict = NULL; | |
1246 | ||
1247 | cl_lock_mutex_put(env, lock); | |
1248 | LASSERT(cl_lock_nr_mutexed(env) == 0); | |
1249 | ||
1250 | cl_lock_mutex_get(env, conflict); | |
1251 | cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict); | |
1252 | cl_lock_cancel(env, conflict); | |
1253 | cl_lock_delete(env, conflict); | |
1254 | ||
1255 | while (conflict->cll_state != CLS_FREEING) { | |
1256 | rc = cl_lock_state_wait(env, conflict); | |
1257 | if (rc != 0) | |
1258 | break; | |
1259 | } | |
1260 | cl_lock_mutex_put(env, conflict); | |
1261 | lu_ref_del(&conflict->cll_reference, "cancel-wait", lock); | |
1262 | cl_lock_put(env, conflict); | |
1263 | ||
1264 | if (keep_mutex) | |
1265 | cl_lock_mutex_get(env, lock); | |
1266 | ||
1267 | LASSERT(rc <= 0); | |
1268 | RETURN(rc); | |
1269 | } | |
1270 | EXPORT_SYMBOL(cl_lock_enqueue_wait); | |
1271 | ||
/**
 * Enqueue core: registers a user on \a lock and drives cl_enqueue_try()
 * to completion, waiting out a conflicting lock or a state change
 * whenever CLO_WAIT is returned. Called and returns with the lock mutex
 * held. On failure the lock is unused again via cl_unuse_try().
 */
static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
			     struct cl_io *io, __u32 enqflags)
{
	int result;

	ENTRY;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_holds > 0);

	cl_lock_user_add(env, lock);
	do {
		result = cl_enqueue_try(env, lock, io, enqflags);
		if (result == CLO_WAIT) {
			/* either wait for the recorded conflict to go away,
			 * or for any state change, then retry */
			if (lock->cll_conflict != NULL)
				result = cl_lock_enqueue_wait(env, lock, 1);
			else
				result = cl_lock_state_wait(env, lock);
			if (result == 0)
				continue;
		}
		break;
	} while (1);
	if (result != 0)
		cl_unuse_try(env, lock);
	LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
		     lock->cll_state == CLS_ENQUEUED ||
		     lock->cll_state == CLS_HELD));
	RETURN(result);
}
1303 | ||
1304 | /** | |
1305 | * Enqueues a lock. | |
1306 | * | |
1307 | * \pre current thread or io owns a hold on lock. | |
1308 | * | |
1309 | * \post ergo(result == 0, lock->users increased) | |
1310 | * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED || | |
1311 | * lock->cll_state == CLS_HELD) | |
1312 | */ | |
1313 | int cl_enqueue(const struct lu_env *env, struct cl_lock *lock, | |
1314 | struct cl_io *io, __u32 enqflags) | |
1315 | { | |
1316 | int result; | |
1317 | ||
1318 | ENTRY; | |
1319 | ||
1320 | cl_lock_lockdep_acquire(env, lock, enqflags); | |
1321 | cl_lock_mutex_get(env, lock); | |
1322 | result = cl_enqueue_locked(env, lock, io, enqflags); | |
1323 | cl_lock_mutex_put(env, lock); | |
1324 | if (result != 0) | |
1325 | cl_lock_lockdep_release(env, lock); | |
1326 | LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED || | |
1327 | lock->cll_state == CLS_HELD)); | |
1328 | RETURN(result); | |
1329 | } | |
1330 | EXPORT_SYMBOL(cl_enqueue); | |
1331 | ||
1332 | /** | |
1333 | * Tries to unlock a lock. | |
1334 | * | |
1335 | * This function is called to release underlying resource: | |
1336 | * 1. for top lock, the resource is sublocks it held; | |
1337 | * 2. for sublock, the resource is the reference to dlmlock. | |
1338 | * | |
1339 | * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT. | |
1340 | * | |
1341 | * \see cl_unuse() cl_lock_operations::clo_unuse() | |
1342 | * \see cl_lock_state::CLS_CACHED | |
1343 | */ | |
1344 | int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock) | |
1345 | { | |
1346 | int result; | |
1347 | enum cl_lock_state state = CLS_NEW; | |
1348 | ||
1349 | ENTRY; | |
1350 | cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock); | |
1351 | ||
1352 | if (lock->cll_users > 1) { | |
1353 | cl_lock_user_del(env, lock); | |
1354 | RETURN(0); | |
1355 | } | |
1356 | ||
1357 | /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold | |
1358 | * underlying resources. */ | |
1359 | if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) { | |
1360 | cl_lock_user_del(env, lock); | |
1361 | RETURN(0); | |
1362 | } | |
1363 | ||
1364 | /* | |
1365 | * New lock users (->cll_users) are not protecting unlocking | |
1366 | * from proceeding. From this point, lock eventually reaches | |
1367 | * CLS_CACHED, is reinitialized to CLS_NEW or fails into | |
1368 | * CLS_FREEING. | |
1369 | */ | |
1370 | state = cl_lock_intransit(env, lock); | |
1371 | ||
1372 | result = cl_unuse_try_internal(env, lock); | |
1373 | LASSERT(lock->cll_state == CLS_INTRANSIT); | |
1374 | LASSERT(result != CLO_WAIT); | |
1375 | cl_lock_user_del(env, lock); | |
1376 | if (result == 0 || result == -ESTALE) { | |
1377 | /* | |
1378 | * Return lock back to the cache. This is the only | |
1379 | * place where lock is moved into CLS_CACHED state. | |
1380 | * | |
1381 | * If one of ->clo_unuse() methods returned -ESTALE, lock | |
1382 | * cannot be placed into cache and has to be | |
1383 | * re-initialized. This happens e.g., when a sub-lock was | |
1384 | * canceled while unlocking was in progress. | |
1385 | */ | |
1386 | if (state == CLS_HELD && result == 0) | |
1387 | state = CLS_CACHED; | |
1388 | else | |
1389 | state = CLS_NEW; | |
1390 | cl_lock_extransit(env, lock, state); | |
1391 | ||
1392 | /* | |
1393 | * Hide -ESTALE error. | |
1394 | * If the lock is a glimpse lock, and it has multiple | |
1395 | * stripes. Assuming that one of its sublock returned -ENAVAIL, | |
1396 | * and other sublocks are matched write locks. In this case, | |
1397 | * we can't set this lock to error because otherwise some of | |
1398 | * its sublocks may not be canceled. This causes some dirty | |
1399 | * pages won't be written to OSTs. -jay | |
1400 | */ | |
1401 | result = 0; | |
1402 | } else { | |
1403 | CERROR("result = %d, this is unlikely!\n", result); | |
1404 | state = CLS_NEW; | |
1405 | cl_lock_extransit(env, lock, state); | |
1406 | } | |
1407 | RETURN(result ?: lock->cll_error); | |
1408 | } | |
1409 | EXPORT_SYMBOL(cl_unuse_try); | |
1410 | ||
1411 | static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock) | |
1412 | { | |
1413 | int result; | |
1414 | ENTRY; | |
1415 | ||
1416 | result = cl_unuse_try(env, lock); | |
1417 | if (result) | |
1418 | CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result); | |
1419 | ||
1420 | EXIT; | |
1421 | } | |
1422 | ||
1423 | /** | |
1424 | * Unlocks a lock. | |
1425 | */ | |
1426 | void cl_unuse(const struct lu_env *env, struct cl_lock *lock) | |
1427 | { | |
1428 | ENTRY; | |
1429 | cl_lock_mutex_get(env, lock); | |
1430 | cl_unuse_locked(env, lock); | |
1431 | cl_lock_mutex_put(env, lock); | |
1432 | cl_lock_lockdep_release(env, lock); | |
1433 | EXIT; | |
1434 | } | |
1435 | EXPORT_SYMBOL(cl_unuse); | |
1436 | ||
1437 | /** | |
1438 | * Tries to wait for a lock. | |
1439 | * | |
1440 | * This function is called repeatedly by cl_wait() until either lock is | |
1441 | * granted, or error occurs. This function does not block waiting for network | |
1442 | * communication to complete. | |
1443 | * | |
1444 | * \see cl_wait() cl_lock_operations::clo_wait() | |
1445 | * \see cl_lock_state::CLS_HELD | |
1446 | */ | |
1447 | int cl_wait_try(const struct lu_env *env, struct cl_lock *lock) | |
1448 | { | |
1449 | const struct cl_lock_slice *slice; | |
1450 | int result; | |
1451 | ||
1452 | ENTRY; | |
1453 | cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock); | |
1454 | do { | |
1455 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1456 | LINVRNT(cl_lock_invariant(env, lock)); | |
1457 | LASSERTF(lock->cll_state == CLS_QUEUING || | |
1458 | lock->cll_state == CLS_ENQUEUED || | |
1459 | lock->cll_state == CLS_HELD || | |
1460 | lock->cll_state == CLS_INTRANSIT, | |
1461 | "lock state: %d\n", lock->cll_state); | |
1462 | LASSERT(lock->cll_users > 0); | |
1463 | LASSERT(lock->cll_holds > 0); | |
1464 | ||
1465 | result = lock->cll_error; | |
1466 | if (result != 0) | |
1467 | break; | |
1468 | ||
1469 | if (cl_lock_is_intransit(lock)) { | |
1470 | result = CLO_WAIT; | |
1471 | break; | |
1472 | } | |
1473 | ||
1474 | if (lock->cll_state == CLS_HELD) | |
1475 | /* nothing to do */ | |
1476 | break; | |
1477 | ||
1478 | result = -ENOSYS; | |
1479 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
1480 | if (slice->cls_ops->clo_wait != NULL) { | |
1481 | result = slice->cls_ops->clo_wait(env, slice); | |
1482 | if (result != 0) | |
1483 | break; | |
1484 | } | |
1485 | } | |
1486 | LASSERT(result != -ENOSYS); | |
1487 | if (result == 0) { | |
1488 | LASSERT(lock->cll_state != CLS_INTRANSIT); | |
1489 | cl_lock_state_set(env, lock, CLS_HELD); | |
1490 | } | |
1491 | } while (result == CLO_REPEAT); | |
1492 | RETURN(result); | |
1493 | } | |
1494 | EXPORT_SYMBOL(cl_wait_try); | |
1495 | ||
1496 | /** | |
1497 | * Waits until enqueued lock is granted. | |
1498 | * | |
1499 | * \pre current thread or io owns a hold on the lock | |
1500 | * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED || | |
1501 | * lock->cll_state == CLS_HELD) | |
1502 | * | |
1503 | * \post ergo(result == 0, lock->cll_state == CLS_HELD) | |
1504 | */ | |
1505 | int cl_wait(const struct lu_env *env, struct cl_lock *lock) | |
1506 | { | |
1507 | int result; | |
1508 | ||
1509 | ENTRY; | |
1510 | cl_lock_mutex_get(env, lock); | |
1511 | ||
1512 | LINVRNT(cl_lock_invariant(env, lock)); | |
1513 | LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD, | |
1514 | "Wrong state %d \n", lock->cll_state); | |
1515 | LASSERT(lock->cll_holds > 0); | |
1516 | ||
1517 | do { | |
1518 | result = cl_wait_try(env, lock); | |
1519 | if (result == CLO_WAIT) { | |
1520 | result = cl_lock_state_wait(env, lock); | |
1521 | if (result == 0) | |
1522 | continue; | |
1523 | } | |
1524 | break; | |
1525 | } while (1); | |
1526 | if (result < 0) { | |
1527 | cl_unuse_try(env, lock); | |
1528 | cl_lock_lockdep_release(env, lock); | |
1529 | } | |
1530 | cl_lock_trace(D_DLMTRACE, env, "wait lock", lock); | |
1531 | cl_lock_mutex_put(env, lock); | |
1532 | LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD)); | |
1533 | RETURN(result); | |
1534 | } | |
1535 | EXPORT_SYMBOL(cl_wait); | |
1536 | ||
1537 | /** | |
1538 | * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock | |
1539 | * value. | |
1540 | */ | |
1541 | unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) | |
1542 | { | |
1543 | const struct cl_lock_slice *slice; | |
1544 | unsigned long pound; | |
1545 | unsigned long ounce; | |
1546 | ||
1547 | ENTRY; | |
1548 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1549 | LINVRNT(cl_lock_invariant(env, lock)); | |
1550 | ||
1551 | pound = 0; | |
1552 | list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { | |
1553 | if (slice->cls_ops->clo_weigh != NULL) { | |
1554 | ounce = slice->cls_ops->clo_weigh(env, slice); | |
1555 | pound += ounce; | |
1556 | if (pound < ounce) /* over-weight^Wflow */ | |
1557 | pound = ~0UL; | |
1558 | } | |
1559 | } | |
1560 | RETURN(pound); | |
1561 | } | |
1562 | EXPORT_SYMBOL(cl_lock_weigh); | |
1563 | ||
1564 | /** | |
1565 | * Notifies layers that lock description changed. | |
1566 | * | |
1567 | * The server can grant client a lock different from one that was requested | |
1568 | * (e.g., larger in extent). This method is called when actually granted lock | |
1569 | * description becomes known to let layers to accommodate for changed lock | |
1570 | * description. | |
1571 | * | |
1572 | * \see cl_lock_operations::clo_modify() | |
1573 | */ | |
1574 | int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, | |
1575 | const struct cl_lock_descr *desc) | |
1576 | { | |
1577 | const struct cl_lock_slice *slice; | |
1578 | struct cl_object *obj = lock->cll_descr.cld_obj; | |
1579 | struct cl_object_header *hdr = cl_object_header(obj); | |
1580 | int result; | |
1581 | ||
1582 | ENTRY; | |
1583 | cl_lock_trace(D_DLMTRACE, env, "modify lock", lock); | |
1584 | /* don't allow object to change */ | |
1585 | LASSERT(obj == desc->cld_obj); | |
1586 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1587 | LINVRNT(cl_lock_invariant(env, lock)); | |
1588 | ||
1589 | list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { | |
1590 | if (slice->cls_ops->clo_modify != NULL) { | |
1591 | result = slice->cls_ops->clo_modify(env, slice, desc); | |
1592 | if (result != 0) | |
1593 | RETURN(result); | |
1594 | } | |
1595 | } | |
1596 | CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n", | |
1597 | PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu))); | |
1598 | /* | |
1599 | * Just replace description in place. Nothing more is needed for | |
1600 | * now. If locks were indexed according to their extent and/or mode, | |
1601 | * that index would have to be updated here. | |
1602 | */ | |
1603 | spin_lock(&hdr->coh_lock_guard); | |
1604 | lock->cll_descr = *desc; | |
1605 | spin_unlock(&hdr->coh_lock_guard); | |
1606 | RETURN(0); | |
1607 | } | |
1608 | EXPORT_SYMBOL(cl_lock_modify); | |
1609 | ||
1610 | /** | |
1611 | * Initializes lock closure with a given origin. | |
1612 | * | |
1613 | * \see cl_lock_closure | |
1614 | */ | |
1615 | void cl_lock_closure_init(const struct lu_env *env, | |
1616 | struct cl_lock_closure *closure, | |
1617 | struct cl_lock *origin, int wait) | |
1618 | { | |
1619 | LINVRNT(cl_lock_is_mutexed(origin)); | |
1620 | LINVRNT(cl_lock_invariant(env, origin)); | |
1621 | ||
1622 | INIT_LIST_HEAD(&closure->clc_list); | |
1623 | closure->clc_origin = origin; | |
1624 | closure->clc_wait = wait; | |
1625 | closure->clc_nr = 0; | |
1626 | } | |
1627 | EXPORT_SYMBOL(cl_lock_closure_init); | |
1628 | ||
1629 | /** | |
1630 | * Builds a closure of \a lock. | |
1631 | * | |
1632 | * Building of a closure consists of adding initial lock (\a lock) into it, | |
1633 | * and calling cl_lock_operations::clo_closure() methods of \a lock. These | |
1634 | * methods might call cl_lock_closure_build() recursively again, adding more | |
1635 | * locks to the closure, etc. | |
1636 | * | |
1637 | * \see cl_lock_closure | |
1638 | */ | |
1639 | int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, | |
1640 | struct cl_lock_closure *closure) | |
1641 | { | |
1642 | const struct cl_lock_slice *slice; | |
1643 | int result; | |
1644 | ||
1645 | ENTRY; | |
1646 | LINVRNT(cl_lock_is_mutexed(closure->clc_origin)); | |
1647 | LINVRNT(cl_lock_invariant(env, closure->clc_origin)); | |
1648 | ||
1649 | result = cl_lock_enclosure(env, lock, closure); | |
1650 | if (result == 0) { | |
1651 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
1652 | if (slice->cls_ops->clo_closure != NULL) { | |
1653 | result = slice->cls_ops->clo_closure(env, slice, | |
1654 | closure); | |
1655 | if (result != 0) | |
1656 | break; | |
1657 | } | |
1658 | } | |
1659 | } | |
1660 | if (result != 0) | |
1661 | cl_lock_disclosure(env, closure); | |
1662 | RETURN(result); | |
1663 | } | |
1664 | EXPORT_SYMBOL(cl_lock_closure_build); | |
1665 | ||
1666 | /** | |
1667 | * Adds new lock to a closure. | |
1668 | * | |
1669 | * Try-locks \a lock and if succeeded, adds it to the closure (never more than | |
1670 | * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting | |
1671 | * until next try-lock is likely to succeed. | |
1672 | */ | |
1673 | int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, | |
1674 | struct cl_lock_closure *closure) | |
1675 | { | |
1676 | int result = 0; | |
1677 | ENTRY; | |
1678 | cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock); | |
1679 | if (!cl_lock_mutex_try(env, lock)) { | |
1680 | /* | |
1681 | * If lock->cll_inclosure is not empty, lock is already in | |
1682 | * this closure. | |
1683 | */ | |
1684 | if (list_empty(&lock->cll_inclosure)) { | |
1685 | cl_lock_get_trust(lock); | |
1686 | lu_ref_add(&lock->cll_reference, "closure", closure); | |
1687 | list_add(&lock->cll_inclosure, &closure->clc_list); | |
1688 | closure->clc_nr++; | |
1689 | } else | |
1690 | cl_lock_mutex_put(env, lock); | |
1691 | result = 0; | |
1692 | } else { | |
1693 | cl_lock_disclosure(env, closure); | |
1694 | if (closure->clc_wait) { | |
1695 | cl_lock_get_trust(lock); | |
1696 | lu_ref_add(&lock->cll_reference, "closure-w", closure); | |
1697 | cl_lock_mutex_put(env, closure->clc_origin); | |
1698 | ||
1699 | LASSERT(cl_lock_nr_mutexed(env) == 0); | |
1700 | cl_lock_mutex_get(env, lock); | |
1701 | cl_lock_mutex_put(env, lock); | |
1702 | ||
1703 | cl_lock_mutex_get(env, closure->clc_origin); | |
1704 | lu_ref_del(&lock->cll_reference, "closure-w", closure); | |
1705 | cl_lock_put(env, lock); | |
1706 | } | |
1707 | result = CLO_REPEAT; | |
1708 | } | |
1709 | RETURN(result); | |
1710 | } | |
1711 | EXPORT_SYMBOL(cl_lock_enclosure); | |
1712 | ||
1713 | /** Releases mutices of enclosed locks. */ | |
1714 | void cl_lock_disclosure(const struct lu_env *env, | |
1715 | struct cl_lock_closure *closure) | |
1716 | { | |
1717 | struct cl_lock *scan; | |
1718 | struct cl_lock *temp; | |
1719 | ||
1720 | cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); | |
1721 | list_for_each_entry_safe(scan, temp, &closure->clc_list, | |
1722 | cll_inclosure){ | |
1723 | list_del_init(&scan->cll_inclosure); | |
1724 | cl_lock_mutex_put(env, scan); | |
1725 | lu_ref_del(&scan->cll_reference, "closure", closure); | |
1726 | cl_lock_put(env, scan); | |
1727 | closure->clc_nr--; | |
1728 | } | |
1729 | LASSERT(closure->clc_nr == 0); | |
1730 | } | |
1731 | EXPORT_SYMBOL(cl_lock_disclosure); | |
1732 | ||
1733 | /** Finalizes a closure. */ | |
1734 | void cl_lock_closure_fini(struct cl_lock_closure *closure) | |
1735 | { | |
1736 | LASSERT(closure->clc_nr == 0); | |
1737 | LASSERT(list_empty(&closure->clc_list)); | |
1738 | } | |
1739 | EXPORT_SYMBOL(cl_lock_closure_fini); | |
1740 | ||
1741 | /** | |
1742 | * Destroys this lock. Notifies layers (bottom-to-top) that lock is being | |
1743 | * destroyed, then destroy the lock. If there are holds on the lock, postpone | |
1744 | * destruction until all holds are released. This is called when a decision is | |
1745 | * made to destroy the lock in the future. E.g., when a blocking AST is | |
1746 | * received on it, or fatal communication error happens. | |
1747 | * | |
1748 | * Caller must have a reference on this lock to prevent a situation, when | |
1749 | * deleted lock lingers in memory for indefinite time, because nobody calls | |
1750 | * cl_lock_put() to finish it. | |
1751 | * | |
1752 | * \pre atomic_read(&lock->cll_ref) > 0 | |
1753 | * \pre ergo(cl_lock_nesting(lock) == CNL_TOP, | |
1754 | * cl_lock_nr_mutexed(env) == 1) | |
1755 | * [i.e., if a top-lock is deleted, mutices of no other locks can be | |
1756 | * held, as deletion of sub-locks might require releasing a top-lock | |
1757 | * mutex] | |
1758 | * | |
1759 | * \see cl_lock_operations::clo_delete() | |
1760 | * \see cl_lock::cll_holds | |
1761 | */ | |
1762 | void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock) | |
1763 | { | |
1764 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1765 | LINVRNT(cl_lock_invariant(env, lock)); | |
1766 | LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP, | |
1767 | cl_lock_nr_mutexed(env) == 1)); | |
1768 | ||
1769 | ENTRY; | |
1770 | cl_lock_trace(D_DLMTRACE, env, "delete lock", lock); | |
1771 | if (lock->cll_holds == 0) | |
1772 | cl_lock_delete0(env, lock); | |
1773 | else | |
1774 | lock->cll_flags |= CLF_DOOMED; | |
1775 | EXIT; | |
1776 | } | |
1777 | EXPORT_SYMBOL(cl_lock_delete); | |
1778 | ||
1779 | /** | |
1780 | * Mark lock as irrecoverably failed, and mark it for destruction. This | |
1781 | * happens when, e.g., server fails to grant a lock to us, or networking | |
1782 | * time-out happens. | |
1783 | * | |
1784 | * \pre atomic_read(&lock->cll_ref) > 0 | |
1785 | * | |
1786 | * \see clo_lock_delete() | |
1787 | * \see cl_lock::cll_holds | |
1788 | */ | |
1789 | void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error) | |
1790 | { | |
1791 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1792 | LINVRNT(cl_lock_invariant(env, lock)); | |
1793 | ||
1794 | ENTRY; | |
1795 | if (lock->cll_error == 0 && error != 0) { | |
1796 | cl_lock_trace(D_DLMTRACE, env, "set lock error", lock); | |
1797 | lock->cll_error = error; | |
1798 | cl_lock_signal(env, lock); | |
1799 | cl_lock_cancel(env, lock); | |
1800 | cl_lock_delete(env, lock); | |
1801 | } | |
1802 | EXIT; | |
1803 | } | |
1804 | EXPORT_SYMBOL(cl_lock_error); | |
1805 | ||
1806 | /** | |
1807 | * Cancels this lock. Notifies layers | |
1808 | * (bottom-to-top) that lock is being cancelled, then destroy the lock. If | |
1809 | * there are holds on the lock, postpone cancellation until | |
1810 | * all holds are released. | |
1811 | * | |
1812 | * Cancellation notification is delivered to layers at most once. | |
1813 | * | |
1814 | * \see cl_lock_operations::clo_cancel() | |
1815 | * \see cl_lock::cll_holds | |
1816 | */ | |
1817 | void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock) | |
1818 | { | |
1819 | LINVRNT(cl_lock_is_mutexed(lock)); | |
1820 | LINVRNT(cl_lock_invariant(env, lock)); | |
1821 | ||
1822 | ENTRY; | |
1823 | cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock); | |
1824 | if (lock->cll_holds == 0) | |
1825 | cl_lock_cancel0(env, lock); | |
1826 | else | |
1827 | lock->cll_flags |= CLF_CANCELPEND; | |
1828 | EXIT; | |
1829 | } | |
1830 | EXPORT_SYMBOL(cl_lock_cancel); | |
1831 | ||
/**
 * Finds an existing lock covering given index and optionally different from a
 * given \a except lock.
 *
 * \param index   page offset the returned lock must cover
 * \param except  lock to skip during the scan, may be NULL
 * \param pending when non-zero, locks with CLF_CANCELPEND set still match
 * \param canceld when non-zero, locks with CLF_CANCELLED set still match
 *
 * \retval matching lock with an extra reference (via cl_lock_get_trust();
 *	   the caller must drop it with cl_lock_put()), or NULL if none found
 */
struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
				 struct cl_object *obj, pgoff_t index,
				 struct cl_lock *except,
				 int pending, int canceld)
{
	struct cl_object_header *head;
	struct cl_lock *scan;
	struct cl_lock *lock;
	struct cl_lock_descr *need;

	ENTRY;

	head = cl_object_header(obj);
	/* use the per-thread scratch descriptor to express what is needed */
	need = &cl_env_info(env)->clt_descr;
	lock = NULL;

	need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
				    * not PHANTOM */
	need->cld_start = need->cld_end = index;
	need->cld_enq_flags = 0;

	spin_lock(&head->coh_lock_guard);
	/* It is fine to match any group lock since there could be only one
	 * with a uniq gid and it conflicts with all other lock modes too */
	list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
		if (scan != except &&
		    (scan->cll_descr.cld_mode == CLM_GROUP ||
		    cl_lock_ext_match(&scan->cll_descr, need)) &&
		    scan->cll_state >= CLS_HELD &&
		    scan->cll_state < CLS_FREEING &&
		    /*
		     * This check is racy as the lock can be canceled right
		     * after it is done, but this is fine, because page exists
		     * already.
		     */
		    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
		    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
			/* Don't increase cs_hit here since this
			 * is just a helper function. */
			cl_lock_get_trust(scan);
			lock = scan;
			break;
		}
	}
	spin_unlock(&head->coh_lock_guard);
	RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
1884 | ||
1885 | /** | |
1886 | * Calculate the page offset at the layer of @lock. | |
1887 | * At the time of this writing, @page is top page and @lock is sub lock. | |
1888 | */ | |
1889 | static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock) | |
1890 | { | |
1891 | struct lu_device_type *dtype; | |
1892 | const struct cl_page_slice *slice; | |
1893 | ||
1894 | dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type; | |
1895 | slice = cl_page_at(page, dtype); | |
1896 | LASSERT(slice != NULL); | |
1897 | return slice->cpl_page->cp_index; | |
1898 | } | |
1899 | ||
1900 | /** | |
1901 | * Check if page @page is covered by an extra lock or discard it. | |
1902 | */ | |
1903 | static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, | |
1904 | struct cl_page *page, void *cbdata) | |
1905 | { | |
1906 | struct cl_thread_info *info = cl_env_info(env); | |
1907 | struct cl_lock *lock = cbdata; | |
1908 | pgoff_t index = pgoff_at_lock(page, lock); | |
1909 | ||
1910 | if (index >= info->clt_fn_index) { | |
1911 | struct cl_lock *tmp; | |
1912 | ||
1913 | /* refresh non-overlapped index */ | |
1914 | tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index, | |
1915 | lock, 1, 0); | |
1916 | if (tmp != NULL) { | |
1917 | /* Cache the first-non-overlapped index so as to skip | |
1918 | * all pages within [index, clt_fn_index). This | |
1919 | * is safe because if tmp lock is canceled, it will | |
1920 | * discard these pages. */ | |
1921 | info->clt_fn_index = tmp->cll_descr.cld_end + 1; | |
1922 | if (tmp->cll_descr.cld_end == CL_PAGE_EOF) | |
1923 | info->clt_fn_index = CL_PAGE_EOF; | |
1924 | cl_lock_put(env, tmp); | |
1925 | } else if (cl_page_own(env, io, page) == 0) { | |
1926 | /* discard the page */ | |
1927 | cl_page_unmap(env, io, page); | |
1928 | cl_page_discard(env, io, page); | |
1929 | cl_page_disown(env, io, page); | |
1930 | } else { | |
1931 | LASSERT(page->cp_state == CPS_FREEING); | |
1932 | } | |
1933 | } | |
1934 | ||
1935 | info->clt_next_index = index + 1; | |
1936 | return CLP_GANG_OKAY; | |
1937 | } | |
1938 | ||
1939 | static int discard_cb(const struct lu_env *env, struct cl_io *io, | |
1940 | struct cl_page *page, void *cbdata) | |
1941 | { | |
1942 | struct cl_thread_info *info = cl_env_info(env); | |
1943 | struct cl_lock *lock = cbdata; | |
1944 | ||
1945 | LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); | |
1946 | KLASSERT(ergo(page->cp_type == CPT_CACHEABLE, | |
1947 | !PageWriteback(cl_page_vmpage(env, page)))); | |
1948 | KLASSERT(ergo(page->cp_type == CPT_CACHEABLE, | |
1949 | !PageDirty(cl_page_vmpage(env, page)))); | |
1950 | ||
1951 | info->clt_next_index = pgoff_at_lock(page, lock) + 1; | |
1952 | if (cl_page_own(env, io, page) == 0) { | |
1953 | /* discard the page */ | |
1954 | cl_page_unmap(env, io, page); | |
1955 | cl_page_discard(env, io, page); | |
1956 | cl_page_disown(env, io, page); | |
1957 | } else { | |
1958 | LASSERT(page->cp_state == CPS_FREEING); | |
1959 | } | |
1960 | ||
1961 | return CLP_GANG_OKAY; | |
1962 | } | |
1963 | ||
/**
 * Discard pages protected by the given lock. This function traverses radix
 * tree to find all covering pages and discard them. If a page is being covered
 * by other locks, it should remain in cache.
 *
 * If error happens on any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
 */
int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_thread_info *info = cl_env_info(env);
	struct cl_io *io = &info->clt_io;
	struct cl_lock_descr *descr = &lock->cll_descr;
	cl_page_gang_cb_t cb;
	int res;
	int result;

	LINVRNT(cl_lock_invariant(env, lock));
	ENTRY;

	io->ci_obj = cl_object_top(descr->cld_obj);
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (result != 0)
		GOTO(out, result);

	/* CLM_READ locks can share pages with other locks, so each page must
	 * be checked for an extra covering lock before being discarded;
	 * stronger modes discard unconditionally */
	cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
	info->clt_fn_index = info->clt_next_index = descr->cld_start;
	do {
		res = cl_page_gang_lookup(env, descr->cld_obj, io,
					  info->clt_next_index, descr->cld_end,
					  cb, (void *)lock);
		/* the callbacks advance clt_next_index; stop once the whole
		 * [cld_start, cld_end] extent has been walked */
		if (info->clt_next_index > descr->cld_end)
			break;

		if (res == CLP_GANG_RESCHED)
			cond_resched();
	} while (res != CLP_GANG_OKAY);
out:
	cl_io_fini(env, io);
	RETURN(result);
}
EXPORT_SYMBOL(cl_lock_discard_pages);
2007 | ||
/**
 * Eliminate all locks for a given object.
 *
 * Caller has to guarantee that no lock is in active use.
 *
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *		 destroying.
 */
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
	struct cl_object_header *head;
	struct cl_lock *lock;

	ENTRY;
	head = cl_object_header(obj);
	/*
	 * If locks are destroyed without cancellation, all pages must be
	 * already destroyed (as otherwise they will be left unprotected).
	 */
	LASSERT(ergo(!cancel,
		     head->coh_tree.rnode == NULL && head->coh_pages == 0));

	spin_lock(&head->coh_lock_guard);
	while (!list_empty(&head->coh_locks)) {
		/* always take the first lock: each iteration removes it from
		 * the list (via delete) or waits until it can be removed */
		lock = container_of(head->coh_locks.next,
				    struct cl_lock, cll_linkage);
		/* pin the lock before dropping the guard so it cannot be
		 * freed under us */
		cl_lock_get_trust(lock);
		spin_unlock(&head->coh_lock_guard);
		lu_ref_add(&lock->cll_reference, "prune", current);

again:
		cl_lock_mutex_get(env, lock);
		if (lock->cll_state < CLS_FREEING) {
			LASSERT(lock->cll_users <= 1);
			if (unlikely(lock->cll_users == 1)) {
				struct l_wait_info lwi = { 0 };

				/* drop the mutex and wait for the last user
				 * to go away (cl_lock_user_del() wakes
				 * cll_wq), then retry from the top */
				cl_lock_mutex_put(env, lock);
				l_wait_event(lock->cll_wq,
					     lock->cll_users == 0,
					     &lwi);
				goto again;
			}

			if (cancel)
				cl_lock_cancel(env, lock);
			cl_lock_delete(env, lock);
		}
		cl_lock_mutex_put(env, lock);
		lu_ref_del(&lock->cll_reference, "prune", current);
		cl_lock_put(env, lock);
		spin_lock(&head->coh_lock_guard);
	}
	spin_unlock(&head->coh_lock_guard);
	EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);
2065 | ||
/**
 * Finds (or creates) a lock matching \a need and returns it with a hold
 * added and the lock mutex taken.
 *
 * If the lock returned by cl_lock_find() is already dying (CLS_FREEING or
 * CLF_CANCELLED), it is dropped and the lookup is retried until a usable
 * lock is obtained.
 *
 * \retval lock with cll_holds incremented and mutex held, or the ERR_PTR()
 *	   propagated from cl_lock_find() on failure
 */
static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
					  const struct cl_io *io,
					  const struct cl_lock_descr *need,
					  const char *scope, const void *source)
{
	struct cl_lock *lock;

	ENTRY;

	while (1) {
		lock = cl_lock_find(env, io, need);
		if (IS_ERR(lock))
			break;
		cl_lock_mutex_get(env, lock);
		if (lock->cll_state < CLS_FREEING &&
		    !(lock->cll_flags & CLF_CANCELLED)) {
			cl_lock_hold_mod(env, lock, +1);
			lu_ref_add(&lock->cll_holders, scope, source);
			lu_ref_add(&lock->cll_reference, scope, source);
			break;
		}
		/* lock is being destroyed; drop it and retry the lookup */
		cl_lock_mutex_put(env, lock);
		cl_lock_put(env, lock);
	}
	RETURN(lock);
}
2092 | ||
2093 | /** | |
2094 | * Returns a lock matching \a need description with a reference and a hold on | |
2095 | * it. | |
2096 | * | |
2097 | * This is much like cl_lock_find(), except that cl_lock_hold() additionally | |
2098 | * guarantees that lock is not in the CLS_FREEING state on return. | |
2099 | */ | |
2100 | struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, | |
2101 | const struct cl_lock_descr *need, | |
2102 | const char *scope, const void *source) | |
2103 | { | |
2104 | struct cl_lock *lock; | |
2105 | ||
2106 | ENTRY; | |
2107 | ||
2108 | lock = cl_lock_hold_mutex(env, io, need, scope, source); | |
2109 | if (!IS_ERR(lock)) | |
2110 | cl_lock_mutex_put(env, lock); | |
2111 | RETURN(lock); | |
2112 | } | |
2113 | EXPORT_SYMBOL(cl_lock_hold); | |
2114 | ||
/**
 * Main high-level entry point of cl_lock interface that finds existing or
 * enqueues new lock matching given description.
 *
 * \retval lock on success; NULL when a CEF_AGL enqueue succeeded (the lock is
 *	   not handed to the caller in that case); ERR_PTR() on failure
 */
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
				const struct cl_lock_descr *need,
				const char *scope, const void *source)
{
	struct cl_lock *lock;
	int rc;
	__u32 enqflags = need->cld_enq_flags;

	ENTRY;
	do {
		lock = cl_lock_hold_mutex(env, io, need, scope, source);
		if (IS_ERR(lock))
			break;

		rc = cl_enqueue_locked(env, lock, io, enqflags);
		if (rc == 0) {
			if (cl_lock_fits_into(env, lock, need, io)) {
				if (!(enqflags & CEF_AGL)) {
					/* enqueued lock matches the request:
					 * hand it to the caller */
					cl_lock_mutex_put(env, lock);
					cl_lock_lockdep_acquire(env, lock,
								enqflags);
					break;
				}
				/* rc > 0 marks a successful AGL enqueue */
				rc = 1;
			}
			/* either AGL, or the enqueued lock does not fit the
			 * request (e.g. was shrunk) — release the use and
			 * retry/finish below */
			cl_unuse_locked(env, lock);
		}
		cl_lock_trace(D_DLMTRACE, env,
			      rc <= 0 ? "enqueue failed" : "agl succeed", lock);
		/* undo the hold and reference from cl_lock_hold_mutex() */
		cl_lock_hold_release(env, lock, scope, source);
		cl_lock_mutex_put(env, lock);
		lu_ref_del(&lock->cll_reference, scope, source);
		cl_lock_put(env, lock);
		if (rc > 0) {
			LASSERT(enqflags & CEF_AGL);
			lock = NULL;
		} else if (rc != 0) {
			lock = ERR_PTR(rc);
		}
	} while (rc == 0);
	RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_request);
2162 | ||
2163 | /** | |
2164 | * Adds a hold to a known lock. | |
2165 | */ | |
2166 | void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, | |
2167 | const char *scope, const void *source) | |
2168 | { | |
2169 | LINVRNT(cl_lock_is_mutexed(lock)); | |
2170 | LINVRNT(cl_lock_invariant(env, lock)); | |
2171 | LASSERT(lock->cll_state != CLS_FREEING); | |
2172 | ||
2173 | ENTRY; | |
2174 | cl_lock_hold_mod(env, lock, +1); | |
2175 | cl_lock_get(lock); | |
2176 | lu_ref_add(&lock->cll_holders, scope, source); | |
2177 | lu_ref_add(&lock->cll_reference, scope, source); | |
2178 | EXIT; | |
2179 | } | |
2180 | EXPORT_SYMBOL(cl_lock_hold_add); | |
2181 | ||
/**
 * Releases a hold and a reference on a lock, on which caller acquired a
 * mutex.
 */
void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
		    const char *scope, const void *source)
{
	LINVRNT(cl_lock_invariant(env, lock));
	ENTRY;
	cl_lock_hold_release(env, lock, scope, source);
	/* drop the reference that backed the hold; cl_lock_put() may release
	 * the last reference, so nothing may touch the lock afterwards */
	lu_ref_del(&lock->cll_reference, scope, source);
	cl_lock_put(env, lock);
	EXIT;
}
EXPORT_SYMBOL(cl_lock_unhold);
2197 | ||
/**
 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
 *
 * Unlike cl_lock_unhold(), the caller does NOT hold the lock mutex: it is
 * taken here just around the hold release.
 */
void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
		     const char *scope, const void *source)
{
	LINVRNT(cl_lock_invariant(env, lock));
	ENTRY;
	cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
	cl_lock_mutex_get(env, lock);
	cl_lock_hold_release(env, lock, scope, source);
	cl_lock_mutex_put(env, lock);
	/* drop the reference last: cl_lock_put() may free the lock */
	lu_ref_del(&lock->cll_reference, scope, source);
	cl_lock_put(env, lock);
	EXIT;
}
EXPORT_SYMBOL(cl_lock_release);
2215 | ||
/**
 * Increments the user count (cl_lock::cll_users) of \a lock.
 *
 * \pre lock mutex is held by the caller
 * \see cl_lock_user_del()
 */
void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	ENTRY;
	cl_lock_used_mod(env, lock, +1);
	EXIT;
}
EXPORT_SYMBOL(cl_lock_user_add);
2226 | ||
/**
 * Decrements the user count (cl_lock::cll_users) of \a lock, waking up
 * anybody waiting on cll_wq for the count to reach zero (e.g.,
 * cl_locks_prune()).
 *
 * \pre lock mutex is held by the caller and lock->cll_users > 0
 * \see cl_lock_user_add()
 */
void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_users > 0);

	ENTRY;
	cl_lock_used_mod(env, lock, -1);
	if (lock->cll_users == 0)
		wake_up_all(&lock->cll_wq);
	EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);
2240 | ||
2241 | const char *cl_lock_mode_name(const enum cl_lock_mode mode) | |
2242 | { | |
2243 | static const char *names[] = { | |
2244 | [CLM_PHANTOM] = "P", | |
2245 | [CLM_READ] = "R", | |
2246 | [CLM_WRITE] = "W", | |
2247 | [CLM_GROUP] = "G" | |
2248 | }; | |
2249 | if (0 <= mode && mode < ARRAY_SIZE(names)) | |
2250 | return names[mode]; | |
2251 | else | |
2252 | return "U"; | |
2253 | } | |
2254 | EXPORT_SYMBOL(cl_lock_mode_name); | |
2255 | ||
2256 | /** | |
2257 | * Prints human readable representation of a lock description. | |
2258 | */ | |
2259 | void cl_lock_descr_print(const struct lu_env *env, void *cookie, | |
2260 | lu_printer_t printer, | |
2261 | const struct cl_lock_descr *descr) | |
2262 | { | |
2263 | const struct lu_fid *fid; | |
2264 | ||
2265 | fid = lu_object_fid(&descr->cld_obj->co_lu); | |
2266 | (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid)); | |
2267 | } | |
2268 | EXPORT_SYMBOL(cl_lock_descr_print); | |
2269 | ||
2270 | /** | |
2271 | * Prints human readable representation of \a lock to the \a f. | |
2272 | */ | |
2273 | void cl_lock_print(const struct lu_env *env, void *cookie, | |
2274 | lu_printer_t printer, const struct cl_lock *lock) | |
2275 | { | |
2276 | const struct cl_lock_slice *slice; | |
2277 | (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ", | |
2278 | lock, atomic_read(&lock->cll_ref), | |
2279 | lock->cll_state, lock->cll_error, lock->cll_holds, | |
2280 | lock->cll_users, lock->cll_flags); | |
2281 | cl_lock_descr_print(env, cookie, printer, &lock->cll_descr); | |
2282 | (*printer)(env, cookie, " {\n"); | |
2283 | ||
2284 | list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { | |
2285 | (*printer)(env, cookie, " %s@%p: ", | |
2286 | slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name, | |
2287 | slice); | |
2288 | if (slice->cls_ops->clo_print != NULL) | |
2289 | slice->cls_ops->clo_print(env, cookie, printer, slice); | |
2290 | (*printer)(env, cookie, "\n"); | |
2291 | } | |
2292 | (*printer)(env, cookie, "} lock@%p\n", lock); | |
2293 | } | |
2294 | EXPORT_SYMBOL(cl_lock_print); | |
2295 | ||
/** Creates the kmem caches used by the cl_lock subsystem. */
int cl_lock_init(void)
{
	return lu_kmem_init(cl_lock_caches);
}
2300 | ||
/** Releases the kmem caches created by cl_lock_init(). */
void cl_lock_fini(void)
{
	lu_kmem_fini(cl_lock_caches);
}