staging/lustre/ldlm: drop redundant ibits lock interoperability check
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c (deliverable/linux.git)
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2010, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/ldlm/ldlm_resource.c
37 *
38 * Author: Phil Schwan <phil@clusterfs.com>
39 * Author: Peter Braam <braam@clusterfs.com>
40 */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include "../include/lustre_dlm.h"
44 #include "../include/lustre_fid.h"
45 #include "../include/obd_class.h"
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49
50 int ldlm_srv_namespace_nr = 0;
51 int ldlm_cli_namespace_nr = 0;
52
53 struct mutex ldlm_srv_namespace_lock;
54 LIST_HEAD(ldlm_srv_namespace_list);
55
56 struct mutex ldlm_cli_namespace_lock;
57 /* Client Namespaces that have active resources in them.
58 * Once all resources go away, ldlm_poold moves such namespaces to the
59 * inactive list */
60 LIST_HEAD(ldlm_cli_active_namespace_list);
61 /* Client namespaces that don't have any locks in them */
62 LIST_HEAD(ldlm_cli_inactive_namespace_list);
63
64 struct proc_dir_entry *ldlm_type_proc_dir = NULL;
65 struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
66 struct proc_dir_entry *ldlm_svc_proc_dir = NULL;
67
68 extern unsigned int ldlm_cancel_unused_locks_before_replay;
69
70 /* During a debug dump, cap the number of granted locks printed for a single
71 * resource to avoid flooding the logs (a self-inflicted DDoS). */
72 unsigned int ldlm_dump_granted_max = 256;
73
74 #if defined (CONFIG_PROC_FS)
75 static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
76 size_t count, loff_t *off)
77 {
78 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
79 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
80 return count;
81 }
82 LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
83
84 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
85 LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
86
87 int ldlm_proc_setup(void)
88 {
89 int rc;
90 struct lprocfs_vars list[] = {
91 { "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
92 { "dump_granted_max", &ldlm_rw_uint_fops,
93 &ldlm_dump_granted_max },
94 { "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
95 &ldlm_cancel_unused_locks_before_replay },
96 { NULL }};
97 LASSERT(ldlm_ns_proc_dir == NULL);
98
99 ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
100 proc_lustre_root,
101 NULL, NULL);
102 if (IS_ERR(ldlm_type_proc_dir)) {
103 CERROR("LProcFS failed in ldlm-init\n");
104 rc = PTR_ERR(ldlm_type_proc_dir);
105 GOTO(err, rc);
106 }
107
108 ldlm_ns_proc_dir = lprocfs_register("namespaces",
109 ldlm_type_proc_dir,
110 NULL, NULL);
111 if (IS_ERR(ldlm_ns_proc_dir)) {
112 CERROR("LProcFS failed in ldlm-init\n");
113 rc = PTR_ERR(ldlm_ns_proc_dir);
114 GOTO(err_type, rc);
115 }
116
117 ldlm_svc_proc_dir = lprocfs_register("services",
118 ldlm_type_proc_dir,
119 NULL, NULL);
120 if (IS_ERR(ldlm_svc_proc_dir)) {
121 CERROR("LProcFS failed in ldlm-init\n");
122 rc = PTR_ERR(ldlm_svc_proc_dir);
123 GOTO(err_ns, rc);
124 }
125
126 rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
127
128 return 0;
129
130 err_ns:
131 lprocfs_remove(&ldlm_ns_proc_dir);
132 err_type:
133 lprocfs_remove(&ldlm_type_proc_dir);
134 err:
135 ldlm_svc_proc_dir = NULL;
136 ldlm_type_proc_dir = NULL;
137 ldlm_ns_proc_dir = NULL;
138 return rc;
139 }
140
141 void ldlm_proc_cleanup(void)
142 {
143 if (ldlm_svc_proc_dir)
144 lprocfs_remove(&ldlm_svc_proc_dir);
145
146 if (ldlm_ns_proc_dir)
147 lprocfs_remove(&ldlm_ns_proc_dir);
148
149 if (ldlm_type_proc_dir)
150 lprocfs_remove(&ldlm_type_proc_dir);
151
152 ldlm_svc_proc_dir = NULL;
153 ldlm_type_proc_dir = NULL;
154 ldlm_ns_proc_dir = NULL;
155 }
156
157 static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
158 {
159 struct ldlm_namespace *ns = m->private;
160 __u64 res = 0;
161 struct cfs_hash_bd bd;
162 int i;
163
164 /* result is not strictly consistent */
165 cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
166 res += cfs_hash_bd_count_get(&bd);
167 return lprocfs_rd_u64(m, &res);
168 }
169 LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
170
171 static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
172 {
173 struct ldlm_namespace *ns = m->private;
174 __u64 locks;
175
176 locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
177 LPROCFS_FIELDS_FLAGS_SUM);
178 return lprocfs_rd_u64(m, &locks);
179 }
180 LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
181
182 static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
183 {
184 struct ldlm_namespace *ns = m->private;
185 __u32 *nr = &ns->ns_max_unused;
186
187 if (ns_connect_lru_resize(ns))
188 nr = &ns->ns_nr_unused;
189 return lprocfs_rd_uint(m, nr);
190 }
191
192 static ssize_t lprocfs_lru_size_seq_write(struct file *file,
193 const char __user *buffer,
194 size_t count, loff_t *off)
195 {
196 struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
197 char dummy[MAX_STRING_SIZE + 1];
198 unsigned long tmp;
199 int lru_resize;
200 int err;
201
202 dummy[MAX_STRING_SIZE] = '\0';
203 if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
204 return -EFAULT;
205
206 if (strncmp(dummy, "clear", 5) == 0) {
207 CDEBUG(D_DLMTRACE,
208 "dropping all unused locks from namespace %s\n",
209 ldlm_ns_name(ns));
210 if (ns_connect_lru_resize(ns)) {
211 int canceled, unused = ns->ns_nr_unused;
212
213 /* Try to cancel all @ns_nr_unused locks. */
214 canceled = ldlm_cancel_lru(ns, unused, 0,
215 LDLM_CANCEL_PASSED);
216 if (canceled < unused) {
217 CDEBUG(D_DLMTRACE,
218 "not all requested locks are canceled, "
219 "requested: %d, canceled: %d\n", unused,
220 canceled);
221 return -EINVAL;
222 }
223 } else {
224 tmp = ns->ns_max_unused;
225 ns->ns_max_unused = 0;
226 ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
227 ns->ns_max_unused = tmp;
228 }
229 return count;
230 }
231
232 err = kstrtoul(dummy, 10, &tmp);
233 if (err != 0) {
234 CERROR("invalid value written\n");
235 return -EINVAL;
236 }
237 lru_resize = (tmp == 0);
238
239 if (ns_connect_lru_resize(ns)) {
240 if (!lru_resize)
241 ns->ns_max_unused = (unsigned int)tmp;
242
243 if (tmp > ns->ns_nr_unused)
244 tmp = ns->ns_nr_unused;
245 tmp = ns->ns_nr_unused - tmp;
246
247 CDEBUG(D_DLMTRACE,
248 "changing namespace %s unused locks from %u to %u\n",
249 ldlm_ns_name(ns), ns->ns_nr_unused,
250 (unsigned int)tmp);
251 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
252
253 if (!lru_resize) {
254 CDEBUG(D_DLMTRACE,
255 "disable lru_resize for namespace %s\n",
256 ldlm_ns_name(ns));
257 ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
258 }
259 } else {
260 CDEBUG(D_DLMTRACE,
261 "changing namespace %s max_unused from %u to %u\n",
262 ldlm_ns_name(ns), ns->ns_max_unused,
263 (unsigned int)tmp);
264 ns->ns_max_unused = (unsigned int)tmp;
265 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
266
267 /* Make sure that LRU resize was originally supported before
268 * turning it on here. */
269 if (lru_resize &&
270 (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
271 CDEBUG(D_DLMTRACE,
272 "enable lru_resize for namespace %s\n",
273 ldlm_ns_name(ns));
274 ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
275 }
276 }
277
278 return count;
279 }
280 LPROC_SEQ_FOPS(lprocfs_lru_size);
281
282 static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
283 {
284 struct ldlm_namespace *ns = m->private;
285 unsigned int supp = ns_connect_cancelset(ns);
286
287 return lprocfs_rd_uint(m, &supp);
288 }
289
290 static ssize_t lprocfs_elc_seq_write(struct file *file, const char *buffer,
291 size_t count, loff_t *off)
292 {
293 struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
294 unsigned int supp = -1;
295 int rc;
296
297 rc = lprocfs_wr_uint(file, buffer, count, &supp);
298 if (rc < 0)
299 return rc;
300
301 if (supp == 0)
302 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
303 else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
304 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
305 return count;
306 }
307 LPROC_SEQ_FOPS(lprocfs_elc);
308
309 void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
310 {
311 if (ns->ns_proc_dir_entry == NULL)
312 CERROR("dlm namespace %s has no procfs dir?\n",
313 ldlm_ns_name(ns));
314 else
315 lprocfs_remove(&ns->ns_proc_dir_entry);
316
317 if (ns->ns_stats != NULL)
318 lprocfs_free_stats(&ns->ns_stats);
319 }
320
321 #define LDLM_NS_ADD_VAR(name, var, ops) \
322 do { \
323 snprintf(lock_name, MAX_STRING_SIZE, name); \
324 lock_vars[0].data = var; \
325 lock_vars[0].fops = ops; \
326 lprocfs_add_vars(ns_pde, lock_vars, NULL); \
327 } while (0)
328
329 int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
330 {
331 struct lprocfs_vars lock_vars[2];
332 char lock_name[MAX_STRING_SIZE + 1];
333 struct proc_dir_entry *ns_pde;
334
335 LASSERT(ns != NULL);
336 LASSERT(ns->ns_rs_hash != NULL);
337
338 if (ns->ns_proc_dir_entry != NULL) {
339 ns_pde = ns->ns_proc_dir_entry;
340 } else {
341 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
342 if (ns_pde == NULL)
343 return -ENOMEM;
344 ns->ns_proc_dir_entry = ns_pde;
345 }
346
347 ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
348 if (ns->ns_stats == NULL)
349 return -ENOMEM;
350
351 lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
352 LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
353
354 lock_name[MAX_STRING_SIZE] = '\0';
355
356 memset(lock_vars, 0, sizeof(lock_vars));
357 lock_vars[0].name = lock_name;
358
359 LDLM_NS_ADD_VAR("resource_count", ns, &lprocfs_ns_resources_fops);
360 LDLM_NS_ADD_VAR("lock_count", ns, &lprocfs_ns_locks_fops);
361
362 if (ns_is_client(ns)) {
363 LDLM_NS_ADD_VAR("lock_unused_count", &ns->ns_nr_unused,
364 &ldlm_uint_fops);
365 LDLM_NS_ADD_VAR("lru_size", ns, &lprocfs_lru_size_fops);
366 LDLM_NS_ADD_VAR("lru_max_age", &ns->ns_max_age,
367 &ldlm_rw_uint_fops);
368 LDLM_NS_ADD_VAR("early_lock_cancel", ns, &lprocfs_elc_fops);
369 } else {
370 LDLM_NS_ADD_VAR("ctime_age_limit", &ns->ns_ctime_age_limit,
371 &ldlm_rw_uint_fops);
372 LDLM_NS_ADD_VAR("lock_timeouts", &ns->ns_timeouts,
373 &ldlm_uint_fops);
374 LDLM_NS_ADD_VAR("max_nolock_bytes", &ns->ns_max_nolock_size,
375 &ldlm_rw_uint_fops);
376 LDLM_NS_ADD_VAR("contention_seconds", &ns->ns_contention_time,
377 &ldlm_rw_uint_fops);
378 LDLM_NS_ADD_VAR("contended_locks", &ns->ns_contended_locks,
379 &ldlm_rw_uint_fops);
380 LDLM_NS_ADD_VAR("max_parallel_ast", &ns->ns_max_parallel_ast,
381 &ldlm_rw_uint_fops);
382 }
383 return 0;
384 }
385 #undef MAX_STRING_SIZE
386 #else /* CONFIG_PROC_FS */
387
388 #define ldlm_namespace_proc_unregister(ns) ({;})
389 #define ldlm_namespace_proc_register(ns) ({0;})
390
391 #endif /* CONFIG_PROC_FS */
392
393 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
394 const void *key, unsigned mask)
395 {
396 const struct ldlm_res_id *id = key;
397 unsigned val = 0;
398 unsigned i;
399
400 for (i = 0; i < RES_NAME_SIZE; i++)
401 val += id->name[i];
402 return val & mask;
403 }
404
405 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
406 const void *key, unsigned mask)
407 {
408 const struct ldlm_res_id *id = key;
409 struct lu_fid fid;
410 __u32 hash;
411 __u32 val;
412
413 fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
414 fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
415 fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
416
417 hash = fid_flatten32(&fid);
418 hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
419 if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
420 val = id->name[LUSTRE_RES_ID_HSH_OFF];
421 hash += (val >> 5) + (val << 11);
422 } else {
423 val = fid_oid(&fid);
424 }
425 hash = hash_long(hash, hs->hs_bkt_bits);
426 /* mix in another factor derived from the hash table pointer */
427 hash -= hash_long((unsigned long)hs, val % 11 + 3);
428
429 hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
430 hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
431
432 return hash & mask;
433 }
434
435 static void *ldlm_res_hop_key(struct hlist_node *hnode)
436 {
437 struct ldlm_resource *res;
438
439 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
440 return &res->lr_name;
441 }
442
443 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
444 {
445 struct ldlm_resource *res;
446
447 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
448 return ldlm_res_eq((const struct ldlm_res_id *)key,
449 (const struct ldlm_res_id *)&res->lr_name);
450 }
451
452 static void *ldlm_res_hop_object(struct hlist_node *hnode)
453 {
454 return hlist_entry(hnode, struct ldlm_resource, lr_hash);
455 }
456
457 static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
458 {
459 struct ldlm_resource *res;
460
461 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
462 ldlm_resource_getref(res);
463 }
464
465 static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
466 {
467 struct ldlm_resource *res;
468
469 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
470 /* cfs_hash_for_each_nolock is the only path that calls this */
471 ldlm_resource_putref_locked(res);
472 }
473
474 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
475 {
476 struct ldlm_resource *res;
477
478 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
479 ldlm_resource_putref(res);
480 }
481
482 cfs_hash_ops_t ldlm_ns_hash_ops = {
483 .hs_hash = ldlm_res_hop_hash,
484 .hs_key = ldlm_res_hop_key,
485 .hs_keycmp = ldlm_res_hop_keycmp,
486 .hs_keycpy = NULL,
487 .hs_object = ldlm_res_hop_object,
488 .hs_get = ldlm_res_hop_get_locked,
489 .hs_put_locked = ldlm_res_hop_put_locked,
490 .hs_put = ldlm_res_hop_put
491 };
492
493 cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
494 .hs_hash = ldlm_res_hop_fid_hash,
495 .hs_key = ldlm_res_hop_key,
496 .hs_keycmp = ldlm_res_hop_keycmp,
497 .hs_keycpy = NULL,
498 .hs_object = ldlm_res_hop_object,
499 .hs_get = ldlm_res_hop_get_locked,
500 .hs_put_locked = ldlm_res_hop_put_locked,
501 .hs_put = ldlm_res_hop_put
502 };
503
504 typedef struct {
505 ldlm_ns_type_t nsd_type;
506 /** hash bucket bits */
507 unsigned nsd_bkt_bits;
508 /** hash bits */
509 unsigned nsd_all_bits;
510 /** hash operations */
511 cfs_hash_ops_t *nsd_hops;
512 } ldlm_ns_hash_def_t;
513
514 ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
515 {
516 {
517 .nsd_type = LDLM_NS_TYPE_MDC,
518 .nsd_bkt_bits = 11,
519 .nsd_all_bits = 16,
520 .nsd_hops = &ldlm_ns_fid_hash_ops,
521 },
522 {
523 .nsd_type = LDLM_NS_TYPE_MDT,
524 .nsd_bkt_bits = 14,
525 .nsd_all_bits = 21,
526 .nsd_hops = &ldlm_ns_fid_hash_ops,
527 },
528 {
529 .nsd_type = LDLM_NS_TYPE_OSC,
530 .nsd_bkt_bits = 8,
531 .nsd_all_bits = 12,
532 .nsd_hops = &ldlm_ns_hash_ops,
533 },
534 {
535 .nsd_type = LDLM_NS_TYPE_OST,
536 .nsd_bkt_bits = 11,
537 .nsd_all_bits = 17,
538 .nsd_hops = &ldlm_ns_hash_ops,
539 },
540 {
541 .nsd_type = LDLM_NS_TYPE_MGC,
542 .nsd_bkt_bits = 4,
543 .nsd_all_bits = 4,
544 .nsd_hops = &ldlm_ns_hash_ops,
545 },
546 {
547 .nsd_type = LDLM_NS_TYPE_MGT,
548 .nsd_bkt_bits = 4,
549 .nsd_all_bits = 4,
550 .nsd_hops = &ldlm_ns_hash_ops,
551 },
552 {
553 .nsd_type = LDLM_NS_TYPE_UNKNOWN,
554 },
555 };
556
557 /**
558 * Create and initialize new empty namespace.
559 */
560 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
561 ldlm_side_t client,
562 ldlm_appetite_t apt,
563 ldlm_ns_type_t ns_type)
564 {
565 struct ldlm_namespace *ns = NULL;
566 struct ldlm_ns_bucket *nsb;
567 ldlm_ns_hash_def_t *nsd;
568 struct cfs_hash_bd bd;
569 int idx;
570 int rc;
571
572 LASSERT(obd != NULL);
573
574 rc = ldlm_get_ref();
575 if (rc) {
576 CERROR("ldlm_get_ref failed: %d\n", rc);
577 return NULL;
578 }
579
580 for (idx = 0; ; idx++) {
581 nsd = &ldlm_ns_hash_defs[idx];
582 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
583 CERROR("Unknown type %d for ns %s\n", ns_type, name);
584 GOTO(out_ref, NULL);
585 }
586
587 if (nsd->nsd_type == ns_type)
588 break;
589 }
590
591 OBD_ALLOC_PTR(ns);
592 if (!ns)
593 GOTO(out_ref, NULL);
594
595 ns->ns_rs_hash = cfs_hash_create(name,
596 nsd->nsd_all_bits, nsd->nsd_all_bits,
597 nsd->nsd_bkt_bits, sizeof(*nsb),
598 CFS_HASH_MIN_THETA,
599 CFS_HASH_MAX_THETA,
600 nsd->nsd_hops,
601 CFS_HASH_DEPTH |
602 CFS_HASH_BIGNAME |
603 CFS_HASH_SPIN_BKTLOCK |
604 CFS_HASH_NO_ITEMREF);
605 if (ns->ns_rs_hash == NULL)
606 GOTO(out_ns, NULL);
607
608 cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
609 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
610 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
611 nsb->nsb_namespace = ns;
612 }
613
614 ns->ns_obd = obd;
615 ns->ns_appetite = apt;
616 ns->ns_client = client;
617
618 INIT_LIST_HEAD(&ns->ns_list_chain);
619 INIT_LIST_HEAD(&ns->ns_unused_list);
620 spin_lock_init(&ns->ns_lock);
621 atomic_set(&ns->ns_bref, 0);
622 init_waitqueue_head(&ns->ns_waitq);
623
624 ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
625 ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
626 ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
627
628 ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
629 ns->ns_nr_unused = 0;
630 ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
631 ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
632 ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
633 ns->ns_timeouts = 0;
634 ns->ns_orig_connect_flags = 0;
635 ns->ns_connect_flags = 0;
636 ns->ns_stopping = 0;
637 rc = ldlm_namespace_proc_register(ns);
638 if (rc != 0) {
639 CERROR("Can't initialize ns proc, rc %d\n", rc);
640 GOTO(out_hash, rc);
641 }
642
643 idx = ldlm_namespace_nr_read(client);
644 rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
645 if (rc) {
646 CERROR("Can't initialize lock pool, rc %d\n", rc);
647 GOTO(out_proc, rc);
648 }
649
650 ldlm_namespace_register(ns, client);
651 return ns;
652 out_proc:
653 ldlm_namespace_proc_unregister(ns);
654 ldlm_namespace_cleanup(ns, 0);
655 out_hash:
656 cfs_hash_putref(ns->ns_rs_hash);
657 out_ns:
658 OBD_FREE_PTR(ns);
659 out_ref:
660 ldlm_put_ref();
661 return NULL;
662 }
663 EXPORT_SYMBOL(ldlm_namespace_new);
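/*
 * Editorial usage sketch (not part of the original file): how a client-side
 * obd might create its namespace with ldlm_namespace_new() and later tear it
 * down with ldlm_namespace_free(). The obd device, import, and namespace
 * name are placeholders; LDLM_NAMESPACE_MODEST is assumed to be a valid
 * ldlm_appetite_t value for a client namespace.
 *
 *	struct ldlm_namespace *ns;
 *
 *	ns = ldlm_namespace_new(obd, "example-osc", LDLM_NAMESPACE_CLIENT,
 *				LDLM_NAMESPACE_MODEST, LDLM_NS_TYPE_OSC);
 *	if (ns == NULL)
 *		return -ENOMEM;
 *	...
 *	ldlm_namespace_free(ns, imp, 0);
 */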
664
665 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
666
667 /**
668 * Cancel and destroy all locks on a resource.
669 *
670 * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
671 * clean up. This is currently only used for recovery, and we make
672 * certain assumptions as a result--notably, that we shouldn't cancel
673 * locks with refs.
674 */
675 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
676 __u64 flags)
677 {
678 struct list_head *tmp;
679 int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
680 bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
681
682 do {
683 struct ldlm_lock *lock = NULL;
684
685 /* First, look for a lock that has not been cleaned up yet;
686 * all cleaned locks are marked with the LDLM_FL_CLEANED flag. */
687 lock_res(res);
688 list_for_each(tmp, q) {
689 lock = list_entry(tmp, struct ldlm_lock,
690 l_res_link);
691 if (lock->l_flags & LDLM_FL_CLEANED) {
692 lock = NULL;
693 continue;
694 }
695 LDLM_LOCK_GET(lock);
696 lock->l_flags |= LDLM_FL_CLEANED;
697 break;
698 }
699
700 if (lock == NULL) {
701 unlock_res(res);
702 break;
703 }
704
705 /* Set CBPENDING so nothing in the cancellation path
706 * can match this lock. */
707 lock->l_flags |= LDLM_FL_CBPENDING;
708 lock->l_flags |= LDLM_FL_FAILED;
709 lock->l_flags |= flags;
710
711 /* ... without sending a CANCEL message for local_only. */
712 if (local_only)
713 lock->l_flags |= LDLM_FL_LOCAL_ONLY;
714
715 if (local_only && (lock->l_readers || lock->l_writers)) {
716 /* This is a little bit gross, but much better than the
717 * alternative: pretend that we got a blocking AST from
718 * the server, so that when the lock is decref'd, it
719 * will go away ... */
720 unlock_res(res);
721 LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
722 if (lock->l_completion_ast)
723 lock->l_completion_ast(lock, 0, NULL);
724 LDLM_LOCK_RELEASE(lock);
725 continue;
726 }
727
728 if (client) {
729 struct lustre_handle lockh;
730
731 unlock_res(res);
732 ldlm_lock2handle(lock, &lockh);
733 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
734 if (rc)
735 CERROR("ldlm_cli_cancel: %d\n", rc);
736 } else {
737 ldlm_resource_unlink_lock(lock);
738 unlock_res(res);
739 LDLM_DEBUG(lock, "Freeing a lock still held by a "
740 "client node");
741 ldlm_lock_destroy(lock);
742 }
743 LDLM_LOCK_RELEASE(lock);
744 } while (1);
745 }
746
747 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
748 struct hlist_node *hnode, void *arg)
749 {
750 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
751 __u64 flags = *(__u64 *)arg;
752
753 cleanup_resource(res, &res->lr_granted, flags);
754 cleanup_resource(res, &res->lr_converting, flags);
755 cleanup_resource(res, &res->lr_waiting, flags);
756
757 return 0;
758 }
759
760 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
761 struct hlist_node *hnode, void *arg)
762 {
763 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
764
765 lock_res(res);
766 CERROR("%s: namespace resource "DLDLMRES
767 " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
768 ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
769 atomic_read(&res->lr_refcount) - 1);
770
771 ldlm_resource_dump(D_ERROR, res);
772 unlock_res(res);
773 return 0;
774 }
775
776 /**
777 * Cancel and destroy all locks in the namespace.
778 *
779 * Typically used during evictions when server notified client that it was
780 * evicted and all of its state needs to be destroyed.
781 * Also used during shutdown.
782 */
783 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
784 {
785 if (ns == NULL) {
786 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
787 return ELDLM_OK;
788 }
789
790 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
791 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
792 return ELDLM_OK;
793 }
794 EXPORT_SYMBOL(ldlm_namespace_cleanup);
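/*
 * Editorial usage sketch (not part of the original file): as
 * __ldlm_namespace_free() below illustrates, an eviction or forced shutdown
 * typically drops every lock locally without telling the server:
 *
 *	ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
 *
 * Passing flags == 0 instead cancels the locks through the regular cancel
 * path (ldlm_cli_cancel() on the client side).
 */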
795
796 /**
797 * Attempts to free namespace.
798 *
799 * Only used when namespace goes away, like during an unmount.
800 */
801 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
802 {
803 /* At shutdown time, don't call the cancellation callback */
804 ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
805
806 if (atomic_read(&ns->ns_bref) > 0) {
807 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
808 int rc;
809 CDEBUG(D_DLMTRACE,
810 "dlm namespace %s free waiting on refcount %d\n",
811 ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
812 force_wait:
813 if (force)
814 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
815
816 rc = l_wait_event(ns->ns_waitq,
817 atomic_read(&ns->ns_bref) == 0, &lwi);
818
819 /* Forced cleanups should be able to reclaim all references,
820 * so it's safe to wait forever... we can't leak locks... */
821 if (force && rc == -ETIMEDOUT) {
822 LCONSOLE_ERROR("Forced cleanup waiting for %s "
823 "namespace with %d resources in use, "
824 "(rc=%d)\n", ldlm_ns_name(ns),
825 atomic_read(&ns->ns_bref), rc);
826 GOTO(force_wait, rc);
827 }
828
829 if (atomic_read(&ns->ns_bref)) {
830 LCONSOLE_ERROR("Cleanup waiting for %s namespace "
831 "with %d resources in use, (rc=%d)\n",
832 ldlm_ns_name(ns),
833 atomic_read(&ns->ns_bref), rc);
834 return ELDLM_NAMESPACE_EXISTS;
835 }
836 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
837 ldlm_ns_name(ns));
838 }
839
840 return ELDLM_OK;
841 }
842
843 /**
844 * Performs various cleanups on \a ns so that it drops its refcount and
845 * becomes ready for freeing. Waits until the refcount reaches 0.
846 *
847 * The following is done:
848 * (0) Unregister \a ns from its list to make inaccessible for potential
849 * users like pools thread and others;
850 * (1) Clear all locks in \a ns.
851 */
852 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
853 struct obd_import *imp,
854 int force)
855 {
856 int rc;
857
858 if (!ns)
859 return;
860
861 spin_lock(&ns->ns_lock);
862 ns->ns_stopping = 1;
863 spin_unlock(&ns->ns_lock);
864
865 /*
866 * Can fail with -EINTR when force == 0 in which case try harder.
867 */
868 rc = __ldlm_namespace_free(ns, force);
869 if (rc != ELDLM_OK) {
870 if (imp) {
871 ptlrpc_disconnect_import(imp, 0);
872 ptlrpc_invalidate_import(imp);
873 }
874
875 /*
876 * With all requests dropped and the import inactive
877 * we are guaranteed all reference will be dropped.
878 */
879 rc = __ldlm_namespace_free(ns, 1);
880 LASSERT(rc == 0);
881 }
882 }
883
884 /**
885 * Frees the memory structures related to \a ns. This is only done after
886 * ldlm_namespace_free_prior() has successfully removed all resources
887 * referencing \a ns and its refcount has dropped to 0.
888 */
889 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
890 {
891 if (!ns)
892 return;
893
894 /* Make sure that nobody can find this ns in its list. */
895 ldlm_namespace_unregister(ns, ns->ns_client);
896 /* Fini the pool _before_ the parent proc dir is removed. This is important
897 * because ldlm_pool_fini() removes its own proc dir, which is a child of
898 * the parent dir; removing it after the parent may cause an oops. */
899 ldlm_pool_fini(&ns->ns_pool);
900
901 ldlm_namespace_proc_unregister(ns);
902 cfs_hash_putref(ns->ns_rs_hash);
903 /* Namespace \a ns must not be on any list at this point;
904 * otherwise the poold thread could end up using the freed
905 * \a ns. */
906 LASSERT(list_empty(&ns->ns_list_chain));
907 OBD_FREE_PTR(ns);
908 ldlm_put_ref();
909 }
910
911 /**
912 * Cleanup the resource, and free namespace.
913 * bug 12864:
914 * Deadlock issue:
915 * proc1: destroy import
916 * class_disconnect_export(grab cl_sem) ->
917 * -> ldlm_namespace_free ->
918 * -> lprocfs_remove(grab _lprocfs_lock).
919 * proc2: read proc info
920 * lprocfs_fops_read(grab _lprocfs_lock) ->
921 * -> osc_rd_active, etc(grab cl_sem).
922 *
923 * To avoid this, ldlm_namespace_free is split into two parts: the first,
924 * ldlm_namespace_free_prior, cleans up resources that are still in use;
925 * the second, ldlm_namespace_free_post, unregisters the lprocfs entries
926 * and then frees the memory. The latter is called without cli->cl_sem
927 * held.
928 */
929 void ldlm_namespace_free(struct ldlm_namespace *ns,
930 struct obd_import *imp,
931 int force)
932 {
933 ldlm_namespace_free_prior(ns, imp, force);
934 ldlm_namespace_free_post(ns);
935 }
936 EXPORT_SYMBOL(ldlm_namespace_free);
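/*
 * Editorial usage sketch (not part of the original file): a caller that
 * holds cli->cl_sem (see the bug 12864 note above) performs the two halves
 * separately and releases the semaphore in between; the exact locking call
 * is omitted here because it depends on the caller.
 *
 *	ldlm_namespace_free_prior(ns, imp, force);
 *	... release cli->cl_sem so /proc readers cannot deadlock ...
 *	ldlm_namespace_free_post(ns);
 */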
937
938 void ldlm_namespace_get(struct ldlm_namespace *ns)
939 {
940 atomic_inc(&ns->ns_bref);
941 }
942 EXPORT_SYMBOL(ldlm_namespace_get);
943
944 /* This is only for callers that care about refcount */
945 int ldlm_namespace_get_return(struct ldlm_namespace *ns)
946 {
947 return atomic_inc_return(&ns->ns_bref);
948 }
949
950 void ldlm_namespace_put(struct ldlm_namespace *ns)
951 {
952 if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
953 wake_up(&ns->ns_waitq);
954 spin_unlock(&ns->ns_lock);
955 }
956 }
957 EXPORT_SYMBOL(ldlm_namespace_put);
958
959 /** Register \a ns in the list of namespaces */
960 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
961 {
962 mutex_lock(ldlm_namespace_lock(client));
963 LASSERT(list_empty(&ns->ns_list_chain));
964 list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
965 ldlm_namespace_nr_inc(client);
966 mutex_unlock(ldlm_namespace_lock(client));
967 }
968
969 /** Unregister \a ns from the list of namespaces. */
970 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
971 {
972 mutex_lock(ldlm_namespace_lock(client));
973 LASSERT(!list_empty(&ns->ns_list_chain));
974 /* Some asserts and possibly other parts of the code are still
975 * using list_empty(&ns->ns_list_chain). This is why it is
976 * important to use list_del_init() here. */
977 list_del_init(&ns->ns_list_chain);
978 ldlm_namespace_nr_dec(client);
979 mutex_unlock(ldlm_namespace_lock(client));
980 }
981
982 /** Should be called with ldlm_namespace_lock(client) taken. */
983 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
984 ldlm_side_t client)
985 {
986 LASSERT(!list_empty(&ns->ns_list_chain));
987 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
988 list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
989 }
990
991 /** Should be called with ldlm_namespace_lock(client) taken. */
992 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
993 ldlm_side_t client)
994 {
995 LASSERT(!list_empty(&ns->ns_list_chain));
996 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
997 list_move_tail(&ns->ns_list_chain,
998 ldlm_namespace_inactive_list(client));
999 }
1000
1001 /** Should be called with ldlm_namespace_lock(client) taken. */
1002 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
1003 {
1004 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1005 LASSERT(!list_empty(ldlm_namespace_list(client)));
1006 return container_of(ldlm_namespace_list(client)->next,
1007 struct ldlm_namespace, ns_list_chain);
1008 }
1009
1010 /** Create and initialize new resource. */
1011 static struct ldlm_resource *ldlm_resource_new(void)
1012 {
1013 struct ldlm_resource *res;
1014 int idx;
1015
1016 OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1017 if (res == NULL)
1018 return NULL;
1019
1020 INIT_LIST_HEAD(&res->lr_granted);
1021 INIT_LIST_HEAD(&res->lr_converting);
1022 INIT_LIST_HEAD(&res->lr_waiting);
1023
1024 /* Initialize interval trees for each lock mode. */
1025 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1026 res->lr_itree[idx].lit_size = 0;
1027 res->lr_itree[idx].lit_mode = 1 << idx;
1028 res->lr_itree[idx].lit_root = NULL;
1029 }
1030
1031 atomic_set(&res->lr_refcount, 1);
1032 spin_lock_init(&res->lr_lock);
1033 lu_ref_init(&res->lr_reference);
1034
1035 /* The creator of the resource must unlock the mutex after LVB
1036 * initialization. */
1037 mutex_init(&res->lr_lvb_mutex);
1038 mutex_lock(&res->lr_lvb_mutex);
1039
1040 return res;
1041 }
1042
1043 /**
1044 * Return a reference to resource with given name, creating it if necessary.
1045 * Args: namespace with ns_lock unlocked
1046 * Locks: takes and releases NS hash-lock and res->lr_lock
1047 * Returns: referenced, unlocked ldlm_resource or NULL
1048 */
1049 struct ldlm_resource *
1050 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1051 const struct ldlm_res_id *name, ldlm_type_t type, int create)
1052 {
1053 struct hlist_node *hnode;
1054 struct ldlm_resource *res;
1055 struct cfs_hash_bd bd;
1056 __u64 version;
1057 int ns_refcount = 0;
1058
1059 LASSERT(ns != NULL);
1060 LASSERT(parent == NULL);
1061 LASSERT(ns->ns_rs_hash != NULL);
1062 LASSERT(name->name[0] != 0);
1063
1064 cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1065 hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1066 if (hnode != NULL) {
1067 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1068 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1069 /* Synchronize with regard to resource creation. */
1070 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1071 mutex_lock(&res->lr_lvb_mutex);
1072 mutex_unlock(&res->lr_lvb_mutex);
1073 }
1074
1075 if (unlikely(res->lr_lvb_len < 0)) {
1076 ldlm_resource_putref(res);
1077 res = NULL;
1078 }
1079 return res;
1080 }
1081
1082 version = cfs_hash_bd_version_get(&bd);
1083 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1084
1085 if (create == 0)
1086 return NULL;
1087
1088 LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1089 "type: %d\n", type);
1090 res = ldlm_resource_new();
1091 if (!res)
1092 return NULL;
1093
1094 res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1095 res->lr_name = *name;
1096 res->lr_type = type;
1097 res->lr_most_restr = LCK_NL;
1098
1099 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1100 hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1101 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1102
1103 if (hnode != NULL) {
1104 /* Someone won the race and already added the resource. */
1105 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1106 /* Clean lu_ref for failed resource. */
1107 lu_ref_fini(&res->lr_reference);
1108 /* We have taken lr_lvb_mutex. Drop it. */
1109 mutex_unlock(&res->lr_lvb_mutex);
1110 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
1111
1112 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1113 /* Synchronize with regard to resource creation. */
1114 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1115 mutex_lock(&res->lr_lvb_mutex);
1116 mutex_unlock(&res->lr_lvb_mutex);
1117 }
1118
1119 if (unlikely(res->lr_lvb_len < 0)) {
1120 ldlm_resource_putref(res);
1121 res = NULL;
1122 }
1123 return res;
1124 }
1125 /* We won! Let's add the resource. */
1126 cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1127 if (cfs_hash_bd_count_get(&bd) == 1)
1128 ns_refcount = ldlm_namespace_get_return(ns);
1129
1130 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1131 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1132 int rc;
1133
1134 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1135 rc = ns->ns_lvbo->lvbo_init(res);
1136 if (rc < 0) {
1137 CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
1138 ns->ns_obd->obd_name, name->name[0],
1139 name->name[1], rc);
1140 if (res->lr_lvb_data) {
1141 OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
1142 res->lr_lvb_data = NULL;
1143 }
1144 res->lr_lvb_len = rc;
1145 mutex_unlock(&res->lr_lvb_mutex);
1146 ldlm_resource_putref(res);
1147 return NULL;
1148 }
1149 }
1150
1151 /* The resource was created with lr_lvb_mutex held; drop it now. */
1152 mutex_unlock(&res->lr_lvb_mutex);
1153
1154 /* Let's see if we happened to be the very first resource in this
1155 * namespace. If so, and this is a client namespace, we need to move
1156 * the namespace into the active namespaces list to be patrolled by
1157 * the ldlm_poold. */
1158 if (ns_is_client(ns) && ns_refcount == 1) {
1159 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1160 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1161 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1162 }
1163
1164 return res;
1165 }
1166 EXPORT_SYMBOL(ldlm_resource_get);
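/*
 * Editorial usage sketch (not part of the original file): a lookup-then-
 * release pattern against ldlm_resource_get(). The namespace pointer and
 * resource id are placeholders; note that parent must be NULL and that the
 * returned resource is referenced but unlocked.
 *
 *	struct ldlm_res_id res_id = { .name = { 0x1234, 0, 0, 0 } };
 *	struct ldlm_resource *res;
 *
 *	res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *	if (res == NULL)
 *		return -ENOMEM;
 *	lock_res(res);
 *	ldlm_resource_dump(D_INFO, res);
 *	unlock_res(res);
 *	ldlm_resource_putref(res);
 */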
1167
1168 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1169 {
1170 LASSERT(res != NULL);
1171 LASSERT(res != LP_POISON);
1172 atomic_inc(&res->lr_refcount);
1173 CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1174 atomic_read(&res->lr_refcount));
1175 return res;
1176 }
1177
1178 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1179 struct ldlm_resource *res)
1180 {
1181 struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1182
1183 if (!list_empty(&res->lr_granted)) {
1184 ldlm_resource_dump(D_ERROR, res);
1185 LBUG();
1186 }
1187
1188 if (!list_empty(&res->lr_converting)) {
1189 ldlm_resource_dump(D_ERROR, res);
1190 LBUG();
1191 }
1192
1193 if (!list_empty(&res->lr_waiting)) {
1194 ldlm_resource_dump(D_ERROR, res);
1195 LBUG();
1196 }
1197
1198 cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1199 bd, &res->lr_hash);
1200 lu_ref_fini(&res->lr_reference);
1201 if (cfs_hash_bd_count_get(bd) == 0)
1202 ldlm_namespace_put(nsb->nsb_namespace);
1203 }
1204
1205 /* Returns 1 if the resource was freed, 0 if it remains. */
1206 int ldlm_resource_putref(struct ldlm_resource *res)
1207 {
1208 struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1209 struct cfs_hash_bd bd;
1210
1211 LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1212 CDEBUG(D_INFO, "putref res: %p count: %d\n",
1213 res, atomic_read(&res->lr_refcount) - 1);
1214
1215 cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1216 if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1217 __ldlm_resource_putref_final(&bd, res);
1218 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1219 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1220 ns->ns_lvbo->lvbo_free(res);
1221 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
1222 return 1;
1223 }
1224 return 0;
1225 }
1226 EXPORT_SYMBOL(ldlm_resource_putref);
1227
1228 /* Returns 1 if the resource was freed, 0 if it remains. */
1229 int ldlm_resource_putref_locked(struct ldlm_resource *res)
1230 {
1231 struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1232
1233 LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1234 CDEBUG(D_INFO, "putref res: %p count: %d\n",
1235 res, atomic_read(&res->lr_refcount) - 1);
1236
1237 if (atomic_dec_and_test(&res->lr_refcount)) {
1238 struct cfs_hash_bd bd;
1239
1240 cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
1241 &res->lr_name, &bd);
1242 __ldlm_resource_putref_final(&bd, res);
1243 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1244 /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
1245 * so this path is never reached from cfs_hash_del;
1246 * cfs_hash_for_each_nolock is the only way to get here,
1247 * in which case it is safe to release cfs_hash_bd_lock.
1248 */
1249 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1250 ns->ns_lvbo->lvbo_free(res);
1251 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
1252
1253 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1254 return 1;
1255 }
1256 return 0;
1257 }
1258
1259 /**
1260 * Add a lock into a given resource into specified lock list.
1261 */
1262 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1263 struct ldlm_lock *lock)
1264 {
1265 check_res_locked(res);
1266
1267 LDLM_DEBUG(lock, "About to add this lock:\n");
1268
1269 if (lock->l_flags & LDLM_FL_DESTROYED) {
1270 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1271 return;
1272 }
1273
1274 LASSERT(list_empty(&lock->l_res_link));
1275
1276 list_add_tail(&lock->l_res_link, head);
1277 }
1278
1279 /**
1280 * Insert a lock into resource after specified lock.
1281 *
1282 * Obtain resource description from the lock we are inserting after.
1283 */
1284 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1285 struct ldlm_lock *new)
1286 {
1287 struct ldlm_resource *res = original->l_resource;
1288
1289 check_res_locked(res);
1290
1291 ldlm_resource_dump(D_INFO, res);
1292 LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
1293
1294 if (new->l_flags & LDLM_FL_DESTROYED) {
1295 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1296 goto out;
1297 }
1298
1299 LASSERT(list_empty(&new->l_res_link));
1300
1301 list_add(&new->l_res_link, &original->l_res_link);
1302 out:;
1303 }
1304
1305 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1306 {
1307 int type = lock->l_resource->lr_type;
1308
1309 check_res_locked(lock->l_resource);
1310 if (type == LDLM_IBITS || type == LDLM_PLAIN)
1311 ldlm_unlink_lock_skiplist(lock);
1312 else if (type == LDLM_EXTENT)
1313 ldlm_extent_unlink_lock(lock);
1314 list_del_init(&lock->l_res_link);
1315 }
1316 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1317
1318 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1319 {
1320 desc->lr_type = res->lr_type;
1321 desc->lr_name = res->lr_name;
1322 }
1323
1324 /**
1325 * Print information about all locks in all namespaces on this node to debug
1326 * log.
1327 */
1328 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1329 {
1330 struct list_head *tmp;
1331
1332 if (!((libcfs_debug | D_ERROR) & level))
1333 return;
1334
1335 mutex_lock(ldlm_namespace_lock(client));
1336
1337 list_for_each(tmp, ldlm_namespace_list(client)) {
1338 struct ldlm_namespace *ns;
1339 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1340 ldlm_namespace_dump(level, ns);
1341 }
1342
1343 mutex_unlock(ldlm_namespace_lock(client));
1344 }
1345 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
1346
1347 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1348 struct hlist_node *hnode, void *arg)
1349 {
1350 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1351 int level = (int)(unsigned long)arg;
1352
1353 lock_res(res);
1354 ldlm_resource_dump(level, res);
1355 unlock_res(res);
1356
1357 return 0;
1358 }
1359
1360 /**
1361 * Print information about all locks in this namespace on this node to debug
1362 * log.
1363 */
1364 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1365 {
1366 if (!((libcfs_debug | D_ERROR) & level))
1367 return;
1368
1369 CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1370 ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1371 ns_is_client(ns) ? "client" : "server");
1372
1373 if (time_before(cfs_time_current(), ns->ns_next_dump))
1374 return;
1375
1376 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1377 ldlm_res_hash_dump,
1378 (void *)(unsigned long)level);
1379 spin_lock(&ns->ns_lock);
1380 ns->ns_next_dump = cfs_time_shift(10);
1381 spin_unlock(&ns->ns_lock);
1382 }
1383 EXPORT_SYMBOL(ldlm_namespace_dump);
1384
1385 /**
1386 * Print information about all locks in this resource to debug log.
1387 */
1388 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1389 {
1390 struct ldlm_lock *lock;
1391 unsigned int granted = 0;
1392
1393 CLASSERT(RES_NAME_SIZE == 4);
1394
1395 if (!((libcfs_debug | D_ERROR) & level))
1396 return;
1397
1398 CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1399 PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1400
1401 if (!list_empty(&res->lr_granted)) {
1402 CDEBUG(level, "Granted locks (in reverse order):\n");
1403 list_for_each_entry_reverse(lock, &res->lr_granted,
1404 l_res_link) {
1405 LDLM_DEBUG_LIMIT(level, lock, "###");
1406 if (!(level & D_CANTMASK) &&
1407 ++granted > ldlm_dump_granted_max) {
1408 CDEBUG(level, "only dump %d granted locks to "
1409 "avoid DDOS.\n", granted);
1410 break;
1411 }
1412 }
1413 }
1414 if (!list_empty(&res->lr_converting)) {
1415 CDEBUG(level, "Converting locks:\n");
1416 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1417 LDLM_DEBUG_LIMIT(level, lock, "###");
1418 }
1419 if (!list_empty(&res->lr_waiting)) {
1420 CDEBUG(level, "Waiting locks:\n");
1421 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1422 LDLM_DEBUG_LIMIT(level, lock, "###");
1423 }
1424 }