staging: lustre: remove RETURN macro
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2010, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/ldlm/ldlm_resource.c
37 *
38 * Author: Phil Schwan <phil@clusterfs.com>
39 * Author: Peter Braam <braam@clusterfs.com>
40 */
41
42#define DEBUG_SUBSYSTEM S_LDLM
 43#include <lustre_dlm.h>
44
45#include <lustre_fid.h>
46#include <obd_class.h>
47#include "ldlm_internal.h"
48
49struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
50
51int ldlm_srv_namespace_nr = 0;
52int ldlm_cli_namespace_nr = 0;
53
54struct mutex ldlm_srv_namespace_lock;
55LIST_HEAD(ldlm_srv_namespace_list);
56
57struct mutex ldlm_cli_namespace_lock;
58/* Client Namespaces that have active resources in them.
59 * Once all resources go away, ldlm_poold moves such namespaces to the
60 * inactive list */
61LIST_HEAD(ldlm_cli_active_namespace_list);
62/* Client namespaces that don't have any locks in them */
63LIST_HEAD(ldlm_cli_inactive_namespace_list);
64
65proc_dir_entry_t *ldlm_type_proc_dir = NULL;
66proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
67proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
68
69extern unsigned int ldlm_cancel_unused_locks_before_replay;
70
 71/* During a debug dump, print at most this many granted locks for one
 72 * resource to avoid flooding the log (DoS). */
73unsigned int ldlm_dump_granted_max = 256;
74
75#ifdef LPROCFS
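/* Writing anything to the "dump_namespaces" proc file below dumps every
 * server and client namespace to the debug log at D_DLMTRACE level. */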
76static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
77 size_t count, loff_t *off)
78{
79 ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
80 ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
 81 return count;
 82}
83LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
84
85LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
86LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
87
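/* Create the top-level ldlm proc tree under proc_lustre_root, with
 * "namespaces" and "services" subdirectories and the control files
 * declared in the list above. */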
88int ldlm_proc_setup(void)
89{
90 int rc;
91 struct lprocfs_vars list[] = {
92 { "dump_namespaces", &ldlm_dump_ns_fops, 0, 0222 },
93 { "dump_granted_max", &ldlm_rw_uint_fops,
94 &ldlm_dump_granted_max },
95 { "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
96 &ldlm_cancel_unused_locks_before_replay },
 97 { NULL }};
98 LASSERT(ldlm_ns_proc_dir == NULL);
99
100 ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
101 proc_lustre_root,
102 NULL, NULL);
103 if (IS_ERR(ldlm_type_proc_dir)) {
104 CERROR("LProcFS failed in ldlm-init\n");
105 rc = PTR_ERR(ldlm_type_proc_dir);
106 GOTO(err, rc);
107 }
108
109 ldlm_ns_proc_dir = lprocfs_register("namespaces",
110 ldlm_type_proc_dir,
111 NULL, NULL);
112 if (IS_ERR(ldlm_ns_proc_dir)) {
113 CERROR("LProcFS failed in ldlm-init\n");
114 rc = PTR_ERR(ldlm_ns_proc_dir);
115 GOTO(err_type, rc);
116 }
117
118 ldlm_svc_proc_dir = lprocfs_register("services",
119 ldlm_type_proc_dir,
120 NULL, NULL);
121 if (IS_ERR(ldlm_svc_proc_dir)) {
122 CERROR("LProcFS failed in ldlm-init\n");
123 rc = PTR_ERR(ldlm_svc_proc_dir);
124 GOTO(err_ns, rc);
125 }
126
127 rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
128
 129 return 0;
130
131err_ns:
132 lprocfs_remove(&ldlm_ns_proc_dir);
133err_type:
134 lprocfs_remove(&ldlm_type_proc_dir);
135err:
136 ldlm_svc_proc_dir = NULL;
137 ldlm_type_proc_dir = NULL;
138 ldlm_ns_proc_dir = NULL;
 139 return rc;
140}
141
142void ldlm_proc_cleanup(void)
143{
144 if (ldlm_svc_proc_dir)
145 lprocfs_remove(&ldlm_svc_proc_dir);
146
147 if (ldlm_ns_proc_dir)
148 lprocfs_remove(&ldlm_ns_proc_dir);
149
150 if (ldlm_type_proc_dir)
151 lprocfs_remove(&ldlm_type_proc_dir);
152
153 ldlm_svc_proc_dir = NULL;
154 ldlm_type_proc_dir = NULL;
155 ldlm_ns_proc_dir = NULL;
156}
157
 158static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
 159{
 160 struct ldlm_namespace *ns = m->private;
161 __u64 res = 0;
162 cfs_hash_bd_t bd;
163 int i;
164
 165 /* result is not strictly consistent */
166 cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
167 res += cfs_hash_bd_count_get(&bd);
 168 return lprocfs_rd_u64(m, &res);
 169}
 170LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
 171
 172static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
 173{
 174 struct ldlm_namespace *ns = m->private;
175 __u64 locks;
176
177 locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
178 LPROCFS_FIELDS_FLAGS_SUM);
 179 return lprocfs_rd_u64(m, &locks);
 180}
 181LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
 182
 183static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
 184{
 185 struct ldlm_namespace *ns = m->private;
186 __u32 *nr = &ns->ns_max_unused;
187
188 if (ns_connect_lru_resize(ns))
189 nr = &ns->ns_nr_unused;
 190 return lprocfs_rd_uint(m, nr);
191}
192
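/* "lru_size" accepts either the string "clear", which cancels all unused
 * locks in the namespace, or a number: a non-zero value sets the LRU size
 * and disables server-driven LRU resizing, while 0 re-enables LRU resizing
 * if the server originally advertised support for it. */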
193static ssize_t lprocfs_lru_size_seq_write(struct file *file, const char *buffer,
194 size_t count, loff_t *off)
 195{
 196 struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
197 char dummy[MAX_STRING_SIZE + 1], *end;
198 unsigned long tmp;
199 int lru_resize;
200
201 dummy[MAX_STRING_SIZE] = '\0';
202 if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
203 return -EFAULT;
204
205 if (strncmp(dummy, "clear", 5) == 0) {
206 CDEBUG(D_DLMTRACE,
207 "dropping all unused locks from namespace %s\n",
208 ldlm_ns_name(ns));
209 if (ns_connect_lru_resize(ns)) {
210 int canceled, unused = ns->ns_nr_unused;
211
212 /* Try to cancel all @ns_nr_unused locks. */
213 canceled = ldlm_cancel_lru(ns, unused, 0,
214 LDLM_CANCEL_PASSED);
215 if (canceled < unused) {
216 CDEBUG(D_DLMTRACE,
217 "not all requested locks are canceled, "
218 "requested: %d, canceled: %d\n", unused,
219 canceled);
220 return -EINVAL;
221 }
222 } else {
223 tmp = ns->ns_max_unused;
224 ns->ns_max_unused = 0;
225 ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
226 ns->ns_max_unused = tmp;
227 }
228 return count;
229 }
230
231 tmp = simple_strtoul(dummy, &end, 0);
232 if (dummy == end) {
233 CERROR("invalid value written\n");
234 return -EINVAL;
235 }
236 lru_resize = (tmp == 0);
237
238 if (ns_connect_lru_resize(ns)) {
239 if (!lru_resize)
240 ns->ns_max_unused = (unsigned int)tmp;
241
242 if (tmp > ns->ns_nr_unused)
243 tmp = ns->ns_nr_unused;
244 tmp = ns->ns_nr_unused - tmp;
245
246 CDEBUG(D_DLMTRACE,
247 "changing namespace %s unused locks from %u to %u\n",
248 ldlm_ns_name(ns), ns->ns_nr_unused,
249 (unsigned int)tmp);
250 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
251
252 if (!lru_resize) {
253 CDEBUG(D_DLMTRACE,
254 "disable lru_resize for namespace %s\n",
255 ldlm_ns_name(ns));
256 ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
257 }
258 } else {
259 CDEBUG(D_DLMTRACE,
260 "changing namespace %s max_unused from %u to %u\n",
261 ldlm_ns_name(ns), ns->ns_max_unused,
262 (unsigned int)tmp);
263 ns->ns_max_unused = (unsigned int)tmp;
264 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
265
266 /* Make sure that LRU resize was originally supported before
267 * turning it on here. */
268 if (lru_resize &&
269 (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
270 CDEBUG(D_DLMTRACE,
271 "enable lru_resize for namespace %s\n",
272 ldlm_ns_name(ns));
273 ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
274 }
275 }
276
277 return count;
278}
 279LPROC_SEQ_FOPS(lprocfs_lru_size);
 280
 281static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
 282{
 283 struct ldlm_namespace *ns = m->private;
284 unsigned int supp = ns_connect_cancelset(ns);
285
 286 return lprocfs_rd_uint(m, &supp);
287}
288
289static ssize_t lprocfs_elc_seq_write(struct file *file, const char *buffer,
290 size_t count, loff_t *off)
 291{
 292 struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
293 unsigned int supp = -1;
294 int rc;
295
296 rc = lprocfs_wr_uint(file, buffer, count, &supp);
297 if (rc < 0)
298 return rc;
299
300 if (supp == 0)
301 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
302 else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
303 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
304 return count;
305}
 306LPROC_SEQ_FOPS(lprocfs_elc);
307
308void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
309{
 310 if (ns->ns_proc_dir_entry == NULL)
311 CERROR("dlm namespace %s has no procfs dir?\n",
312 ldlm_ns_name(ns));
313 else
314 lprocfs_remove(&ns->ns_proc_dir_entry);
315
316 if (ns->ns_stats != NULL)
317 lprocfs_free_stats(&ns->ns_stats);
318}
319
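/* Helper for ldlm_namespace_proc_register() below: fill the single-entry
 * lock_vars[] template with the given name, data and fops, and register it
 * as one proc file in the namespace directory. */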
320#define LDLM_NS_ADD_VAR(name, var, ops) \
321 do { \
322 snprintf(lock_name, MAX_STRING_SIZE, name); \
323 lock_vars[0].data = var; \
324 lock_vars[0].fops = ops; \
325 lprocfs_add_vars(ns_pde, lock_vars, 0); \
326 } while (0)
327
328int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
329{
330 struct lprocfs_vars lock_vars[2];
331 char lock_name[MAX_STRING_SIZE + 1];
 332 proc_dir_entry_t *ns_pde;
333
334 LASSERT(ns != NULL);
335 LASSERT(ns->ns_rs_hash != NULL);
336
337 if (ns->ns_proc_dir_entry != NULL) {
338 ns_pde = ns->ns_proc_dir_entry;
339 } else {
340 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
341 if (ns_pde == NULL)
342 return -ENOMEM;
343 ns->ns_proc_dir_entry = ns_pde;
344 }
345
346 ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
347 if (ns->ns_stats == NULL)
348 return -ENOMEM;
349
350 lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
351 LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
352
353 lock_name[MAX_STRING_SIZE] = '\0';
354
355 memset(lock_vars, 0, sizeof(lock_vars));
356 lock_vars[0].name = lock_name;
357
358 LDLM_NS_ADD_VAR("resource_count", ns, &lprocfs_ns_resources_fops);
359 LDLM_NS_ADD_VAR("lock_count", ns, &lprocfs_ns_locks_fops);
360
361 if (ns_is_client(ns)) {
362 LDLM_NS_ADD_VAR("lock_unused_count", &ns->ns_nr_unused,
363 &ldlm_uint_fops);
364 LDLM_NS_ADD_VAR("lru_size", ns, &lprocfs_lru_size_fops);
365 LDLM_NS_ADD_VAR("lru_max_age", &ns->ns_max_age,
366 &ldlm_rw_uint_fops);
367 LDLM_NS_ADD_VAR("early_lock_cancel", ns, &lprocfs_elc_fops);
 368 } else {
369 LDLM_NS_ADD_VAR("ctime_age_limit", &ns->ns_ctime_age_limit,
370 &ldlm_rw_uint_fops);
371 LDLM_NS_ADD_VAR("lock_timeouts", &ns->ns_timeouts,
372 &ldlm_uint_fops);
373 LDLM_NS_ADD_VAR("max_nolock_bytes", &ns->ns_max_nolock_size,
374 &ldlm_rw_uint_fops);
375 LDLM_NS_ADD_VAR("contention_seconds", &ns->ns_contention_time,
376 &ldlm_rw_uint_fops);
377 LDLM_NS_ADD_VAR("contended_locks", &ns->ns_contended_locks,
378 &ldlm_rw_uint_fops);
379 LDLM_NS_ADD_VAR("max_parallel_ast", &ns->ns_max_parallel_ast,
380 &ldlm_rw_uint_fops);
381 }
382 return 0;
383}
384#undef MAX_STRING_SIZE
385#else /* LPROCFS */
386
387#define ldlm_namespace_proc_unregister(ns) ({;})
388#define ldlm_namespace_proc_register(ns) ({0;})
389
390#endif /* LPROCFS */
391
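/* Default resource hash: sum all components of the resource name and mask
 * the result down to the requested range. */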
392static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
393 const void *key, unsigned mask)
394{
395 const struct ldlm_res_id *id = key;
396 unsigned val = 0;
397 unsigned i;
398
399 for (i = 0; i < RES_NAME_SIZE; i++)
400 val += id->name[i];
401 return val & mask;
402}
403
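/* FID-aware resource hash: rebuild the lu_fid packed into the resource name
 * and mix its sequence, OID and optional name-hash fields for a better
 * spread of metadata resources across hash buckets. */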
404static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
405 const void *key, unsigned mask)
406{
407 const struct ldlm_res_id *id = key;
408 struct lu_fid fid;
409 __u32 hash;
410 __u32 val;
411
412 fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
413 fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
414 fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
415
416 hash = fid_flatten32(&fid);
417 hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
418 if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
419 val = id->name[LUSTRE_RES_ID_HSH_OFF];
420 hash += (val >> 5) + (val << 11);
421 } else {
422 val = fid_oid(&fid);
423 }
424 hash = cfs_hash_long(hash, hs->hs_bkt_bits);
425 /* give me another random factor */
426 hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3);
427
428 hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
429 hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
430
431 return hash & mask;
432}
433
434static void *ldlm_res_hop_key(struct hlist_node *hnode)
435{
436 struct ldlm_resource *res;
437
438 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
439 return &res->lr_name;
440}
441
442static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
443{
444 struct ldlm_resource *res;
445
446 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
447 return ldlm_res_eq((const struct ldlm_res_id *)key,
448 (const struct ldlm_res_id *)&res->lr_name);
449}
450
451static void *ldlm_res_hop_object(struct hlist_node *hnode)
452{
453 return hlist_entry(hnode, struct ldlm_resource, lr_hash);
454}
455
456static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
457{
458 struct ldlm_resource *res;
459
460 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
461 ldlm_resource_getref(res);
462}
463
464static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
465{
466 struct ldlm_resource *res;
467
468 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
469 /* cfs_hash_for_each_nolock is the only chance we call it */
470 ldlm_resource_putref_locked(res);
471}
472
473static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
474{
475 struct ldlm_resource *res;
476
477 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
478 ldlm_resource_putref(res);
479}
480
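/* The two hash-ops tables below differ only in the hash function: the plain
 * sum hash for data (OSC/OST/MGC/MGT) namespaces and the FID-aware hash for
 * metadata (MDC/MDT) namespaces, as selected by ldlm_ns_hash_defs[]. */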
481cfs_hash_ops_t ldlm_ns_hash_ops = {
482 .hs_hash = ldlm_res_hop_hash,
483 .hs_key = ldlm_res_hop_key,
484 .hs_keycmp = ldlm_res_hop_keycmp,
485 .hs_keycpy = NULL,
486 .hs_object = ldlm_res_hop_object,
487 .hs_get = ldlm_res_hop_get_locked,
488 .hs_put_locked = ldlm_res_hop_put_locked,
489 .hs_put = ldlm_res_hop_put
490};
491
492cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
493 .hs_hash = ldlm_res_hop_fid_hash,
494 .hs_key = ldlm_res_hop_key,
495 .hs_keycmp = ldlm_res_hop_keycmp,
496 .hs_keycpy = NULL,
497 .hs_object = ldlm_res_hop_object,
498 .hs_get = ldlm_res_hop_get_locked,
499 .hs_put_locked = ldlm_res_hop_put_locked,
500 .hs_put = ldlm_res_hop_put
501};
502
503typedef struct {
504 ldlm_ns_type_t nsd_type;
505 /** hash bucket bits */
506 unsigned nsd_bkt_bits;
507 /** hash bits */
508 unsigned nsd_all_bits;
509 /** hash operations */
510 cfs_hash_ops_t *nsd_hops;
511} ldlm_ns_hash_def_t;
512
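/* Per-namespace-type hash geometry: total and per-bucket hash bits plus the
 * hash operations to use. ldlm_namespace_new() scans this table and stops
 * at the LDLM_NS_TYPE_UNKNOWN sentinel. */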
513ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
514{
515 {
516 .nsd_type = LDLM_NS_TYPE_MDC,
517 .nsd_bkt_bits = 11,
518 .nsd_all_bits = 16,
519 .nsd_hops = &ldlm_ns_fid_hash_ops,
520 },
521 {
522 .nsd_type = LDLM_NS_TYPE_MDT,
523 .nsd_bkt_bits = 14,
524 .nsd_all_bits = 21,
525 .nsd_hops = &ldlm_ns_fid_hash_ops,
526 },
527 {
528 .nsd_type = LDLM_NS_TYPE_OSC,
529 .nsd_bkt_bits = 8,
530 .nsd_all_bits = 12,
531 .nsd_hops = &ldlm_ns_hash_ops,
532 },
533 {
534 .nsd_type = LDLM_NS_TYPE_OST,
535 .nsd_bkt_bits = 11,
536 .nsd_all_bits = 17,
537 .nsd_hops = &ldlm_ns_hash_ops,
538 },
539 {
540 .nsd_type = LDLM_NS_TYPE_MGC,
541 .nsd_bkt_bits = 4,
542 .nsd_all_bits = 4,
543 .nsd_hops = &ldlm_ns_hash_ops,
544 },
545 {
546 .nsd_type = LDLM_NS_TYPE_MGT,
547 .nsd_bkt_bits = 4,
548 .nsd_all_bits = 4,
549 .nsd_hops = &ldlm_ns_hash_ops,
550 },
551 {
552 .nsd_type = LDLM_NS_TYPE_UNKNOWN,
553 },
554};
555
556/**
557 * Create and initialize new empty namespace.
558 */
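/*
 * Illustrative call sequence only (the obd, name, appetite and type here
 * are examples, not taken from any particular caller):
 *
 *	struct ldlm_namespace *ns;
 *
 *	ns = ldlm_namespace_new(obd, "example-osc", LDLM_NAMESPACE_CLIENT,
 *				LDLM_NAMESPACE_MODEST, LDLM_NS_TYPE_OSC);
 *	if (ns == NULL)
 *		return -ENOMEM;
 *	...
 *	ldlm_namespace_free(ns, imp, 0);
 */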
559struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
560 ldlm_side_t client,
561 ldlm_appetite_t apt,
562 ldlm_ns_type_t ns_type)
563{
564 struct ldlm_namespace *ns = NULL;
565 struct ldlm_ns_bucket *nsb;
566 ldlm_ns_hash_def_t *nsd;
567 cfs_hash_bd_t bd;
568 int idx;
569 int rc;
570
571 LASSERT(obd != NULL);
572
573 rc = ldlm_get_ref();
574 if (rc) {
575 CERROR("ldlm_get_ref failed: %d\n", rc);
 576 return NULL;
577 }
578
579 for (idx = 0;;idx++) {
580 nsd = &ldlm_ns_hash_defs[idx];
581 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
582 CERROR("Unknown type %d for ns %s\n", ns_type, name);
583 GOTO(out_ref, NULL);
584 }
585
586 if (nsd->nsd_type == ns_type)
587 break;
588 }
589
590 OBD_ALLOC_PTR(ns);
591 if (!ns)
592 GOTO(out_ref, NULL);
593
594 ns->ns_rs_hash = cfs_hash_create(name,
595 nsd->nsd_all_bits, nsd->nsd_all_bits,
596 nsd->nsd_bkt_bits, sizeof(*nsb),
597 CFS_HASH_MIN_THETA,
598 CFS_HASH_MAX_THETA,
599 nsd->nsd_hops,
600 CFS_HASH_DEPTH |
601 CFS_HASH_BIGNAME |
602 CFS_HASH_SPIN_BKTLOCK |
603 CFS_HASH_NO_ITEMREF);
604 if (ns->ns_rs_hash == NULL)
605 GOTO(out_ns, NULL);
606
607 cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
608 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
609 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
610 nsb->nsb_namespace = ns;
611 }
612
613 ns->ns_obd = obd;
614 ns->ns_appetite = apt;
615 ns->ns_client = client;
616
617 INIT_LIST_HEAD(&ns->ns_list_chain);
618 INIT_LIST_HEAD(&ns->ns_unused_list);
619 spin_lock_init(&ns->ns_lock);
620 atomic_set(&ns->ns_bref, 0);
621 init_waitqueue_head(&ns->ns_waitq);
622
623 ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
624 ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
625 ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
626
627 ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
628 ns->ns_nr_unused = 0;
629 ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
630 ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
631 ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
632 ns->ns_timeouts = 0;
633 ns->ns_orig_connect_flags = 0;
634 ns->ns_connect_flags = 0;
635 ns->ns_stopping = 0;
636 rc = ldlm_namespace_proc_register(ns);
637 if (rc != 0) {
638 CERROR("Can't initialize ns proc, rc %d\n", rc);
639 GOTO(out_hash, rc);
640 }
641
 642 idx = ldlm_namespace_nr_read(client);
643 rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
644 if (rc) {
645 CERROR("Can't initialize lock pool, rc %d\n", rc);
646 GOTO(out_proc, rc);
647 }
648
649 ldlm_namespace_register(ns, client);
 650 return ns;
651out_proc:
652 ldlm_namespace_proc_unregister(ns);
653 ldlm_namespace_cleanup(ns, 0);
654out_hash:
655 cfs_hash_putref(ns->ns_rs_hash);
656out_ns:
657 OBD_FREE_PTR(ns);
658out_ref:
659 ldlm_put_ref();
 660 return NULL;
661}
662EXPORT_SYMBOL(ldlm_namespace_new);
663
664extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
665
666/**
667 * Cancel and destroy all locks on a resource.
668 *
669 * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
670 * clean up. This is currently only used for recovery, and we make
671 * certain assumptions as a result--notably, that we shouldn't cancel
672 * locks with refs.
673 */
674static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
675 __u64 flags)
676{
677 struct list_head *tmp;
678 int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
679 bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
680
681 do {
682 struct ldlm_lock *lock = NULL;
683
 684 /* First, look for a lock that has not been cleaned up yet;
 685 * all cleaned locks are marked by the CLEANED flag. */
686 lock_res(res);
687 list_for_each(tmp, q) {
688 lock = list_entry(tmp, struct ldlm_lock,
689 l_res_link);
690 if (lock->l_flags & LDLM_FL_CLEANED) {
691 lock = NULL;
692 continue;
693 }
694 LDLM_LOCK_GET(lock);
695 lock->l_flags |= LDLM_FL_CLEANED;
696 break;
697 }
698
699 if (lock == NULL) {
700 unlock_res(res);
701 break;
702 }
703
704 /* Set CBPENDING so nothing in the cancellation path
705 * can match this lock. */
706 lock->l_flags |= LDLM_FL_CBPENDING;
707 lock->l_flags |= LDLM_FL_FAILED;
708 lock->l_flags |= flags;
709
710 /* ... without sending a CANCEL message for local_only. */
711 if (local_only)
712 lock->l_flags |= LDLM_FL_LOCAL_ONLY;
713
714 if (local_only && (lock->l_readers || lock->l_writers)) {
715 /* This is a little bit gross, but much better than the
716 * alternative: pretend that we got a blocking AST from
717 * the server, so that when the lock is decref'd, it
718 * will go away ... */
719 unlock_res(res);
720 LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
721 if (lock->l_completion_ast)
722 lock->l_completion_ast(lock, 0, NULL);
723 LDLM_LOCK_RELEASE(lock);
724 continue;
725 }
726
727 if (client) {
728 struct lustre_handle lockh;
729
730 unlock_res(res);
731 ldlm_lock2handle(lock, &lockh);
732 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
733 if (rc)
734 CERROR("ldlm_cli_cancel: %d\n", rc);
735 } else {
736 ldlm_resource_unlink_lock(lock);
737 unlock_res(res);
738 LDLM_DEBUG(lock, "Freeing a lock still held by a "
739 "client node");
740 ldlm_lock_destroy(lock);
741 }
742 LDLM_LOCK_RELEASE(lock);
743 } while (1);
744}
745
746static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
747 struct hlist_node *hnode, void *arg)
748{
749 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
750 __u64 flags = *(__u64 *)arg;
751
752 cleanup_resource(res, &res->lr_granted, flags);
753 cleanup_resource(res, &res->lr_converting, flags);
754 cleanup_resource(res, &res->lr_waiting, flags);
755
756 return 0;
757}
758
759static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
760 struct hlist_node *hnode, void *arg)
761{
762 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
763
764 lock_res(res);
765 CERROR("Namespace %s resource refcount nonzero "
766 "(%d) after lock cleanup; forcing "
767 "cleanup.\n",
768 ldlm_ns_name(ldlm_res_to_ns(res)),
769 atomic_read(&res->lr_refcount) - 1);
770
771 CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
772 LPU64") (rc: %d)\n", res,
773 res->lr_name.name[0], res->lr_name.name[1],
774 res->lr_name.name[2], res->lr_name.name[3],
775 atomic_read(&res->lr_refcount) - 1);
776
777 ldlm_resource_dump(D_ERROR, res);
778 unlock_res(res);
779 return 0;
780}
781
782/**
783 * Cancel and destroy all locks in the namespace.
784 *
 785 * Typically used during evictions, when the server has notified the client
 786 * that it was evicted and all of its state needs to be destroyed.
787 * Also used during shutdown.
788 */
789int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
790{
791 if (ns == NULL) {
792 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
793 return ELDLM_OK;
794 }
795
796 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
797 cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
798 return ELDLM_OK;
799}
800EXPORT_SYMBOL(ldlm_namespace_cleanup);
801
802/**
803 * Attempts to free namespace.
804 *
805 * Only used when namespace goes away, like during an unmount.
806 */
807static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
808{
809 /* At shutdown time, don't call the cancellation callback */
810 ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
811
812 if (atomic_read(&ns->ns_bref) > 0) {
813 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
814 int rc;
815 CDEBUG(D_DLMTRACE,
816 "dlm namespace %s free waiting on refcount %d\n",
817 ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
818force_wait:
819 if (force)
820 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
821
822 rc = l_wait_event(ns->ns_waitq,
823 atomic_read(&ns->ns_bref) == 0, &lwi);
824
825 /* Forced cleanups should be able to reclaim all references,
826 * so it's safe to wait forever... we can't leak locks... */
827 if (force && rc == -ETIMEDOUT) {
828 LCONSOLE_ERROR("Forced cleanup waiting for %s "
829 "namespace with %d resources in use, "
830 "(rc=%d)\n", ldlm_ns_name(ns),
831 atomic_read(&ns->ns_bref), rc);
832 GOTO(force_wait, rc);
833 }
834
835 if (atomic_read(&ns->ns_bref)) {
836 LCONSOLE_ERROR("Cleanup waiting for %s namespace "
837 "with %d resources in use, (rc=%d)\n",
838 ldlm_ns_name(ns),
839 atomic_read(&ns->ns_bref), rc);
 840 return ELDLM_NAMESPACE_EXISTS;
841 }
842 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
843 ldlm_ns_name(ns));
844 }
845
 846 return ELDLM_OK;
847}
848
849/**
850 * Performs various cleanups for passed \a ns to make it drop refc and be
851 * ready for freeing. Waits for refc == 0.
852 *
853 * The following is done:
854 * (0) Unregister \a ns from its list to make inaccessible for potential
855 * users like pools thread and others;
856 * (1) Clear all locks in \a ns.
857 */
858void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
859 struct obd_import *imp,
860 int force)
861{
862 int rc;
 863
 864 if (!ns) {
865 return;
866 }
867
868 spin_lock(&ns->ns_lock);
869 ns->ns_stopping = 1;
870 spin_unlock(&ns->ns_lock);
871
872 /*
873 * Can fail with -EINTR when force == 0 in which case try harder.
874 */
875 rc = __ldlm_namespace_free(ns, force);
876 if (rc != ELDLM_OK) {
877 if (imp) {
878 ptlrpc_disconnect_import(imp, 0);
879 ptlrpc_invalidate_import(imp);
880 }
881
882 /*
883 * With all requests dropped and the import inactive
 884 * we are guaranteed that all references will be dropped.
885 */
886 rc = __ldlm_namespace_free(ns, 1);
887 LASSERT(rc == 0);
888 }
889}
890
891/**
 892 * Frees the memory structures related to \a ns. This is only done
 893 * when ldlm_namespace_free_prior() successfully removed all resources
894 * referencing \a ns and its refc == 0.
895 */
896void ldlm_namespace_free_post(struct ldlm_namespace *ns)
897{
 898 if (!ns) {
899 return;
900 }
901
902 /* Make sure that nobody can find this ns in its list. */
903 ldlm_namespace_unregister(ns, ns->ns_client);
904 /* Fini pool _before_ parent proc dir is removed. This is important as
 905 * ldlm_pool_fini() removes its own proc dir, which is a child of @dir.
906 * Removing it after @dir may cause oops. */
907 ldlm_pool_fini(&ns->ns_pool);
908
909 ldlm_namespace_proc_unregister(ns);
910 cfs_hash_putref(ns->ns_rs_hash);
 911 /* Namespace \a ns should not be on any list at this time, otherwise
912 * this will cause issues related to using freed \a ns in poold
913 * thread. */
914 LASSERT(list_empty(&ns->ns_list_chain));
915 OBD_FREE_PTR(ns);
916 ldlm_put_ref();
917}
918
919/**
920 * Cleanup the resource, and free namespace.
921 * bug 12864:
922 * Deadlock issue:
923 * proc1: destroy import
924 * class_disconnect_export(grab cl_sem) ->
925 * -> ldlm_namespace_free ->
926 * -> lprocfs_remove(grab _lprocfs_lock).
927 * proc2: read proc info
928 * lprocfs_fops_read(grab _lprocfs_lock) ->
929 * -> osc_rd_active, etc(grab cl_sem).
930 *
 931 * So we have to split ldlm_namespace_free() into two parts - the first
932 * part ldlm_namespace_free_prior is used to cleanup the resource which is
933 * being used; the 2nd part ldlm_namespace_free_post is used to unregister the
934 * lprocfs entries, and then free memory. It will be called w/o cli->cl_sem
935 * held.
936 */
937void ldlm_namespace_free(struct ldlm_namespace *ns,
938 struct obd_import *imp,
939 int force)
940{
941 ldlm_namespace_free_prior(ns, imp, force);
942 ldlm_namespace_free_post(ns);
943}
944EXPORT_SYMBOL(ldlm_namespace_free);
945
946void ldlm_namespace_get(struct ldlm_namespace *ns)
947{
948 atomic_inc(&ns->ns_bref);
949}
950EXPORT_SYMBOL(ldlm_namespace_get);
951
952/* This is only for callers that care about refcount */
953int ldlm_namespace_get_return(struct ldlm_namespace *ns)
954{
955 return atomic_inc_return(&ns->ns_bref);
956}
957
958void ldlm_namespace_put(struct ldlm_namespace *ns)
959{
960 if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
961 wake_up(&ns->ns_waitq);
962 spin_unlock(&ns->ns_lock);
963 }
964}
965EXPORT_SYMBOL(ldlm_namespace_put);
966
967/** Register \a ns in the list of namespaces */
968void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
969{
970 mutex_lock(ldlm_namespace_lock(client));
971 LASSERT(list_empty(&ns->ns_list_chain));
972 list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
973 ldlm_namespace_nr_inc(client);
974 mutex_unlock(ldlm_namespace_lock(client));
975}
976
977/** Unregister \a ns from the list of namespaces. */
978void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
979{
980 mutex_lock(ldlm_namespace_lock(client));
981 LASSERT(!list_empty(&ns->ns_list_chain));
982 /* Some asserts and possibly other parts of the code are still
983 * using list_empty(&ns->ns_list_chain). This is why it is
984 * important to use list_del_init() here. */
985 list_del_init(&ns->ns_list_chain);
 986 ldlm_namespace_nr_dec(client);
987 mutex_unlock(ldlm_namespace_lock(client));
988}
989
990/** Should be called with ldlm_namespace_lock(client) taken. */
991void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
992 ldlm_side_t client)
993{
994 LASSERT(!list_empty(&ns->ns_list_chain));
995 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
996 list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
997}
998
999/** Should be called with ldlm_namespace_lock(client) taken. */
1000void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1001 ldlm_side_t client)
1002{
1003 LASSERT(!list_empty(&ns->ns_list_chain));
1004 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1005 list_move_tail(&ns->ns_list_chain,
1006 ldlm_namespace_inactive_list(client));
1007}
1008
1009/** Should be called with ldlm_namespace_lock(client) taken. */
1010struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
1011{
1012 LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1013 LASSERT(!list_empty(ldlm_namespace_list(client)));
1014 return container_of(ldlm_namespace_list(client)->next,
1015 struct ldlm_namespace, ns_list_chain);
1016}
1017
1018/** Create and initialize new resource. */
1019static struct ldlm_resource *ldlm_resource_new(void)
1020{
1021 struct ldlm_resource *res;
1022 int idx;
1023
1024 OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, __GFP_IO);
1025 if (res == NULL)
1026 return NULL;
1027
1028 INIT_LIST_HEAD(&res->lr_granted);
1029 INIT_LIST_HEAD(&res->lr_converting);
1030 INIT_LIST_HEAD(&res->lr_waiting);
1031
1032 /* Initialize interval trees for each lock mode. */
1033 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1034 res->lr_itree[idx].lit_size = 0;
1035 res->lr_itree[idx].lit_mode = 1 << idx;
1036 res->lr_itree[idx].lit_root = NULL;
1037 }
1038
1039 atomic_set(&res->lr_refcount, 1);
1040 spin_lock_init(&res->lr_lock);
1041 lu_ref_init(&res->lr_reference);
1042
1043 /* The creator of the resource must unlock the mutex after LVB
1044 * initialization. */
1045 mutex_init(&res->lr_lvb_mutex);
1046 mutex_lock(&res->lr_lvb_mutex);
1047
1048 return res;
1049}
1050
1051/**
1052 * Return a reference to resource with given name, creating it if necessary.
1053 * Args: namespace with ns_lock unlocked
1054 * Locks: takes and releases NS hash-lock and res->lr_lock
1055 * Returns: referenced, unlocked ldlm_resource or NULL
1056 */
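/*
 * Illustrative lookup only (the resource name value is arbitrary):
 *
 *	struct ldlm_res_id res_id = { .name = { 0x1234 } };
 *	struct ldlm_resource *res;
 *
 *	res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *	if (res == NULL)
 *		return -ENOMEM;
 *	...
 *	ldlm_resource_putref(res);
 */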
1057struct ldlm_resource *
1058ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1059 const struct ldlm_res_id *name, ldlm_type_t type, int create)
1060{
1061 struct hlist_node *hnode;
1062 struct ldlm_resource *res;
1063 cfs_hash_bd_t bd;
1064 __u64 version;
 1065 int ns_refcount = 0;
1066
1067 LASSERT(ns != NULL);
1068 LASSERT(parent == NULL);
1069 LASSERT(ns->ns_rs_hash != NULL);
1070 LASSERT(name->name[0] != 0);
1071
1072 cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1073 hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1074 if (hnode != NULL) {
1075 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1076 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1077 /* Synchronize with regard to resource creation. */
1078 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1079 mutex_lock(&res->lr_lvb_mutex);
1080 mutex_unlock(&res->lr_lvb_mutex);
1081 }
1082
1083 if (unlikely(res->lr_lvb_len < 0)) {
1084 ldlm_resource_putref(res);
1085 res = NULL;
1086 }
1087 return res;
1088 }
1089
1090 version = cfs_hash_bd_version_get(&bd);
1091 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1092
1093 if (create == 0)
1094 return NULL;
1095
1096 LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1097 "type: %d\n", type);
1098 res = ldlm_resource_new();
1099 if (!res)
1100 return NULL;
1101
1102 res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1103 res->lr_name = *name;
1104 res->lr_type = type;
1105 res->lr_most_restr = LCK_NL;
1106
1107 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1108 hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1109 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1110
1111 if (hnode != NULL) {
1112 /* Someone won the race and already added the resource. */
1113 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1114 /* Clean lu_ref for failed resource. */
1115 lu_ref_fini(&res->lr_reference);
1116 /* We have taken lr_lvb_mutex. Drop it. */
1117 mutex_unlock(&res->lr_lvb_mutex);
1118 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1119
1120 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1121 /* Synchronize with regard to resource creation. */
1122 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1123 mutex_lock(&res->lr_lvb_mutex);
1124 mutex_unlock(&res->lr_lvb_mutex);
1125 }
1126
1127 if (unlikely(res->lr_lvb_len < 0)) {
1128 ldlm_resource_putref(res);
1129 res = NULL;
1130 }
1131 return res;
1132 }
1133 /* We won! Let's add the resource. */
1134 cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1135 if (cfs_hash_bd_count_get(&bd) == 1)
 1136 ns_refcount = ldlm_namespace_get_return(ns);
1137
1138 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1139 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1140 int rc;
1141
1142 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1143 rc = ns->ns_lvbo->lvbo_init(res);
1144 if (rc < 0) {
1145 CERROR("%s: lvbo_init failed for resource "LPX64":"
1146 LPX64": rc = %d\n", ns->ns_obd->obd_name,
1147 name->name[0], name->name[1], rc);
1148 if (res->lr_lvb_data) {
1149 OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
1150 res->lr_lvb_data = NULL;
1151 }
1152 res->lr_lvb_len = rc;
1153 mutex_unlock(&res->lr_lvb_mutex);
1154 ldlm_resource_putref(res);
1155 return NULL;
1156 }
1157 }
1158
1159 /* We create resource with locked lr_lvb_mutex. */
1160 mutex_unlock(&res->lr_lvb_mutex);
1161
1162 /* Let's see if we happened to be the very first resource in this
1163 * namespace. If so, and this is a client namespace, we need to move
1164 * the namespace into the active namespaces list to be patrolled by
 1165 * the ldlm_poold. */
1166 if (ns_is_client(ns) && ns_refcount == 1) {
1167 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1168 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1169 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1170 }
1171
1172 return res;
1173}
1174EXPORT_SYMBOL(ldlm_resource_get);
1175
1176struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1177{
1178 LASSERT(res != NULL);
1179 LASSERT(res != LP_POISON);
1180 atomic_inc(&res->lr_refcount);
1181 CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1182 atomic_read(&res->lr_refcount));
1183 return res;
1184}
1185
1186static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
1187 struct ldlm_resource *res)
1188{
1189 struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1190
1191 if (!list_empty(&res->lr_granted)) {
1192 ldlm_resource_dump(D_ERROR, res);
1193 LBUG();
1194 }
1195
1196 if (!list_empty(&res->lr_converting)) {
1197 ldlm_resource_dump(D_ERROR, res);
1198 LBUG();
1199 }
1200
1201 if (!list_empty(&res->lr_waiting)) {
1202 ldlm_resource_dump(D_ERROR, res);
1203 LBUG();
1204 }
1205
1206 cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1207 bd, &res->lr_hash);
1208 lu_ref_fini(&res->lr_reference);
1209 if (cfs_hash_bd_count_get(bd) == 0)
1210 ldlm_namespace_put(nsb->nsb_namespace);
1211}
1212
1213/* Returns 1 if the resource was freed, 0 if it remains. */
1214int ldlm_resource_putref(struct ldlm_resource *res)
1215{
1216 struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1217 cfs_hash_bd_t bd;
1218
1219 LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1220 CDEBUG(D_INFO, "putref res: %p count: %d\n",
1221 res, atomic_read(&res->lr_refcount) - 1);
1222
1223 cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1224 if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1225 __ldlm_resource_putref_final(&bd, res);
1226 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1227 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1228 ns->ns_lvbo->lvbo_free(res);
1229 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1230 return 1;
1231 }
1232 return 0;
1233}
1234EXPORT_SYMBOL(ldlm_resource_putref);
1235
1236/* Returns 1 if the resource was freed, 0 if it remains. */
1237int ldlm_resource_putref_locked(struct ldlm_resource *res)
1238{
1239 struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1240
1241 LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1242 CDEBUG(D_INFO, "putref res: %p count: %d\n",
1243 res, atomic_read(&res->lr_refcount) - 1);
1244
1245 if (atomic_dec_and_test(&res->lr_refcount)) {
1246 cfs_hash_bd_t bd;
1247
1248 cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
1249 &res->lr_name, &bd);
1250 __ldlm_resource_putref_final(&bd, res);
1251 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1252 /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
1253 * so we should never be here while calling cfs_hash_del,
1254 * cfs_hash_for_each_nolock is the only case we can get
1255 * here, which is safe to release cfs_hash_bd_lock.
1256 */
1257 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1258 ns->ns_lvbo->lvbo_free(res);
1259 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1260
1261 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1262 return 1;
1263 }
1264 return 0;
1265}
1266
1267/**
 1268 * Add a lock to the specified lock list of the given resource.
1269 */
1270void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1271 struct ldlm_lock *lock)
1272{
1273 check_res_locked(res);
1274
1275 LDLM_DEBUG(lock, "About to add this lock:\n");
1276
 1277 if (lock->l_flags & LDLM_FL_DESTROYED) {
1278 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1279 return;
1280 }
1281
1282 LASSERT(list_empty(&lock->l_res_link));
1283
1284 list_add_tail(&lock->l_res_link, head);
1285}
1286
1287/**
1288 * Insert a lock into resource after specified lock.
1289 *
1290 * Obtain resource description from the lock we are inserting after.
1291 */
1292void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1293 struct ldlm_lock *new)
1294{
1295 struct ldlm_resource *res = original->l_resource;
1296
1297 check_res_locked(res);
1298
1299 ldlm_resource_dump(D_INFO, res);
1300 LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
1301
 1302 if (new->l_flags & LDLM_FL_DESTROYED) {
1303 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1304 goto out;
1305 }
1306
1307 LASSERT(list_empty(&new->l_res_link));
1308
1309 list_add(&new->l_res_link, &original->l_res_link);
1310 out:;
1311}
1312
1313void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1314{
1315 int type = lock->l_resource->lr_type;
1316
1317 check_res_locked(lock->l_resource);
1318 if (type == LDLM_IBITS || type == LDLM_PLAIN)
1319 ldlm_unlink_lock_skiplist(lock);
1320 else if (type == LDLM_EXTENT)
1321 ldlm_extent_unlink_lock(lock);
1322 list_del_init(&lock->l_res_link);
1323}
1324EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1325
1326void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1327{
1328 desc->lr_type = res->lr_type;
1329 desc->lr_name = res->lr_name;
1330}
1331
1332/**
1333 * Print information about all locks in all namespaces on this node to debug
1334 * log.
1335 */
1336void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1337{
1338 struct list_head *tmp;
1339
1340 if (!((libcfs_debug | D_ERROR) & level))
1341 return;
1342
1343 mutex_lock(ldlm_namespace_lock(client));
1344
1345 list_for_each(tmp, ldlm_namespace_list(client)) {
1346 struct ldlm_namespace *ns;
1347 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1348 ldlm_namespace_dump(level, ns);
1349 }
1350
1351 mutex_unlock(ldlm_namespace_lock(client));
1352}
1353EXPORT_SYMBOL(ldlm_dump_all_namespaces);
1354
1355static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1356 struct hlist_node *hnode, void *arg)
1357{
1358 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1359 int level = (int)(unsigned long)arg;
1360
1361 lock_res(res);
1362 ldlm_resource_dump(level, res);
1363 unlock_res(res);
1364
1365 return 0;
1366}
1367
1368/**
1369 * Print information about all locks in this namespace on this node to debug
1370 * log.
1371 */
1372void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1373{
1374 if (!((libcfs_debug | D_ERROR) & level))
1375 return;
1376
1377 CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1378 ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1379 ns_is_client(ns) ? "client" : "server");
1380
1381 if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1382 return;
1383
1384 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1385 ldlm_res_hash_dump,
1386 (void *)(unsigned long)level);
1387 spin_lock(&ns->ns_lock);
1388 ns->ns_next_dump = cfs_time_shift(10);
1389 spin_unlock(&ns->ns_lock);
1390}
1391EXPORT_SYMBOL(ldlm_namespace_dump);
1392
1393/**
1394 * Print information about all locks in this resource to debug log.
1395 */
1396void ldlm_resource_dump(int level, struct ldlm_resource *res)
1397{
1398 struct ldlm_lock *lock;
1399 unsigned int granted = 0;
1400
1401 CLASSERT(RES_NAME_SIZE == 4);
1402
1403 if (!((libcfs_debug | D_ERROR) & level))
1404 return;
1405
1406 CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
1407 ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
1408 res->lr_name.name[2], res->lr_name.name[3],
1409 atomic_read(&res->lr_refcount));
1410
1411 if (!list_empty(&res->lr_granted)) {
1412 CDEBUG(level, "Granted locks (in reverse order):\n");
1413 list_for_each_entry_reverse(lock, &res->lr_granted,
1414 l_res_link) {
1415 LDLM_DEBUG_LIMIT(level, lock, "###");
1416 if (!(level & D_CANTMASK) &&
1417 ++granted > ldlm_dump_granted_max) {
1418 CDEBUG(level, "only dump %d granted locks to "
1419 "avoid DDOS.\n", granted);
1420 break;
1421 }
1422 }
1423 }
1424 if (!list_empty(&res->lr_converting)) {
1425 CDEBUG(level, "Converting locks:\n");
1426 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1427 LDLM_DEBUG_LIMIT(level, lock, "###");
1428 }
1429 if (!list_empty(&res->lr_waiting)) {
1430 CDEBUG(level, "Waiting locks:\n");
1431 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1432 LDLM_DEBUG_LIMIT(level, lock, "###");
1433 }
1434}