staging/lustre/obdclass: use a dummy structure for lu_ref_link
drivers/staging/lustre/lustre/obdclass/lu_object.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/libcfs/libcfs.h>

#include <linux/module.h>

/* hash_long() */
#include <linux/libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <lu_ref.h>
#include <linux/list.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site *site;
        struct lu_object *orig;
        cfs_hash_bd_t bd;
        const struct lu_fid *fid;

        top = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        /*
         * Till we have fids-on-OST fully implemented, anonymous objects
         * are possible in OSP. Such an object isn't listed in the site,
         * so we should not remove it from the site.
         */
        fid = lu_object_fid(o);
        if (fid_is_zero(fid)) {
                LASSERT(top->loh_hash.next == NULL
                        && top->loh_hash.pprev == NULL);
                LASSERT(list_empty(&top->loh_lru));
                if (!atomic_dec_and_test(&top->loh_ref))
                        return;
                list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                        if (o->lo_ops->loo_object_release != NULL)
                                o->lo_ops->loo_object_release(env, o);
                }
                lu_object_free(env, orig);
                return;
        }

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {
                        /*
                         * Somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        wake_up_all(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When the last reference is released, iterate over object
         * layers, and notify them that the object is no longer busy.
         */
        list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                LASSERT(list_empty(&top->loh_lru));
                list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If the object is dying (will not be cached), remove it
         * from the hash table and LRU.
         *
         * This is done with hash table and LRU lists locked. As the only
         * way to acquire the first reference to a previously unreferenced
         * object is through hash-table lookup (lu_object_find()),
         * or LRU scanning (lu_site_purge()), that are done under hash-table
         * and LRU lock, no race with concurrent object lookup is possible
         * and we can safely destroy the object below.
         */
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
                cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * The object was already removed from hash and LRU above; we can
         * kill it.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);

/**
 * Put object and don't keep it in cache. This is a temporary solution for
 * multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
        set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
        return lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);

/**
 * Kill the object and take it out of LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
                cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
                cfs_hash_bd_t bd;

                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
                list_del_init(&top->loh_lru);
                cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                cfs_hash_bd_unlock(obj_hash, &bd, 1);
        }
}
EXPORT_SYMBOL(lu_object_unhash);

/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        struct list_head *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        if (IS_ERR(top))
                RETURN(top);
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site *site;
        struct lu_object *scan;
        struct list_head *layers;
        struct list_head splice;

        site = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        INIT_LIST_HEAD(&splice);
        list_splice_init(layers, &splice);
        while (!list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (waitqueue_active(&bkt->lsb_marche_funebre))
                wake_up_all(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        cfs_hash_bd_t bd2;
        struct list_head dispose;
        int did_sth;
        int start;
        int count;
        int bnr;
        int i;

        if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
                RETURN(0);

        INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        list_move(&h->loh_lru, &dispose);
                        if (did_sth == 0)
                                did_sth = 1;

                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;
                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
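
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that wants to drop the whole object cache of a site -- e.g. before
 * tearing a stack down -- can pass ~0 as \a nr, exactly as lu_stack_fini()
 * below does. The function name example_drop_site_cache is hypothetical.
 */
#if 0 /* usage sketch only */
static void example_drop_site_cache(const struct lu_env *env,
                                    struct lu_site *site)
{
        /* purge every unreferenced object; busy objects stay cached */
        lu_site_purge(env, site, ~0);
}
#endif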

/*
 * Object printing.
 *
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 */

enum {
        /**
         * Maximal line size.
         *
         * XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
                    LCT_MG_THREAD | LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct libcfs_debug_msg_data *msgdata = cookie;
        struct lu_cdebug_data *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
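
/*
 * Example (illustrative sketch, not part of the original file): this is
 * roughly how lu_cdebug_printer() is meant to be driven. A message-data
 * descriptor is used as the opaque cookie, and lu_object_print() calls the
 * printer once per chunk; the printer only flushes to the debug log when a
 * chunk ends with '\n'. The function name example_dump_object is
 * hypothetical; the macros are the usual libcfs ones.
 */
#if 0 /* usage sketch only */
static void example_dump_object(const struct lu_env *env,
                                const struct lu_object *o)
{
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);

        if (cfs_cdebug_show(D_INFO, DEBUG_SUBSYSTEM))
                lu_object_print(env, &msgdata, lu_cdebug_printer, o);
}
#endif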

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   list_empty((struct list_head *)&hdr->loh_lru) ?
                   "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       wait_queue_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        struct hlist_node *hnode;
        __u64 ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return ERR_PTR(-ENOENT);

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_peek_locked() is somewhat of an "internal" function
         * of cfs_hash; it doesn't add a refcount to the object. */
        hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return ERR_PTR(-ENOENT);
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                cfs_hash_get(s->ls_obj_hash, hnode);
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                list_del_init(&h->loh_lru);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, lookup has to wait until the object is
         * freed.
         */
        init_waitqueue_entry_current(waiter);
        add_wait_queue(&bkt->lsb_marche_funebre, waiter);
        set_current_state(TASK_UNINTERRUPTIBLE);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}

/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);

static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *o;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            wait_queue_t *waiter)
{
        struct lu_object *o;
        struct lu_object *shadow;
        struct lu_site *s;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        __u64 version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *     - unlock index;
         *     - return object.
         *
         * For the "LOC_F_NEW" case, we are sure the object is newly
         * established. It is unnecessary to perform
         * lookup-alloc-lookup-insert; instead, just alloc and insert
         * directly.
         *
         * If a dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}

/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object *obj;
        wait_queue_t wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
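
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * lookup/use/release pattern around lu_object_find(). The returned object
 * carries an extra reference that must be dropped with lu_object_put();
 * releasing the last reference normally parks the object in the LRU rather
 * than freeing it. The function name example_touch_object is hypothetical.
 */
#if 0 /* usage sketch only */
static int example_touch_object(const struct lu_env *env,
                                struct lu_device *dev,
                                const struct lu_fid *fid)
{
        struct lu_object *o;

        o = lu_object_find(env, dev, fid, NULL);
        if (IS_ERR(o))
                return PTR_ERR(o);
        /* ... use the object layers here ... */
        lu_object_put(env, o);
        return 0;
}
#endif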

/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result = 0;

        INIT_LIST_HEAD(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_init)
                result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        list_del_init(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_fini)
                ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env   *lsp_env;
        void            *lsp_cookie;
        lu_printer_t     lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  struct hlist_node *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
         */
        cache_size = totalram_pages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
                cache_size = (1 << (30 - PAGE_CACHE_SHIFT)) * 3 / 4;
#endif

        /* clear off unreasonable cache setting. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                     (PAGE_CACHE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
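
/*
 * Worked example for lu_htable_order() (illustrative, not part of the
 * original file, and assuming 4KiB pages): on a node with 4 GiB of RAM,
 * totalram_pages is 1048576. With the default lu_cache_percent of 20:
 *
 *     cache_size = 1048576 / 100 * 20 * (4096 / 1024)
 *                = 10485 * 20 * 4 = 838800
 *
 * i.e. room for ~840K cached objects at ~1KiB each. The smallest order
 * with (1 << bits) >= 838800 is bits = 20, so the hash table is created
 * with 2^20 slots (subject to the LU_SITE_BITS_MIN/MAX clamping in
 * lu_site_init() below).
 */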

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid *fid = (struct lu_fid *)key;
        __u32 hash;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

        return hash & mask;
}

static void *lu_obj_hop_object(struct hlist_node *hnode)
{
        return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
        struct lu_object_header *h;

        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
        LBUG(); /* we should never call it */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash       = lu_obj_hop_hash,
        .hs_key        = lu_obj_hop_key,
        .hs_keycmp     = lu_obj_hop_keycmp,
        .hs_object     = lu_obj_hop_object,
        .hs_get        = lu_obj_hop_get,
        .hs_put_locked = lu_obj_hop_put_locked,
};

void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
        spin_lock(&s->ls_ld_lock);
        if (list_empty(&d->ld_linkage))
                list_add(&d->ld_linkage, &s->ls_ld_linkage);
        spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
        spin_lock(&s->ls_ld_lock);
        list_del_init(&d->ld_linkage);
        spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);

/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - they lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS    8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        char name[16];
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                INIT_LIST_HEAD(&bkt->lsb_lru);
                init_waitqueue_head(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        INIT_LIST_HEAD(&s->ls_ld_linkage);
        spin_lock_init(&s->ls_ld_lock);

        lu_dev_add_linkage(s, top);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
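
/*
 * Example (illustrative sketch, not part of the original file): the expected
 * life cycle of a site. A stack initializes the site with its top device,
 * calls lu_site_init_finish() once the whole stack is wired up, and tears it
 * down with lu_site_fini(). The names example_site and example_top_dev are
 * hypothetical.
 */
#if 0 /* usage sketch only */
static int example_site_lifecycle(struct lu_site *example_site,
                                  struct lu_device *example_top_dev)
{
        int rc;

        rc = lu_site_init(example_site, example_top_dev);
        if (rc != 0)
                return rc;
        rc = lu_site_init_finish(example_site);
        if (rc != 0) {
                lu_site_fini(example_site);
                return rc;
        }
        /* ... site is usable here ... */
        lu_site_fini(example_site);
        return 0;
}
#endif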

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        mutex_lock(&lu_sites_guard);
        list_del_init(&s->ls_linkage);
        mutex_unlock(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        mutex_lock(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                list_add(&s->ls_linkage, &lu_sites);
        mutex_unlock(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(atomic_read(&d->ld_ref) > 0);
        atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        INIT_LIST_HEAD(&d->ld_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
                   struct lu_device *d)
{
        memset(o, 0, sizeof(*o));
        o->lo_header = h;
        o->lo_dev = d;
        lu_device_get(d);
        lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
        INIT_LIST_HEAD(&o->lo_linkage);

        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
                              "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        atomic_set(&h->loh_ref, 1);
        INIT_HLIST_NODE(&h->loh_hash);
        INIT_LIST_HEAD(&h->loh_lru);
        INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(list_empty(&h->loh_layers));
        LASSERT(list_empty(&h->loh_lru));
        LASSERT(hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);

        result = -ENFILE;
        spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);
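
/*
 * Example (illustrative sketch, not part of the original file): defining and
 * registering a context key, mirroring how this file defines lu_global_key
 * above. LU_KEY_INIT_FINI() generates example_key_init()/example_key_fini(),
 * which allocate and free one struct example_thread_info per context. All
 * example_* names are hypothetical.
 */
#if 0 /* usage sketch only */
struct example_thread_info {
        char eti_scratch[128];  /* per-thread scratch space */
};

LU_KEY_INIT_FINI(example, struct example_thread_info);

static struct lu_context_key example_thread_key = {
        .lct_tags = LCT_MD_THREAD | LCT_CL_THREAD,
        .lct_init = example_key_init,
        .lct_fini = example_key_fini
};

static int example_module_init(void)
{
        LU_CONTEXT_KEY_INIT(&example_thread_key);
        return lu_context_key_register(&example_thread_key);
}
#endif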

static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                atomic_dec(&key->lct_used);

                if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
                        LINVRNT(module_refcount(key->lct_owner) > 0);
#endif
                        module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        spin_unlock(&lu_keys_guard);

        LASSERTF(atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);

        if (result != 0) {
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }

        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
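
/*
 * Example (illustrative sketch, not part of the original file): the varargs
 * list of lu_context_key_register_many() must be NULL-terminated, and each
 * key must have been primed with LU_CONTEXT_KEY_INIT() first. The a_key and
 * b_key names are hypothetical.
 */
#if 0 /* usage sketch only */
static int example_register_keys(void)
{
        LU_CONTEXT_KEY_INIT(&a_key);
        LU_CONTEXT_KEY_INIT(&b_key);
        /* on failure, keys registered so far are degistered again */
        return lu_context_key_register_many(&a_key, &b_key, NULL);
}
#endif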

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_quiesce(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);

/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         const struct lu_context_key *key)
{
        LINVRNT(ctx->lc_state == LCS_ENTERED);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        LASSERT(lu_keys[key->lct_index] == key);
        return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);

/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
        struct lu_context *ctx;

        if (!(key->lct_tags & LCT_QUIESCENT)) {
                /*
                 * XXX layering violation.
                 */
                key->lct_tags |= LCT_QUIESCENT;
                /*
                 * XXX memory barrier has to go here.
                 */
                spin_lock(&lu_keys_guard);
                list_for_each_entry(ctx, &lu_context_remembered,
                                    lc_remember)
                        key_fini(ctx, key->lct_index);
                spin_unlock(&lu_keys_guard);
                ++key_set_version;
        }
}
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
{
        key->lct_tags &= ~LCT_QUIESCENT;
        ++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);

static void keys_fini(struct lu_context *ctx)
{
        int i;

        if (ctx->lc_value == NULL)
                return;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                key_fini(ctx, i);

        OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        ctx->lc_value = NULL;
}

static int keys_fill(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_value != NULL);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (ctx->lc_value[i] == NULL && key != NULL &&
                    (key->lct_tags & ctx->lc_tags) &&
                    /*
                     * Don't create values for a LCT_QUIESCENT key, as this
                     * will pin module owning a key.
                     */
                    !(key->lct_tags & LCT_QUIESCENT)) {
                        void *value;

                        LINVRNT(key->lct_init != NULL);
                        LINVRNT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);

                        if (!(ctx->lc_tags & LCT_NOREF))
                                try_module_get(key->lct_owner);
                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                        atomic_inc(&key->lct_used);
                        /*
                         * This is the only place in the code, where an
                         * element of ctx->lc_value[] array is set to non-NULL
                         * value.
                         */
                        ctx->lc_value[i] = value;
                        if (key->lct_exit != NULL)
                                ctx->lc_tags |= LCT_HAS_EXIT;
                }
                ctx->lc_version = key_set_version;
        }
        return 0;
}

static int keys_init(struct lu_context *ctx)
{
        OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                return keys_fill(ctx);

        return -ENOMEM;
}

/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
        int rc;

        memset(ctx, 0, sizeof *ctx);
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
                spin_lock(&lu_keys_guard);
                list_add(&ctx->lc_remember, &lu_context_remembered);
                spin_unlock(&lu_keys_guard);
        } else {
                INIT_LIST_HEAD(&ctx->lc_remember);
        }

        rc = keys_init(ctx);
        if (rc != 0)
                lu_context_fini(ctx);

        return rc;
}
EXPORT_SYMBOL(lu_context_init);

/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_FINALIZED;

        if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
                LASSERT(list_empty(&ctx->lc_remember));
                keys_fini(ctx);

        } else { /* could race with key degister */
                spin_lock(&lu_keys_guard);
                keys_fini(ctx);
                list_del_init(&ctx->lc_remember);
                spin_unlock(&lu_keys_guard);
        }
}
EXPORT_SYMBOL(lu_context_fini);

/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_state == LCS_ENTERED);
        ctx->lc_state = LCS_LEFT;
        if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                key = lu_keys[i];
                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key->lct_exit(ctx,
                                                      key, ctx->lc_value[i]);
                        }
                }
        }
}
EXPORT_SYMBOL(lu_context_exit);

/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
        return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);

/**
 * lu_ctx_tags/lu_ses_tags are updated when new types of obd are added.
 * Currently, this is only used on the client side, specifically for the echo
 * device client; for other stacks (like ptlrpc threads), contexts are
 * predefined when the lu_device type is registered, during the module probe
 * phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_context_tags_default |= tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_context_tags_default &= ~tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_session_tags_default |= tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_session_tags_default &= ~tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);

int lu_env_init(struct lu_env *env, __u32 tags)
{
        int result;

        env->le_ses = NULL;
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
        return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
        env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);

int lu_env_refill(struct lu_env *env)
{
        int result;

        result = lu_context_refill(&env->le_ctx);
        if (result == 0 && env->le_ses != NULL)
                result = lu_context_refill(env->le_ses);
        return result;
}
EXPORT_SYMBOL(lu_env_refill);

/**
 * Currently, this API is only used by the echo client, because the echo
 * client and the normal Lustre client share the same cl_env cache. The echo
 * client therefore needs to refresh the env context after it gets one from
 * the cache, especially when the normal client and the echo client co-exist
 * on the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
                          __u32 stags)
{
        int result;

        if ((env->le_ctx.lc_tags & ctags) != ctags) {
                env->le_ctx.lc_version = 0;
                env->le_ctx.lc_tags |= ctags;
        }

        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
                env->le_ses->lc_version = 0;
                env->le_ses->lc_tags |= stags;
        }

        result = lu_env_refill(env);

        return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
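
/*
 * Example (illustrative sketch, not part of the original file): the common
 * lu_env life cycle on a stack-allocated environment. lu_env_init() both
 * initializes and enters the context, so a matching lu_env_fini() is all
 * that is needed on the way out. The tag choice and the function name
 * example_with_env are hypothetical.
 */
#if 0 /* usage sketch only */
static int example_with_env(void)
{
        struct lu_env env;
        int rc;

        rc = lu_env_init(&env, LCT_LOCAL);
        if (rc != 0)
                return rc;
        /* ... call APIs that take a const struct lu_env * here ... */
        lu_env_fini(&env);
        return 0;
}
#endif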

static struct shrinker *lu_site_shrinker = NULL;

typedef struct lu_site_stats {
        unsigned        lss_populated;
        unsigned        lss_max_search;
        unsigned        lss_total;
        unsigned        lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
                              lu_site_stats_t *stats, int populated)
{
        cfs_hash_bd_t bd;
        int i;

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, 1);
                stats->lss_busy += bkt->lsb_busy;
                stats->lss_total += cfs_hash_bd_count_get(&bd);
                stats->lss_max_search = max((int)stats->lss_max_search,
                                            cfs_hash_bd_depmax_get(&bd));
                if (!populated) {
                        cfs_hash_bd_unlock(hs, &bd, 1);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        if (!hlist_empty(hhead))
                                stats->lss_populated++;
                }
                cfs_hash_bd_unlock(hs, &bd, 1);
        }
}

/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake and drop their respective hold
 * on their lock.
 *
 * To prevent this from happening we must ensure the lu_sites_guard lock is
 * not taken while down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        lu_site_stats_t stats;
        struct lu_site *s;
        struct lu_site *tmp;
        int cached = 0;
        int remain = shrink_param(sc, nr_to_scan);
        LIST_HEAD(splice);

        if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
                if (remain != 0)
                        return -1;
                else
                        /* We must not take the lu_sites_guard lock when
                         * __GFP_FS is *not* set because of the deadlock
                         * possibility detailed above. Additionally,
                         * since we cannot determine the number of
                         * objects in the cache without taking this
                         * lock, we're in a particularly tough spot. As
                         * a result, we'll just lie and say our cache is
                         * empty. This _should_ be ok, as we can't
                         * reclaim objects when __GFP_FS is *not* set
                         * anyways.
                         */
                        return 0;
        }

        CDEBUG(D_INODE, "Shrink %d objects\n", remain);

        mutex_lock(&lu_sites_guard);
        list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (shrink_param(sc, nr_to_scan) != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move just shrunk site to the tail of site list to
                         * assure shrinking fairness.
                         */
                        list_move_tail(&s->ls_linkage, &splice);
                }

                memset(&stats, 0, sizeof(stats));
                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                cached += stats.lss_total - stats.lss_busy;
                if (shrink_param(sc, nr_to_scan) && remain <= 0)
                        break;
        }
        list_splice(&splice, lu_sites.prev);
        mutex_unlock(&lu_sites_guard);

        cached = (cached / 100) * sysctl_vfs_cache_pressure;
        if (shrink_param(sc, nr_to_scan) == 0)
                CDEBUG(D_INODE, "%d objects cached\n", cached);
        return cached;
}

/*
 * Debugging stuff.
 */

/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
                      void *unused, const char *format, ...)
{
        va_list args;

        va_start(args, format);
        vprintk(format, args);
        va_end(args);
        return 0;
}

/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
        int result;

        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

        result = lu_ref_global_init();
        if (result != 0)
                return result;

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        if (result != 0)
                return result;

        /*
         * At this level, we don't know what tags are needed, so allocate them
         * conservatively. This should not be too bad, because this
         * environment is global.
         */
        mutex_lock(&lu_sites_guard);
        result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
        mutex_unlock(&lu_sites_guard);
        if (result != 0)
                return result;

        /*
         * seeks estimation: 3 seeks to read a record from oi, one to read
         * inode, one for ea. Unfortunately setting this high value results in
         * lu_object/inode cache consuming all the memory.
         */
        lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)
                return -ENOMEM;

        return result;
}

/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
        if (lu_site_shrinker != NULL) {
                remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;
        }

        lu_context_key_degister(&lu_global_key);

        /*
         * Tear shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        mutex_lock(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        mutex_unlock(&lu_sites_guard);

        lu_ref_global_fini();
}

static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
#ifdef LPROCFS
        struct lprocfs_counter ret;

        lprocfs_stats_collect(stats, idx, &ret);
        return (__u32)ret.lc_count;
#else
        return 0;
#endif
}

/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
{
        lu_site_stats_t stats;

        memset(&stats, 0, sizeof(stats));
        lu_site_stats_get(s->ls_obj_hash, &stats, 1);

        return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
                          stats.lss_busy,
                          stats.lss_total,
                          stats.lss_populated,
                          CFS_HASH_NHLIST(s->ls_obj_hash),
                          stats.lss_max_search,
                          ls_stats_read(s->ls_stats, LU_SS_CREATED),
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
                          ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);

/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
        int result;
        struct lu_kmem_descr *iter = caches;

        for (result = 0; iter->ckd_cache != NULL; ++iter) {
                *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
                                                     iter->ckd_size,
                                                     0, 0, NULL);
                if (*iter->ckd_cache == NULL) {
                        result = -ENOMEM;
                        /* free all previously allocated caches */
                        lu_kmem_fini(caches);
                        break;
                }
        }
        return result;
}
EXPORT_SYMBOL(lu_kmem_init);

/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
        for (; caches->ckd_cache != NULL; ++caches) {
                if (*caches->ckd_cache != NULL) {
                        kmem_cache_destroy(*caches->ckd_cache);
                        *caches->ckd_cache = NULL;
                }
        }
}
EXPORT_SYMBOL(lu_kmem_fini);
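
/*
 * Example (illustrative sketch, not part of the original file): the
 * lu_kmem_descr array passed to lu_kmem_init() is terminated by an entry
 * with a NULL ckd_cache pointer. The example_* names and the struct being
 * sized are hypothetical.
 */
#if 0 /* usage sketch only */
static struct kmem_cache *example_object_kmem;

static struct lu_kmem_descr example_caches[] = {
        {
                .ckd_cache = &example_object_kmem,
                .ckd_name  = "example_object_kmem",
                .ckd_size  = sizeof(struct example_object)
        },
        {
                .ckd_cache = NULL /* terminator */
        }
};

/* lu_kmem_init(example_caches) at module load,
 * lu_kmem_fini(example_caches) at unload. */
#endif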

/**
 * Temporary solution to be able to assign fid in ->do_create()
 * till we have fully-functional OST fids
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
                          const struct lu_fid *fid)
{
        struct lu_site *s = o->lo_dev->ld_site;
        struct lu_fid *old = &o->lo_header->loh_fid;
        struct lu_site_bkt_data *bkt;
        struct lu_object *shadow;
        wait_queue_t waiter;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        __u64 version = 0;

        LASSERT(fid_is_zero(old));

        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
        shadow = htable_lookup(s, &bd, fid, &waiter, &version);
        /* supposed to be unique */
        LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
        *old = *fid;
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);

/**
 * Allocates an object with a zero (non-assigned) fid.
 * XXX: temporary solution to be able to assign fid in ->do_create()
 * till we have fully-functional OST fids
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
                                 struct lu_device *dev,
                                 const struct lu_object_conf *conf)
{
        struct lu_fid fid;
        struct lu_object *o;

        fid_zero(&fid);
        o = lu_object_alloc(env, dev, &fid, conf);

        return o;
}
EXPORT_SYMBOL(lu_object_anon);

struct lu_buf LU_BUF_NULL = {
        .lb_buf = NULL,
        .lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);

void lu_buf_free(struct lu_buf *buf)
{
        LASSERT(buf);
        if (buf->lb_buf) {
                LASSERT(buf->lb_len > 0);
                OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
                buf->lb_buf = NULL;
                buf->lb_len = 0;
        }
}
EXPORT_SYMBOL(lu_buf_free);

void lu_buf_alloc(struct lu_buf *buf, int size)
{
        LASSERT(buf);
        LASSERT(buf->lb_buf == NULL);
        LASSERT(buf->lb_len == 0);
        OBD_ALLOC_LARGE(buf->lb_buf, size);
        if (likely(buf->lb_buf))
                buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);

void lu_buf_realloc(struct lu_buf *buf, int size)
{
        lu_buf_free(buf);
        lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);

struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
{
        if (buf->lb_buf == NULL && buf->lb_len == 0)
                lu_buf_alloc(buf, len);

        if ((len > buf->lb_len) && (buf->lb_buf != NULL))
                lu_buf_realloc(buf, len);

        return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);

/**
 * Increase the size of the \a buf.
 * Preserves old data in the buffer; the old buffer remains unchanged on
 * error.
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, int len)
{
        char *ptr;

        if (len <= buf->lb_len)
                return 0;

        OBD_ALLOC_LARGE(ptr, len);
        if (ptr == NULL)
                return -ENOMEM;

        /* Copy the old data over and free the old buffer. */
        if (buf->lb_buf != NULL) {
                memcpy(ptr, buf->lb_buf, buf->lb_len);
                OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
        }

        buf->lb_buf = ptr;
        buf->lb_len = len;
        return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);
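
/*
 * Example (illustrative sketch, not part of the original file): typical
 * lu_buf usage. Buffers start out as LU_BUF_NULL, are grown on demand, and
 * must be released with lu_buf_free(). Note the difference between
 * lu_buf_check_and_alloc(), which reallocates without preserving contents,
 * and lu_buf_check_and_grow(), which copies the old data over. The function
 * name example_use_lu_buf is hypothetical.
 */
#if 0 /* usage sketch only */
static int example_use_lu_buf(void)
{
        struct lu_buf buf = LU_BUF_NULL;
        int rc;

        lu_buf_check_and_alloc(&buf, 1024);     /* first allocation */
        if (buf.lb_buf == NULL)
                return -ENOMEM;
        /* ... fill buf.lb_buf ... */
        rc = lu_buf_check_and_grow(&buf, 4096); /* grow, keeping contents */
        lu_buf_free(&buf);
        return rc;
}
#endif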