/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/libcfs/libcfs.h>

#include <linux/module.h>

/* hash_long() */
#include <linux/libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <lu_ref.h>
#include <linux/list.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease the reference counter on an object. If the last reference is
 * released, return the object to the cache, unless lu_object_is_dying(o)
 * holds. In the latter case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	cfs_hash_bd_t bd;
	const struct lu_fid *fid;

	top = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * Until we have fids-on-OST fully implemented, anonymous objects
	 * are possible in OSP. Such an object isn't listed in the site,
	 * so we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(top->loh_hash.next == NULL
			&& top->loh_hash.pprev == NULL);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release != NULL)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * Somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	LASSERT(bkt->lsb_busy > 0);
	bkt->lsb_busy--;
	/*
	 * When the last reference is released, iterate over object
	 * layers, and notify them that the object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release != NULL)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If the object is dying (will not be cached), remove it
	 * from the hash table and LRU.
	 *
	 * This is done with the hash table and LRU lists locked. As the only
	 * way to acquire the first reference to a previously unreferenced
	 * object is through hash-table lookup (lu_object_find()) or LRU
	 * scanning (lu_site_purge()), both of which are done under the
	 * hash-table and LRU lock, no race with a concurrent object lookup
	 * is possible and we can safely destroy the object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * The object was already removed from the hash and LRU above; we
	 * can kill it now.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);

/**
 * Put an object and don't keep it in the cache. This is a temporary
 * solution for multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
	set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
	lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);

/**
 * Kill the object and take it out of LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		list_del_init(&top->loh_lru);
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);

/**
 * Allocate a new object.
 *
 * This follows the object creation protocol, described in the comment
 * within the struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	int clean;
	int result;
	ENTRY;

	/*
	 * Create the top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (top == NULL)
		RETURN(ERR_PTR(-ENOMEM));
	if (IS_ERR(top))
		RETURN(top);
	/*
	 * This is the only place where the object fid is assigned. It is
	 * constant after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;
	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (scan->lo_flags & LU_OBJECT_ALLOCATED)
				continue;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				RETURN(ERR_PTR(result));
			}
			scan->lo_flags |= LU_OBJECT_ALLOCATED;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start != NULL) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				RETURN(ERR_PTR(result));
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	RETURN(top);
}
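
/*
 * Illustrative sketch (not part of the original file): how a mid-layer
 * device cooperates with lu_object_alloc() above. A layer's
 * ->loo_object_init() typically asks the device below it to allocate that
 * device's slice and chains it in with lu_object_add(); the loop above then
 * re-scans the layer list and initializes the new slice. All names with a
 * foo_ prefix are hypothetical.
 *
 *	static int foo_object_init(const struct lu_env *env,
 *				   struct lu_object *o,
 *				   const struct lu_object_conf *conf)
 *	{
 *		struct lu_device *under;	// device below this layer
 *		struct lu_object *below;
 *
 *		under = foo_dev_below(o->lo_dev);
 *		below = under->ld_ops->ldo_object_alloc(env, o->lo_header,
 *							under);
 *		if (below == NULL)
 *			return -ENOMEM;
 *		lu_object_add(o, below);	// append slice after ours
 *		return 0;
 *	}
 */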

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete != NULL)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		LASSERT(o->lo_ops->loo_object_free != NULL);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	cfs_hash_bd_t bd2;
	struct list_head dispose;
	int did_sth;
	int start;
	int count;
	int bnr;
	int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		RETURN(0);

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			if (did_sth == 0)
				did_sth = 1;

			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		cond_resched();
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
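
/*
 * Illustrative sketch (not part of the original file): lu_site_purge() is
 * the hook a memory-pressure callback uses to shrink the object cache.
 * Passing ~0 means "purge every unreferenced object" (lu_stack_fini()
 * below does exactly that); a shrinker would instead pass the number of
 * objects it was asked to scan. The helper name is hypothetical.
 *
 *	static void foo_drop_site_cache(const struct lu_env *env,
 *					struct lu_site *s)
 *	{
 *		lu_site_purge(env, s, ~0);
 *	}
 */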

/*
 * Object printing.
 *
 * The code below has to jump through certain hoops to output an object
 * description into a libcfs_debug_msg-based log. The problem is that
 * lu_object_print() composes the object description from strings that are
 * parts of _lines_ of output (i.e., strings that are not terminated by a
 * newline). This doesn't fit very well into the libcfs_debug_msg()
 * interface, which assumes that each message supplied to it is a
 * self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of the lu_cdebug_key key), until a terminating
 * newline character is detected.
 */

enum {
	/**
	 * Maximal line size.
	 *
	 * XXX overflow is not handled correctly.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
	LASSERT(key != NULL);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
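
/*
 * Illustrative sketch (not part of the original file): lu_cdebug_printer()
 * is meant to be passed as the lu_printer_t argument of lu_object_print(),
 * together with a libcfs_debug_msg_data cookie, roughly as the
 * LU_OBJECT_DEBUG-style macros do:
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 *
 * Partial lines accumulate in lck_area and are flushed to the debug log
 * once a chunk ending in '\n' arrives.
 */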

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ?
			"" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{ \n");
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		depth = o->lo_depth + 4;

		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);
		if (o->lo_ops->loo_object_print != NULL)
			o->lo_ops->loo_object_print(env, cookie, printer, o);
		(*printer)(env, cookie, "\n");
	}
	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_invariant != NULL &&
		    !o->lo_ops->loo_object_invariant(o))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
				       cfs_hash_bd_t *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return NULL;

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
	 * of cfs_hash; it doesn't add a refcount on the object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return NULL;
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		list_del_init(&h->loh_lru);
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until the object is
	 * freed.
	 */
	init_waitqueue_entry_current(waiter);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}

/**
 * Search the cache for an object with the fid \a f. If such an object is
 * found, return it. Otherwise, create a new object, insert it into the
 * cache and return it. In any case, an additional reference is acquired
 * on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
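
/*
 * Illustrative sketch (not part of the original file): the typical caller
 * pattern. Every successful lu_object_find() must be balanced by a
 * lu_object_put(), which either returns the object to the cache or frees
 * it if it is dying. The fid variable here is hypothetical.
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find(env, dev, &fid, NULL);
 *	if (IS_ERR(o))
 *		return PTR_ERR(o);
 *	// ... use the object stack rooted at o ...
 *	lu_object_put(env, o);
 */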

static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	struct lu_site_bkt_data *bkt;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
	return o;
}

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *     - unlock index;
	 *     - return object.
	 *
	 * For the "LOC_F_NEW" case, we are sure the object is newly
	 * established. It is unnecessary to perform
	 * lookup-alloc-lookup-insert; instead, just alloc and insert
	 * directly.
	 *
	 * If a dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (o != NULL)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(shadow == NULL)) {
		struct lu_site_bkt_data *bkt;

		bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		bkt->lsb_busy++;
		cfs_hash_bd_unlock(hs, &bd, 1);
		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}

/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (!IS_ERR(top)) {
		obj = lu_object_locate(top->lo_header, dev->ld_type);
		if (obj == NULL)
			lu_object_put(env, top);
	} else
		obj = top;
	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
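
/*
 * Illustrative note (not part of the original file): a returned non-NULL
 * slice still pins the whole compound object, so it is released through
 * the top object as usual. The fid variable is hypothetical.
 *
 *	struct lu_object *slice;
 *
 *	slice = lu_object_find_slice(env, dev, &fid, NULL);
 *	if (!IS_ERR(slice) && slice != NULL) {
 *		// ... operate on the layer owned by dev ...
 *		lu_object_put(env, lu_object_top(slice->lo_header));
 *	}
 */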

/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);
	if (result == 0)
		list_add(&ldt->ldt_linkage, &lu_device_types);
	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	list_del_init(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
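
/*
 * Illustrative sketch (not part of the original file): a layer defines its
 * device type once and has it registered at module load (directly, or via
 * class_register_type()), then unregistered on unload. The foo_* names
 * and operation vector are hypothetical.
 *
 *	static struct lu_device_type foo_device_type = {
 *		.ldt_tags     = LU_DEVICE_CL,		// or LU_DEVICE_MD/DT
 *		.ldt_name     = "foo",
 *		.ldt_ops      = &foo_device_type_ops,	// ldto_init/fini, ...
 *		.ldt_ctx_tags = LCT_CL_THREAD,
 *	};
 *
 *	// at module init:  lu_device_type_init(&foo_device_type);
 *	// at module exit:  lu_device_type_fini(&foo_device_type);
 */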

void lu_types_stop(void)
{
	struct lu_device_type *ldt;

	list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
		if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
			ldt->ldt_ops->ldto_stop(ldt);
	}
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node.
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
	LU_CACHE_PERCENT_MAX     = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
		"Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
	unsigned long cache_size;
	int bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by the cache of
	 * lu_objects.
	 *
	 * The size of an lu_object is (arbitrarily) taken to be 1K (together
	 * with the inode).
	 */
	cache_size = num_physpages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
#endif

	/* reject an unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
		      " the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits)
		;
	return bits;
}
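
/*
 * Worked example (illustrative, not part of the original file): on a
 * 64-bit machine with 4 GiB of RAM and 4 KiB pages, num_physpages is
 * 2^20. With the default lu_cache_percent of 20:
 *
 *	cache_size = 2^20 / 100 * 20 * (4096 / 1024) = 838860
 *
 * i.e. roughly 840K cacheable objects at the assumed ~1K per object. The
 * loop then finds the smallest bits with 2^bits >= 838860, which is
 * bits = 20, so the hash table is requested with order 20 (clamped to
 * [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX] in lu_site_init() below).
 */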

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = cfs_hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}

static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (atomic_add_return(1, &h->loh_ref) == 1) {
		struct lu_site_bkt_data *bkt;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get(hs, &h->loh_fid, &bd);
		bkt = cfs_hash_bd_extra_get(hs, &bd);
		bkt->lsb_busy++;
	}
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never call it */
}

cfs_hash_ops_t lu_site_hash_ops = {
	.hs_hash       = lu_obj_hop_hash,
	.hs_key        = lu_obj_hop_key,
	.hs_keycmp     = lu_obj_hop_keycmp,
	.hs_object     = lu_obj_hop_object,
	.hs_get        = lu_obj_hop_get,
	.hs_put_locked = lu_obj_hop_put_locked,
};

void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	list_del_init(&d->ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);

/**
 * Initialize site \a s, with \a top as the top level device.
 */
#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - they lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS	8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	char name[16];
	int bits;
	int i;
	ENTRY;

	memset(s, 0, sizeof *s);
	bits = lu_htable_order();
	snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
	for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY);
		if (s->ls_obj_hash != NULL)
			break;
	}

	if (s->ls_obj_hash == NULL) {
		CERROR("failed to create lu_site hash with bits: %d\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (s->ls_stats == NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
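
/*
 * Illustrative sketch (not part of the original file): a stack typically
 * pairs these calls as below once its top device exists; the foo_* names
 * are hypothetical.
 *
 *	rc = lu_site_init(&foo_site, foo_top_dev);
 *	if (rc == 0) {
 *		rc = lu_site_init_finish(&foo_site);
 *		if (rc != 0)
 *			lu_site_fini(&foo_site);
 *	}
 *	// ... on teardown: lu_site_fini(&foo_site);
 */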

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash != NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev != NULL) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats != NULL)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire additional reference on device \a d.
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
		t->ldt_ops->ldto_start(t);
	memset(d, 0, sizeof *d);
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t;

	t = d->ld_type;
	if (d->ld_obd != NULL) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(t->ldt_device_nr > 0);
	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
		   struct lu_object_header *h, struct lu_device *d)
{
	memset(o, 0, sizeof *o);
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev != NULL) {
		lu_ref_del_at(&dev->ld_reference,
			      o->lo_dev_ref, "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h.
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof *h);
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan != NULL; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan != NULL; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type != NULL) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever a key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	int i;

	LASSERT(key->lct_init != NULL);
	LASSERT(key->lct_fini != NULL);
	LASSERT(key->lct_tags != 0);
	LASSERT(key->lct_owner != NULL);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (lu_keys[i] == NULL) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);

static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key != NULL);
		LASSERT(key->lct_fini != NULL);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		LASSERT(key->lct_owner != NULL);
		if ((ctx->lc_tags & LCT_NOREF) == 0) {
			LINVRNT(module_refcount(key->lct_owner) > 0);
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key != NULL);
	va_end(args);

	if (result != 0) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
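
/*
 * Illustrative sketch (not part of the original file): the usual pattern,
 * also followed by lu_global_key above, is to generate the init/fini pair
 * with LU_KEY_INIT_FINI() and register everything in one NULL-terminated
 * call. The foo_* names are hypothetical.
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *
 *	struct lu_context_key foo_thread_key = {
 *		.lct_tags = LCT_MD_THREAD,
 *		.lct_init = foo_key_init,
 *		.lct_fini = foo_key_fini
 *	};
 *
 *	// at module init (LU_CONTEXT_KEY_INIT() sets .lct_owner):
 *	LU_CONTEXT_KEY_INIT(&foo_thread_key);
 *	rc = lu_context_key_register_many(&foo_thread_key, NULL);
 *
 *	// per thread/request, inside an entered context:
 *	info = lu_context_key_get(&env->le_ctx, &foo_thread_key);
 */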

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);

/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);

/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		list_for_each_entry(ctx, &lu_context_remembered,
				    lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);

static void keys_fini(struct lu_context *ctx)
{
	int i;

	if (ctx->lc_value == NULL)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	ctx->lc_value = NULL;
}

static int keys_fill(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_value != NULL);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (ctx->lc_value[i] == NULL && key != NULL &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin the module owning the key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init != NULL);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			LASSERT(key->lct_owner != NULL);
			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code where an
			 * element of the ctx->lc_value[] array is set to a
			 * non-NULL value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit != NULL)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}

static int keys_init(struct lu_context *ctx)
{
	OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	if (likely(ctx->lc_value != NULL))
		return keys_fill(ctx);

	return -ENOMEM;
}

/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof *ctx);
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);

/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);
	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);

/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx.
 */
void lu_context_exit(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i] != NULL) {
				struct lu_context_key *key;

				key = lu_keys[i];
				LASSERT(key != NULL);
				if (key->lct_exit != NULL)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
1667 | ||
1668 | /** | |
1669 | * Allocate for the context all missing keys that were registered after the | |
1670 | * context was created. key_set_version only changes in the rare cases when | |
1671 | * modules are loaded or removed. | |
1672 | */ | |
1673 | int lu_context_refill(struct lu_context *ctx) | |
1674 | { | |
1675 | return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx); | |
1676 | } | |
1677 | EXPORT_SYMBOL(lu_context_refill); | |
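| ||
| /* | |
|  * Usage sketch (illustrative only): a long-lived context, e.g. one | |
|  * kept per service thread, refills itself before use so that keys | |
|  * registered after its creation get values too: | |
|  * | |
|  *	rc = lu_context_refill(&ctx); | |
|  *	if (rc != 0) | |
|  *		return rc; | |
|  */ | |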
1678 | ||
1679 | /** | |
1680 | * lu_ctx_tags/lu_ses_tags are updated when new obd types are added. | |
1681 | * Currently this is only used on the client side, specifically by the | |
1682 | * echo device client. For other stacks (such as ptlrpc threads), the | |
1683 | * contexts are predefined when the lu_device type is registered, during | |
1684 | * the module probe phase. | |
1685 | */ | |
1686 | __u32 lu_context_tags_default = 0; | |
1687 | __u32 lu_session_tags_default = 0; | |
1688 | ||
1689 | void lu_context_tags_update(__u32 tags) | |
1690 | { | |
1691 | spin_lock(&lu_keys_guard); | |
1692 | lu_context_tags_default |= tags; | |
1693 | key_set_version++; | |
1694 | spin_unlock(&lu_keys_guard); | |
1695 | } | |
1696 | EXPORT_SYMBOL(lu_context_tags_update); | |
1697 | ||
1698 | void lu_context_tags_clear(__u32 tags) | |
1699 | { | |
1700 | spin_lock(&lu_keys_guard); | |
1701 | lu_context_tags_default &= ~tags; | |
1702 | key_set_version++; | |
1703 | spin_unlock(&lu_keys_guard); | |
1704 | } | |
1705 | EXPORT_SYMBOL(lu_context_tags_clear); | |
1706 | ||
1707 | void lu_session_tags_update(__u32 tags) | |
1708 | { | |
1709 | spin_lock(&lu_keys_guard); | |
1710 | lu_session_tags_default |= tags; | |
1711 | key_set_version++; | |
1712 | spin_unlock(&lu_keys_guard); | |
1713 | } | |
1714 | EXPORT_SYMBOL(lu_session_tags_update); | |
1715 | ||
1716 | void lu_session_tags_clear(__u32 tags) | |
1717 | { | |
1718 | spin_lock(&lu_keys_guard); | |
1719 | lu_session_tags_default &= ~tags; | |
1720 | key_set_version++; | |
1721 | spin_unlock(&lu_keys_guard); | |
1722 | } | |
1723 | EXPORT_SYMBOL(lu_session_tags_clear); | |
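| ||
| /* | |
|  * Usage sketch (illustrative only): when a new obd type comes and | |
|  * goes, the default tags are widened and narrowed in matching pairs | |
|  * (LCT_DT_THREAD/LCT_SESSION below are example tags): | |
|  * | |
|  *	lu_context_tags_update(LCT_DT_THREAD); | |
|  *	lu_session_tags_update(LCT_SESSION); | |
|  *	... obd type is in use ... | |
|  *	lu_context_tags_clear(LCT_DT_THREAD); | |
|  *	lu_session_tags_clear(LCT_SESSION); | |
|  */ | |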
1724 | ||
1725 | int lu_env_init(struct lu_env *env, __u32 tags) | |
1726 | { | |
1727 | int result; | |
1728 | ||
1729 | env->le_ses = NULL; | |
1730 | result = lu_context_init(&env->le_ctx, tags); | |
1731 | if (likely(result == 0)) | |
1732 | lu_context_enter(&env->le_ctx); | |
1733 | return result; | |
1734 | } | |
1735 | EXPORT_SYMBOL(lu_env_init); | |
1736 | ||
1737 | void lu_env_fini(struct lu_env *env) | |
1738 | { | |
1739 | lu_context_exit(&env->le_ctx); | |
1740 | lu_context_fini(&env->le_ctx); | |
1741 | env->le_ses = NULL; | |
1742 | } | |
1743 | EXPORT_SYMBOL(lu_env_fini); | |
1744 | ||
1745 | int lu_env_refill(struct lu_env *env) | |
1746 | { | |
1747 | int result; | |
1748 | ||
1749 | result = lu_context_refill(&env->le_ctx); | |
1750 | if (result == 0 && env->le_ses != NULL) | |
1751 | result = lu_context_refill(env->le_ses); | |
1752 | return result; | |
1753 | } | |
1754 | EXPORT_SYMBOL(lu_env_refill); | |
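| ||
| /* | |
|  * Usage sketch (illustrative only): an environment is typically a | |
|  * stack variable wrapping one context (LCT_LOCAL is an example tag): | |
|  * | |
|  *	struct lu_env env; | |
|  *	int rc; | |
|  * | |
|  *	rc = lu_env_init(&env, LCT_LOCAL); | |
|  *	if (rc == 0) { | |
|  *		... pass &env down the call chain ... | |
|  *		lu_env_fini(&env); | |
|  *	} | |
|  */ | |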
1755 | ||
1756 | /** | |
1757 | * Currently this API is only used by the echo client, because the echo | |
1758 | * client and the normal Lustre client share the same cl_env cache. The | |
1759 | * echo client therefore needs to refresh the env context after getting | |
1760 | * one from the cache, especially when a normal client and an echo client | |
1761 | * co-exist on the same node. | |
1762 | */ | |
1763 | int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, | |
1764 | __u32 stags) | |
1765 | { | |
1766 | int result; | |
1767 | ||
1768 | if ((env->le_ctx.lc_tags & ctags) != ctags) { | |
1769 | env->le_ctx.lc_version = 0; | |
1770 | env->le_ctx.lc_tags |= ctags; | |
1771 | } | |
1772 | ||
1773 | if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) { | |
1774 | env->le_ses->lc_version = 0; | |
1775 | env->le_ses->lc_tags |= stags; | |
1776 | } | |
1777 | ||
1778 | result = lu_env_refill(env); | |
1779 | ||
1780 | return result; | |
1781 | } | |
1782 | EXPORT_SYMBOL(lu_env_refill_by_tags); | |
1783 | ||
1784 | static struct shrinker *lu_site_shrinker = NULL; | |
1785 | ||
1786 | typedef struct lu_site_stats { | |
1787 | unsigned lss_populated; | |
1788 | unsigned lss_max_search; | |
1789 | unsigned lss_total; | |
1790 | unsigned lss_busy; | |
1791 | } lu_site_stats_t; | |
1792 | ||
1793 | static void lu_site_stats_get(cfs_hash_t *hs, | |
1794 | lu_site_stats_t *stats, int populated) | |
1795 | { | |
1796 | cfs_hash_bd_t bd; | |
1797 | int i; | |
1798 | ||
1799 | cfs_hash_for_each_bucket(hs, &bd, i) { | |
1800 | struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd); | |
1801 | struct hlist_head *hhead; | |
1802 | ||
1803 | cfs_hash_bd_lock(hs, &bd, 1); | |
1804 | stats->lss_busy += bkt->lsb_busy; | |
1805 | stats->lss_total += cfs_hash_bd_count_get(&bd); | |
1806 | stats->lss_max_search = max((int)stats->lss_max_search, | |
1807 | cfs_hash_bd_depmax_get(&bd)); | |
1808 | if (!populated) { | |
1809 | cfs_hash_bd_unlock(hs, &bd, 1); | |
1810 | continue; | |
1811 | } | |
1812 | ||
1813 | cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { | |
1814 | if (!hlist_empty(hhead)) | |
1815 | stats->lss_populated++; | |
1816 | } | |
1817 | cfs_hash_bd_unlock(hs, &bd, 1); | |
1818 | } | |
1819 | } | |
1820 | ||
1821 | ||
1822 | /* | |
1823 | * There exists a potential lock inversion deadlock scenario when using | |
1824 | * Lustre on top of ZFS. This occurs between one of ZFS's | |
1825 | * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially, | |
1826 | * thread A will take the lu_sites_guard lock and sleep on the ht_lock, | |
1827 | * while thread B will take the ht_lock and sleep on the lu_sites_guard | |
1828 | * lock. Neither thread can make progress, so neither will wake and drop | |
1829 | * its hold on its lock. | |
1830 | * | |
1831 | * To prevent this from happening we must ensure the lu_sites_guard lock is | |
1832 | * not taken while down this code path. ZFS reliably does not set the | |
1833 | * __GFP_FS bit in its code paths, so this can be used to determine if it | |
1834 | * is safe to take the lu_sites_guard lock. | |
1835 | * | |
1836 | * Ideally we should accurately return the remaining number of cached | |
1837 | * objects without taking the lu_sites_guard lock, but this is not | |
1838 | * possible in the current implementation. | |
1839 | */ | |
1840 | static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) | |
1841 | { | |
1842 | lu_site_stats_t stats; | |
1843 | struct lu_site *s; | |
1844 | struct lu_site *tmp; | |
1845 | int cached = 0; | |
1846 | int remain = shrink_param(sc, nr_to_scan); | |
1847 | LIST_HEAD(splice); | |
1848 | ||
1849 | if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) { | |
1850 | if (remain != 0) | |
1851 | return -1; | |
1852 | else | |
1853 | /* We must not take the lu_sites_guard lock when | |
1854 | * __GFP_FS is *not* set because of the deadlock | |
1855 | * possibility detailed above. Additionally, | |
1856 | * since we cannot determine the number of | |
1857 | * objects in the cache without taking this | |
1858 | * lock, we're in a particularly tough spot. As | |
1859 | * a result, we'll just lie and say our cache is | |
1860 | * empty. This _should_ be ok, as we can't | |
1861 | * reclaim objects when __GFP_FS is *not* set | |
1862 | * anyways. | |
1863 | */ | |
1864 | return 0; | |
1865 | } | |
1866 | ||
1867 | CDEBUG(D_INODE, "Shrink %d objects\n", remain); | |
1868 | ||
1869 | mutex_lock(&lu_sites_guard); | |
1870 | list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { | |
1871 | if (shrink_param(sc, nr_to_scan) != 0) { | |
1872 | remain = lu_site_purge(&lu_shrink_env, s, remain); | |
1873 | /* | |
1874 | * Move just shrunk site to the tail of site list to | |
1875 | * assure shrinking fairness. | |
1876 | */ | |
1877 | list_move_tail(&s->ls_linkage, &splice); | |
1878 | } | |
1879 | ||
1880 | memset(&stats, 0, sizeof(stats)); | |
1881 | lu_site_stats_get(s->ls_obj_hash, &stats, 0); | |
1882 | cached += stats.lss_total - stats.lss_busy; | |
1883 | if (shrink_param(sc, nr_to_scan) && remain <= 0) | |
1884 | break; | |
1885 | } | |
1886 | list_splice(&splice, lu_sites.prev); | |
1887 | mutex_unlock(&lu_sites_guard); | |
1888 | ||
1889 | cached = (cached / 100) * sysctl_vfs_cache_pressure; | |
1890 | if (shrink_param(sc, nr_to_scan) == 0) | |
1891 | CDEBUG(D_INODE, "%d objects cached\n", cached); | |
1892 | return cached; | |
1893 | } | |
1894 | ||
1895 | /* | |
1896 | * Debugging helpers. | |
1897 | */ | |
1898 | ||
1899 | /** | |
1900 | * Environment to be used in debugger, contains all tags. | |
1901 | */ | |
1902 | struct lu_env lu_debugging_env; | |
1903 | ||
1904 | /** | |
1905 | * Debugging printer function using printk(). | |
1906 | */ | |
1907 | int lu_printk_printer(const struct lu_env *env, | |
1908 | void *unused, const char *format, ...) | |
1909 | { | |
1910 | va_list args; | |
1911 | ||
1912 | va_start(args, format); | |
1913 | vprintk(format, args); | |
1914 | va_end(args); | |
1915 | return 0; | |
1916 | } | |
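| ||
| /* | |
|  * Usage sketch (illustrative only): lu_printk_printer() has the | |
|  * lu_printer_t callback shape, so, assuming the usual signature of | |
|  * lu_object_print(), a debugging hook can dump an object to the | |
|  * console once lu_debugging_setup() has run: | |
|  * | |
|  *	lu_object_print(&lu_debugging_env, NULL, lu_printk_printer, obj); | |
|  */ | |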
1917 | ||
1918 | int lu_debugging_setup(void) | |
1919 | { | |
1920 | return lu_env_init(&lu_debugging_env, ~0); | |
1921 | } | |
1922 | ||
1923 | void lu_context_keys_dump(void) | |
1924 | { | |
1925 | int i; | |
1926 | ||
1927 | for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { | |
1928 | struct lu_context_key *key; | |
1929 | ||
1930 | key = lu_keys[i]; | |
1931 | if (key != NULL) { | |
1932 | CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n", | |
1933 | i, key, key->lct_tags, | |
1934 | key->lct_init, key->lct_fini, key->lct_exit, | |
1935 | key->lct_index, atomic_read(&key->lct_used), | |
1936 | key->lct_owner ? key->lct_owner->name : "", | |
1937 | key->lct_owner); | |
1938 | lu_ref_print(&key->lct_reference); | |
1939 | } | |
1940 | } | |
1941 | } | |
1942 | EXPORT_SYMBOL(lu_context_keys_dump); | |
1943 | ||
1944 | /** | |
1945 | * Initialization of global lu_* data. | |
1946 | */ | |
1947 | int lu_global_init(void) | |
1948 | { | |
1949 | int result; | |
1950 | ||
1951 | CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys); | |
1952 | ||
1953 | result = lu_ref_global_init(); | |
1954 | if (result != 0) | |
1955 | return result; | |
1956 | ||
1957 | LU_CONTEXT_KEY_INIT(&lu_global_key); | |
1958 | result = lu_context_key_register(&lu_global_key); | |
1959 | if (result != 0) | |
1960 | return result; | |
1961 | ||
1962 | /* | |
1963 | * At this level, we don't know what tags are needed, so allocate them | |
1964 | * conservatively. This should not be too bad, because this | |
1965 | * environment is global. | |
1966 | */ | |
1967 | mutex_lock(&lu_sites_guard); | |
1968 | result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); | |
1969 | mutex_unlock(&lu_sites_guard); | |
1970 | if (result != 0) | |
1971 | return result; | |
1972 | ||
1973 | /* | |
1974 | * Seeks estimation: 3 seeks to read a record from the OI, one to read the | |
1975 | * inode, one for the EA. Unfortunately, setting such a high value makes the | |
1976 | * lu_object/inode cache consume all of memory, so DEFAULT_SEEKS is used. | |
1977 | */ | |
1978 | lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink); | |
1979 | if (lu_site_shrinker == NULL) | |
1980 | return -ENOMEM; | |
1981 | ||
1982 | return result; | |
1983 | } | |
1984 | ||
1985 | /** | |
1986 | * Dual to lu_global_init(). | |
1987 | */ | |
1988 | void lu_global_fini(void) | |
1989 | { | |
1990 | if (lu_site_shrinker != NULL) { | |
1991 | remove_shrinker(lu_site_shrinker); | |
1992 | lu_site_shrinker = NULL; | |
1993 | } | |
1994 | ||
1995 | lu_context_key_degister(&lu_global_key); | |
1996 | ||
1997 | /* | |
1998 | * Tear shrinker environment down _after_ de-registering | |
1999 | * lu_global_key, because the latter has a value in the former. | |
2000 | */ | |
2001 | mutex_lock(&lu_sites_guard); | |
2002 | lu_env_fini(&lu_shrink_env); | |
2003 | mutex_unlock(&lu_sites_guard); | |
2004 | ||
2005 | lu_ref_global_fini(); | |
2006 | } | |
2007 | ||
2008 | static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) | |
2009 | { | |
2010 | #ifdef LPROCFS | |
2011 | struct lprocfs_counter ret; | |
2012 | ||
2013 | lprocfs_stats_collect(stats, idx, &ret); | |
2014 | return (__u32)ret.lc_count; | |
2015 | #else | |
2016 | return 0; | |
2017 | #endif | |
2018 | } | |
2019 | ||
2020 | /** | |
2021 | * Output site statistical counters into a buffer, in the order: busy/total | |
2022 | * populated/buckets max_search created hits misses races death_races purged. | |
| * Suitable for lprocfs_rd_*()-style functions. | |
2023 | */ | |
2024 | int lu_site_stats_print(const struct lu_site *s, char *page, int count) | |
2025 | { | |
2026 | lu_site_stats_t stats; | |
2027 | ||
2028 | memset(&stats, 0, sizeof(stats)); | |
2029 | lu_site_stats_get(s->ls_obj_hash, &stats, 1); | |
2030 | ||
2031 | return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n", | |
2032 | stats.lss_busy, | |
2033 | stats.lss_total, | |
2034 | stats.lss_populated, | |
2035 | CFS_HASH_NHLIST(s->ls_obj_hash), | |
2036 | stats.lss_max_search, | |
2037 | ls_stats_read(s->ls_stats, LU_SS_CREATED), | |
2038 | ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), | |
2039 | ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), | |
2040 | ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), | |
2041 | ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), | |
2042 | ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); | |
2043 | } | |
2044 | EXPORT_SYMBOL(lu_site_stats_print); | |
2045 | ||
2046 | /** | |
2047 | * Helper function to initialize a number of kmem slab caches at once. | |
2048 | */ | |
2049 | int lu_kmem_init(struct lu_kmem_descr *caches) | |
2050 | { | |
2051 | int result; | |
2052 | struct lu_kmem_descr *iter = caches; | |
2053 | ||
2054 | for (result = 0; iter->ckd_cache != NULL; ++iter) { | |
2055 | *iter->ckd_cache = kmem_cache_create(iter->ckd_name, | |
2056 | iter->ckd_size, | |
2057 | 0, 0, NULL); | |
2058 | if (*iter->ckd_cache == NULL) { | |
2059 | result = -ENOMEM; | |
2060 | /* free all previously allocated caches */ | |
2061 | lu_kmem_fini(caches); | |
2062 | break; | |
2063 | } | |
2064 | } | |
2065 | return result; | |
2066 | } | |
2067 | EXPORT_SYMBOL(lu_kmem_init); | |
2068 | ||
2069 | /** | |
2070 | * Helper function to finalize a number of kmem slab cached at once. Dual to | |
2071 | * lu_kmem_init(). | |
2072 | */ | |
2073 | void lu_kmem_fini(struct lu_kmem_descr *caches) | |
2074 | { | |
2075 | for (; caches->ckd_cache != NULL; ++caches) { | |
2076 | if (*caches->ckd_cache != NULL) { | |
2077 | kmem_cache_destroy(*caches->ckd_cache); | |
2078 | *caches->ckd_cache = NULL; | |
2079 | } | |
2080 | } | |
2081 | } | |
2082 | EXPORT_SYMBOL(lu_kmem_fini); | |
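| ||
| /* | |
|  * Usage sketch (illustrative only, hypothetical foo_* names): callers | |
|  * describe their caches in an array terminated by a NULL ckd_cache | |
|  * and pass it to lu_kmem_init()/lu_kmem_fini() as a pair: | |
|  * | |
|  *	static struct kmem_cache *foo_object_kmem; | |
|  *	static struct lu_kmem_descr foo_caches[] = { | |
|  *		{ | |
|  *			.ckd_cache = &foo_object_kmem, | |
|  *			.ckd_name  = "foo_object_kmem", | |
|  *			.ckd_size  = sizeof(struct foo_object) | |
|  *		}, | |
|  *		{ | |
|  *			.ckd_cache = NULL | |
|  *		} | |
|  *	}; | |
|  * | |
|  *	rc = lu_kmem_init(foo_caches); | |
|  */ | |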
2083 | ||
2084 | /** | |
2085 | * Temporary solution to be able to assign a fid in ->do_create(), | |
2086 | * until we have fully functional OST fids. | |
2087 | */ | |
2088 | void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, | |
2089 | const struct lu_fid *fid) | |
2090 | { | |
2091 | struct lu_site *s = o->lo_dev->ld_site; | |
2092 | struct lu_fid *old = &o->lo_header->loh_fid; | |
2093 | struct lu_site_bkt_data *bkt; | |
2094 | struct lu_object *shadow; | |
2095 | wait_queue_t waiter; | |
2096 | cfs_hash_t *hs; | |
2097 | cfs_hash_bd_t bd; | |
2098 | __u64 version = 0; | |
2099 | ||
2100 | LASSERT(fid_is_zero(old)); | |
2101 | ||
2102 | hs = s->ls_obj_hash; | |
2103 | cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1); | |
2104 | shadow = htable_lookup(s, &bd, fid, &waiter, &version); | |
2105 | /* supposed to be unique */ | |
2106 | LASSERT(shadow == NULL); | |
2107 | *old = *fid; | |
2108 | bkt = cfs_hash_bd_extra_get(hs, &bd); | |
2109 | cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); | |
2110 | bkt->lsb_busy++; | |
2111 | cfs_hash_bd_unlock(hs, &bd, 1); | |
2112 | } | |
2113 | EXPORT_SYMBOL(lu_object_assign_fid); | |
2114 | ||
2115 | /** | |
2116 | * Allocates an object with a zero (not yet assigned) fid. | |
2117 | * XXX: temporary solution to be able to assign a fid in ->do_create(), | |
2118 | * until we have fully functional OST fids. | |
2119 | */ | |
2120 | struct lu_object *lu_object_anon(const struct lu_env *env, | |
2121 | struct lu_device *dev, | |
2122 | const struct lu_object_conf *conf) | |
2123 | { | |
2124 | struct lu_fid fid; | |
2125 | struct lu_object *o; | |
2126 | ||
2127 | fid_zero(&fid); | |
2128 | o = lu_object_alloc(env, dev, &fid, conf); | |
2129 | ||
2130 | return o; | |
2131 | } | |
2132 | EXPORT_SYMBOL(lu_object_anon); | |
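| ||
| /* | |
|  * Usage sketch (illustrative only): an OST-style creation path can | |
|  * allocate an anonymous object first and assign the real fid once it | |
|  * is known (error handling elided): | |
|  * | |
|  *	o = lu_object_anon(env, dev, conf); | |
|  *	if (!IS_ERR(o)) { | |
|  *		... compute the fid, e.g. in ->do_create() ... | |
|  *		lu_object_assign_fid(env, o, &fid); | |
|  *	} | |
|  */ | |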
2133 | ||
2134 | struct lu_buf LU_BUF_NULL = { | |
2135 | .lb_buf = NULL, | |
2136 | .lb_len = 0 | |
2137 | }; | |
2138 | EXPORT_SYMBOL(LU_BUF_NULL); | |
2139 | ||
2140 | void lu_buf_free(struct lu_buf *buf) | |
2141 | { | |
2142 | LASSERT(buf); | |
2143 | if (buf->lb_buf) { | |
2144 | LASSERT(buf->lb_len > 0); | |
2145 | OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); | |
2146 | buf->lb_buf = NULL; | |
2147 | buf->lb_len = 0; | |
2148 | } | |
2149 | } | |
2150 | EXPORT_SYMBOL(lu_buf_free); | |
2151 | ||
2152 | void lu_buf_alloc(struct lu_buf *buf, int size) | |
2153 | { | |
2154 | LASSERT(buf); | |
2155 | LASSERT(buf->lb_buf == NULL); | |
2156 | LASSERT(buf->lb_len == 0); | |
2157 | OBD_ALLOC_LARGE(buf->lb_buf, size); | |
2158 | if (likely(buf->lb_buf)) | |
2159 | buf->lb_len = size; | |
2160 | } | |
2161 | EXPORT_SYMBOL(lu_buf_alloc); | |
2162 | ||
2163 | void lu_buf_realloc(struct lu_buf *buf, int size) | |
2164 | { | |
2165 | lu_buf_free(buf); | |
2166 | lu_buf_alloc(buf, size); | |
2167 | } | |
2168 | EXPORT_SYMBOL(lu_buf_realloc); | |
2169 | ||
2170 | struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len) | |
2171 | { | |
2172 | if (buf->lb_buf == NULL && buf->lb_len == 0) | |
2173 | lu_buf_alloc(buf, len); | |
2174 | ||
2175 | if ((len > buf->lb_len) && (buf->lb_buf != NULL)) | |
2176 | lu_buf_realloc(buf, len); | |
2177 | ||
2178 | return buf; | |
2179 | } | |
2180 | EXPORT_SYMBOL(lu_buf_check_and_alloc); | |
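| ||
| /* | |
|  * Usage sketch (illustrative only): a lu_buf usually starts out as | |
|  * LU_BUF_NULL, is sized on demand, and is freed exactly once: | |
|  * | |
|  *	struct lu_buf buf = LU_BUF_NULL; | |
|  * | |
|  *	lu_buf_check_and_alloc(&buf, len); | |
|  *	if (buf.lb_buf != NULL) { | |
|  *		... use buf.lb_buf, buf.lb_len ... | |
|  *	} | |
|  *	lu_buf_free(&buf); | |
|  */ | |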
2181 | ||
2182 | /** | |
2183 | * Increase the size of \a buf. | |
2184 | * Preserves the old data in the buffer; | |
2185 | * the old buffer remains unchanged on error. | |
2186 | * \retval 0 or -ENOMEM | |
2187 | */ | |
2188 | int lu_buf_check_and_grow(struct lu_buf *buf, int len) | |
2189 | { | |
2190 | char *ptr; | |
2191 | ||
2192 | if (len <= buf->lb_len) | |
2193 | return 0; | |
2194 | ||
2195 | OBD_ALLOC_LARGE(ptr, len); | |
2196 | if (ptr == NULL) | |
2197 | return -ENOMEM; | |
2198 | ||
2199 | /* copy the old data over, then free the old buffer */ | |
2200 | if (buf->lb_buf != NULL) { | |
2201 | memcpy(ptr, buf->lb_buf, buf->lb_len); | |
2202 | OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); | |
2203 | } | |
2204 | ||
2205 | buf->lb_buf = ptr; | |
2206 | buf->lb_len = len; | |
2207 | return 0; | |
2208 | } | |
2209 | EXPORT_SYMBOL(lu_buf_check_and_grow); |