Staging: lustre: obdclass: Declare function as static
drivers/staging/lustre/lustre/obdclass/cl_object.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/*
 * Locking.
 *
 *  i_mutex
 *      PG_locked
 *          ->coh_page_guard
 *          ->coh_lock_guard
 *          ->coh_attr_guard
 *          ->ls_guard
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"
/* class_put_type() */
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include "../../include/linux/libcfs/libcfs_hash.h" /* for cfs_hash stuff */
#include "../include/cl_object.h"
#include "cl_internal.h"

static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_page_guard */
static struct lock_class_key cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;
/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
	int result;

	result = lu_object_header_init(&h->coh_lu);
	if (result == 0) {
		spin_lock_init(&h->coh_page_guard);
		spin_lock_init(&h->coh_lock_guard);
		spin_lock_init(&h->coh_attr_guard);
		lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
		lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
		lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
		h->coh_pages = 0;
		/* XXX hard coded GFP_* mask. */
		INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
		INIT_LIST_HEAD(&h->coh_locks);
		h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
	}
	return result;
}
EXPORT_SYMBOL(cl_object_header_init);

/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either a cached or a newly created object. An additional reference
 * on the returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
				 struct cl_device *cd, const struct lu_fid *fid,
				 const struct cl_object_conf *c)
{
	might_sleep();
	return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
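
/*
 * Usage sketch (illustrative only, not part of the original code): a lookup
 * pairs cl_object_find() with cl_object_put() once the caller is done with
 * the object; the variable names below are hypothetical.
 *
 *	struct cl_object *obj;
 *
 *	obj = cl_object_find(env, cd, fid, conf);
 *	if (!IS_ERR(obj)) {
 *		... use obj ...
 *		cl_object_put(env, obj);
 *	}
 */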

/**
 * Releases a reference on \a o.
 *
 * When the last reference is released, the object is returned to the cache,
 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in
 * its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);

/**
 * Acquires an additional reference to the object \a o.
 *
 * This can only be used to acquire an _additional_ reference, i.e., the
 * caller must already possess at least one reference to \a o before calling
 * this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
	lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);

/**
 * Returns the top-object for a given \a o.
 *
 * \see cl_page_top(), cl_io_top()
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
	struct cl_object_header *hdr = cl_object_header(o);
	struct cl_object *top;

	while (hdr->coh_parent != NULL)
		hdr = hdr->coh_parent;

	top = lu2cl(lu_object_top(&hdr->coh_lu));
	CDEBUG(D_TRACE, "%p -> %p\n", o, top);
	return top;
}
EXPORT_SYMBOL(cl_object_top);

/**
 * Returns a pointer to the lock protecting data-attributes for the given
 * object \a o.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
	return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}

/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing until the lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_set().
 */
void cl_object_attr_lock(struct cl_object *o)
	__acquires(cl_object_attr_guard(o))
{
	spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases the data-attributes lock acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
	__releases(cl_object_attr_guard(o))
{
	spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);

/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in the parts of \a attr that it is responsible for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
		       struct cl_attr *attr)
{
	struct lu_object_header *top;
	int result;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get != NULL) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_get);

/**
 * Updates data-attributes of an object \a obj.
 *
 * Only the attributes mentioned in the validity bit-mask \a v are updated.
 * Calls cl_object_operations::coo_attr_set() on every layer, bottom to top.
 */
int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
		       const struct cl_attr *attr, unsigned v)
{
	struct lu_object_header *top;
	int result;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry_reverse(obj, &top->loh_layers,
				    co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_set != NULL) {
			result = obj->co_ops->coo_attr_set(env, obj, attr, v);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_set);
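
/*
 * Usage sketch (illustrative only): attribute reads and updates must be
 * bracketed by the attr guard. The names below are hypothetical, and the
 * CAT_SIZE valid bit is assumed from cl_object.h.
 *
 *	int rc;
 *
 *	cl_object_attr_lock(obj);
 *	rc = cl_object_attr_get(env, obj, attr);
 *	if (rc == 0) {
 *		attr->cat_size = new_size;
 *		rc = cl_object_attr_set(env, obj, attr, CAT_SIZE);
 *	}
 *	cl_object_attr_unlock(obj);
 */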

/**
 * Notifies layers (bottom-to-top) that a glimpse AST was received.
 *
 * Layers have to fill the \a lvb fields with information that will be
 * shipped back to the glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
		      struct ost_lvb *lvb)
{
	struct lu_object_header *top;
	int result;

	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry_reverse(obj, &top->loh_layers,
				    co_lu.lo_linkage) {
		if (obj->co_ops->coo_glimpse != NULL) {
			result = obj->co_ops->coo_glimpse(env, obj, lvb);
			if (result != 0)
				break;
		}
	}
	LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
			 "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n",
			 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
			 lvb->lvb_ctime, lvb->lvb_blocks);
	return result;
}
EXPORT_SYMBOL(cl_object_glimpse);

/**
 * Updates the configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
		const struct cl_object_conf *conf)
{
	struct lu_object_header *top;
	int result;

	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_conf_set != NULL) {
			result = obj->co_ops->coo_conf_set(env, obj, conf);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_conf_set);

/**
 * Helper function removing all object locks and marking the object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_header *hdr;

	hdr = cl_object_header(obj);
	LASSERT(hdr->coh_tree.rnode == NULL);
	LASSERT(hdr->coh_pages == 0);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
	/*
	 * Destroy all locks. Object destruction (including cl_inode_fini())
	 * cannot cancel the locks, because in the case of a local client,
	 * where client and server share the same thread running
	 * prune_icache(), this can dead-lock with ldlm_cancel_handler()
	 * waiting on __wait_on_freeing_inode().
	 */
	cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);

/**
 * Prunes caches of pages and locks for this object.
 */
void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	cl_pages_prune(env, obj);
	cl_locks_prune(env, obj, 1);
}
EXPORT_SYMBOL(cl_object_prune);

void cache_stats_init(struct cache_stats *cs, const char *name)
{
	int i;

	cs->cs_name = name;
	for (i = 0; i < CS_NR; i++)
		atomic_set(&cs->cs_stats[i], 0);
}

static int cache_stats_print(const struct cache_stats *cs,
			     struct seq_file *m, int h)
{
	int i;
	/*
	 *   lookup    hit  total cached create
	 * env: ...... ...... ...... ...... ......
	 */
	if (h) {
		const char *names[CS_NR] = CS_NAMES;

		seq_printf(m, "%6s", " ");
		for (i = 0; i < CS_NR; i++)
			seq_printf(m, "%8s", names[i]);
		seq_printf(m, "\n");
	}

	seq_printf(m, "%5.5s:", cs->cs_name);
	for (i = 0; i < CS_NR; i++)
		seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
	return 0;
}

/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
	int i;
	int result;

	result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
	if (result == 0) {
		cache_stats_init(&s->cs_pages, "pages");
		cache_stats_init(&s->cs_locks, "locks");
		for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
			atomic_set(&s->cs_pages_state[i], 0);
		for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
			atomic_set(&s->cs_locks_state[i], 0);
	}
	return result;
}
EXPORT_SYMBOL(cl_site_init);

/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
	lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);

static struct cache_stats cl_env_stats = {
	.cs_name  = "envs",
	.cs_stats = { ATOMIC_INIT(0), }
};

/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
	int i;
	static const char *pstate[] = {
		[CPS_CACHED]  = "c",
		[CPS_OWNED]   = "o",
		[CPS_PAGEOUT] = "w",
		[CPS_PAGEIN]  = "r",
		[CPS_FREEING] = "f"
	};
	static const char *lstate[] = {
		[CLS_NEW]       = "n",
		[CLS_QUEUING]   = "q",
		[CLS_ENQUEUED]  = "e",
		[CLS_HELD]      = "h",
		[CLS_INTRANSIT] = "t",
		[CLS_CACHED]    = "c",
		[CLS_FREEING]   = "f"
	};
/*
       lookup    hit  total   busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
  env: ...... ...... ...... ...... ......
 */
	lu_site_stats_print(&site->cs_lu, m);
	cache_stats_print(&site->cs_pages, m, 1);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
		seq_printf(m, "%s: %u ", pstate[i],
			   atomic_read(&site->cs_pages_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&site->cs_locks, m, 0);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
		seq_printf(m, "%s: %u ", lstate[i],
			   atomic_read(&site->cs_locks_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&cl_env_stats, m, 0);
	seq_printf(m, "\n");
	return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);

/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store the cl_env pointer in task-specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info,
 * because Lustre code may call into other filesystems, which have their own
 * assumptions about journal_info. Currently the following fields in
 * task_struct have been identified as usable for this purpose:
 * - cl_env: for liblustre.
 * - tux_info: only on RedHat kernels.
 * - ...
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into another part of the kernel
 * that uses those fields in task_struct without explicitly exiting
 * Lustre.
 *
 * If no space in task_struct is available, a hash will be used.
 * bz20044, bz22683.
 */

struct cl_env {
	void *ce_magic;
	struct lu_env ce_lu;
	struct lu_context ce_ses;

	/**
	 * This allows cl_env to be entered into cl_env_hash which implements
	 * the current thread -> client environment lookup.
	 */
	struct hlist_node ce_node;
	/**
	 * Owner for the current cl_env.
	 *
	 * If LL_TASK_CL_ENV is defined, this points to the owning current,
	 * for debugging purposes only;
	 * otherwise a hash is used, and this is the key for cfs_hash.
	 * Currently the pid of the current thread is stored. Note that using
	 * a thread pointer would lead to an unbalanced hash because of its
	 * allocation locality, which can vary across platforms, OSes, and
	 * even OS versions.
	 */
	void *ce_owner;

	/*
	 * Linkage into global list of all client environments. Used for
	 * garbage collection.
	 */
	struct list_head ce_linkage;
	/*
	 * Reference counter.
	 */
	int ce_ref;
	/*
	 * Debugging field: address of the caller who made original
	 * allocation.
	 */
	void *ce_debug;
};

#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)

static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
	CL_ENV_INC(busy);
}

/*
 * The implementation of using a hash table to connect cl_env and thread
 */

static struct cfs_hash *cl_env_hash;

static unsigned cl_env_hops_hash(struct cfs_hash *lh,
				 const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
	return cfs_hash_u64_hash((__u64)key, mask);
#else
	return cfs_hash_u32_hash((__u32)key, mask);
#endif
}

static void *cl_env_hops_obj(struct hlist_node *hn)
{
	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

	LASSERT(cle->ce_magic == &cl_env_init0);
	return (void *)cle;
}

static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
	struct cl_env *cle = cl_env_hops_obj(hn);

	LASSERT(cle->ce_owner != NULL);
	return (key == cle->ce_owner);
}

static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
{
	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

	LASSERT(cle->ce_magic == &cl_env_init0);
}

static struct cfs_hash_ops cl_env_hops = {
	.hs_hash       = cl_env_hops_hash,
	.hs_key        = cl_env_hops_obj,
	.hs_keycmp     = cl_env_hops_keycmp,
	.hs_object     = cl_env_hops_obj,
	.hs_get        = cl_env_hops_noop,
	.hs_put_locked = cl_env_hops_noop,
};

static inline struct cl_env *cl_env_fetch(void)
{
	struct cl_env *cle;

	cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
	return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
	if (cle) {
		int rc;

		LASSERT(cle->ce_owner == NULL);
		cle->ce_owner = (void *) (long) current->pid;
		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
					 &cle->ce_node);
		LASSERT(rc == 0);
	}
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
	void *cookie;

	LASSERT(cle->ce_owner == (void *) (long) current->pid);
	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
			      &cle->ce_node);
	LASSERT(cookie == cle);
	cle->ce_owner = NULL;
}

static int cl_env_store_init(void)
{
	cl_env_hash = cfs_hash_create("cl_env",
				      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
				      HASH_CL_ENV_BKT_BITS, 0,
				      CFS_HASH_MIN_THETA,
				      CFS_HASH_MAX_THETA,
				      &cl_env_hops,
				      CFS_HASH_RW_BKTLOCK);
	return cl_env_hash != NULL ? 0 : -ENOMEM;
}

static void cl_env_store_fini(void)
{
	cfs_hash_putref(cl_env_hash);
}

static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
	if (cle == NULL)
		cle = cl_env_fetch();

	if (cle && cle->ce_owner)
		cl_env_do_detach(cle);

	return cle;
}

static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
	struct lu_env *env;
	struct cl_env *cle;

	cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO);
	if (cle != NULL) {
		int rc;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		env = &cle->ce_lu;
		rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses,
					     ses_tags | LCT_SESSION);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
				cl_env_init0(cle, debug);
			} else
				lu_env_fini(env);
		}
		if (rc != 0) {
			kmem_cache_free(cl_env_kmem, cle);
			env = ERR_PTR(rc);
		} else {
			CL_ENV_INC(create);
			CL_ENV_INC(total);
		}
	} else
		env = ERR_PTR(-ENOMEM);
	return env;
}

static void cl_env_fini(struct cl_env *cle)
{
	CL_ENV_DEC(total);
	lu_context_fini(&cle->ce_lu.le_ctx);
	lu_context_fini(&cle->ce_ses);
	kmem_cache_free(cl_env_kmem, cle);
}

static inline struct cl_env *cl_env_container(struct lu_env *env)
{
	return container_of(env, struct cl_env, ce_lu);
}

static struct lu_env *cl_env_peek(int *refcheck)
{
	struct lu_env *env;
	struct cl_env *cle;

	CL_ENV_INC(lookup);

	/* check that we don't go far from untrusted pointer */
	CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

	env = NULL;
	cle = cl_env_fetch();
	if (cle != NULL) {
		CL_ENV_INC(hit);
		env = &cle->ce_lu;
		*refcheck = ++cle->ce_ref;
	}
	CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
	return env;
}

/**
 * Returns a lu_env: if there already is an environment associated with the
 * current thread, it is returned; otherwise, a new environment is allocated.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and a pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_put()
 */
struct lu_env *cl_env_get(int *refcheck)
{
	struct lu_env *env;

	env = cl_env_peek(refcheck);
	if (env == NULL) {
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default,
				 __builtin_return_address(0));

		if (!IS_ERR(env)) {
			struct cl_env *cle;

			cle = cl_env_container(env);
			cl_env_attach(cle);
			*refcheck = cle->ce_ref;
			CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
		}
	}
	return env;
}
EXPORT_SYMBOL(cl_env_get);

/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
	struct lu_env *env;

	LASSERT(cl_env_peek(refcheck) == NULL);
	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
{
	LASSERT(cle->ce_owner == NULL);
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}

/**
 * Release an environment.
 *
 * Decrements the \a env reference counter. When the counter drops to 0,
 * nothing in this thread is using the environment and it is returned to the
 * allocation cache, or freed straight away, if the cache is large enough.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle;

	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
		CL_ENV_DEC(busy);
		cl_env_detach(cle);
		cle->ce_debug = NULL;
		cl_env_exit(cle);
		cl_env_fini(cle);
	}
}
EXPORT_SYMBOL(cl_env_put);
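
/*
 * Usage sketch (illustrative only): the common get/put pattern with a shared
 * refcheck counter in one lexical scope; the names below are hypothetical.
 *
 *	struct lu_env *env;
 *	int refcheck;
 *
 *	env = cl_env_get(&refcheck);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	... use env ...
 *	cl_env_put(env, &refcheck);
 */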

/**
 * Declares a point of re-entrancy.
 *
 * \see cl_env_reexit()
 */
void *cl_env_reenter(void)
{
	return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);

/**
 * Exits re-entrancy.
 */
void cl_env_reexit(void *cookie)
{
	cl_env_detach(NULL);
	cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);

/**
 * Sets up a user-supplied \a env as the current environment. This is to be
 * used to guarantee that an environment exists even when cl_env_get() fails.
 * It is up to the user to ensure proper concurrency control.
 *
 * \see cl_env_unplant()
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);

	cl_env_attach(cle);
	cl_env_get(refcheck);
	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);

/**
 * Detaches an environment installed earlier by cl_env_implant().
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 1);

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

	cl_env_detach(cle);
	cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);
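
/*
 * Usage sketch (illustrative only): implant/unplant bracket a region where a
 * caller-owned environment must stay visible to this thread even if
 * cl_env_get() could otherwise fail; the names below are hypothetical.
 *
 *	int refcheck;
 *
 *	cl_env_implant(env, &refcheck);
 *	... code that relies on cl_env_get() finding env ...
 *	cl_env_unplant(env, &refcheck);
 */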

struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
	struct lu_env *env;

	nest->cen_cookie = NULL;
	env = cl_env_peek(&nest->cen_refcheck);
	if (env != NULL) {
		if (!cl_io_is_going(env))
			return env;
		cl_env_put(env, &nest->cen_refcheck);
		nest->cen_cookie = cl_env_reenter();
	}
	env = cl_env_get(&nest->cen_refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(nest->cen_cookie);
		return env;
	}

	LASSERT(!cl_io_is_going(env));
	return env;
}
EXPORT_SYMBOL(cl_env_nested_get);

void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
	cl_env_put(env, &nest->cen_refcheck);
	cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);
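
/*
 * Usage sketch (illustrative only): a nested environment for code that may
 * re-enter the client while an I/O is already in flight on the thread's
 * environment; the names below are hypothetical.
 *
 *	struct cl_env_nest nest;
 *	struct lu_env *env;
 *
 *	env = cl_env_nested_get(&nest);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	... use env; no cl_io is going in it ...
 *	cl_env_nested_put(&nest, env);
 */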

/**
 * Converts struct ost_lvb to struct cl_attr.
 *
 * \see cl_attr2lvb
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
	attr->cat_size = lvb->lvb_size;
	attr->cat_mtime = lvb->lvb_mtime;
	attr->cat_atime = lvb->lvb_atime;
	attr->cat_ctime = lvb->lvb_ctime;
	attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);

/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
				struct lu_device_type *ldt,
				struct lu_device *next)
{
	const char *typename;
	struct lu_device *d;

	LASSERT(ldt != NULL);

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site != NULL)
			d->ld_site = site;
		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
			lu_device_get(d);
			lu_ref_add(&d->ld_reference,
				   "lu-stack", &lu_site_init);
		} else {
			ldt->ldt_ops->ldto_device_free(env, d);
			CERROR("can't init device '%s', %d\n", typename, rc);
			d = ERR_PTR(rc);
		}
	} else
		CERROR("Cannot allocate device: '%s'\n", typename);
	return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);

/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
	lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);

int cl_lock_init(void);
void cl_lock_fini(void);

int cl_page_init(void);
void cl_page_fini(void);

static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
	return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);

static void *cl_key_init(const struct lu_context *ctx,
			 struct lu_context_key *key)
{
	struct cl_thread_info *info;

	info = cl0_key_init(ctx, key);
	if (!IS_ERR(info)) {
		int i;

		for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
			lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
	return info;
}

static void cl_key_fini(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info;
	int i;

	info = data;
	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
	cl0_key_fini(ctx, key, data);
}

static void cl_key_exit(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
		LASSERT(info->clt_counters[i].ctc_nr_held == 0);
		LASSERT(info->clt_counters[i].ctc_nr_used == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
		lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
}

static struct lu_context_key cl_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = cl_key_init,
	.lct_fini = cl_key_fini,
	.lct_exit = cl_key_exit
};

static struct lu_kmem_descr cl_object_caches[] = {
	{
		.ckd_cache = &cl_env_kmem,
		.ckd_name  = "cl_env_kmem",
		.ckd_size  = sizeof(struct cl_env)
	},
	{
		.ckd_cache = NULL
	}
};

/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
	int result;

	result = cl_env_store_init();
	if (result)
		return result;

	result = lu_kmem_init(cl_object_caches);
	if (result)
		goto out_store;

	LU_CONTEXT_KEY_INIT(&cl_key);
	result = lu_context_key_register(&cl_key);
	if (result)
		goto out_kmem;

	result = cl_lock_init();
	if (result)
		goto out_context;

	result = cl_page_init();
	if (result)
		goto out_lock;

	return 0;
out_lock:
	cl_lock_fini();
out_context:
	lu_context_key_degister(&cl_key);
out_kmem:
	lu_kmem_fini(cl_object_caches);
out_store:
	cl_env_store_fini();
	return result;
}

/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
	cl_lock_fini();
	cl_page_fini();
	lu_context_key_degister(&cl_key);
	lu_kmem_fini(cl_object_caches);
	cl_env_store_fini();
}