ocfs2: Make the ocfs2_caching_info structure self-contained.
fs/ocfs2/uptodate.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * uptodate.c
 *
 * Tracking the up-to-date-ness of a local buffer_head with respect to
 * the cluster.
 *
 * Copyright (C) 2002, 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Standard buffer head caching flags (uptodate, etc) are insufficient
 * in a clustered environment - a buffer may be marked up to date on
 * our local node but could have been modified by another cluster
 * member.  As a result, an additional (and performant) caching scheme
 * is required.  A further requirement is that we consume as little
 * memory as possible - we never pin buffer_head structures in order
 * to cache them.
 *
 * We track the existence of up to date buffers on the inodes which
 * are associated with them.  Because we don't want to pin
 * buffer_heads, this is only a (strong) hint and several other checks
 * are made in the I/O path to ensure that we don't use a stale or
 * invalid buffer without going to disk:
 *	- buffer_jbd is used liberally - if a bh is in the journal on
 *	  this node then it *must* be up to date.
 *	- the standard buffer_uptodate() macro is used to detect buffers
 *	  which may be invalid (even if we have an up to date tracking
 *	  item for them)
 *
 * For a full understanding of how this code works together, one
 * should read the callers in dlmglue.c, the I/O functions in
 * buffer_head_io.c and ocfs2_journal_access in journal.c.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/rbtree.h>
#ifndef CONFIG_OCFS2_COMPAT_JBD
# include <linux/jbd2.h>
#else
# include <linux/jbd.h>
#endif

#define MLOG_MASK_PREFIX ML_UPTODATE

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "inode.h"
#include "uptodate.h"

struct ocfs2_meta_cache_item {
	struct rb_node	c_node;
	sector_t	c_block;
};

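/*
 * For reference, the state this file manipulates lives in struct
 * ocfs2_caching_info.  The authoritative definition is in ocfs2.h;
 * what follows is only a rough sketch of its shape, as implied by the
 * accesses below:
 *
 *	struct ocfs2_caching_info {
 *		unsigned int	ci_flags;	(OCFS2_CACHE_FL_INLINE, ...)
 *		spinlock_t	*ci_lock;	(guards the fields below)
 *		struct mutex	*ci_io_mutex;	(serializes insertion paths)
 *		unsigned int	ci_num_cached;
 *		union {
 *			sector_t	ci_array[OCFS2_CACHE_INFO_MAX_ARRAY];
 *			struct rb_root	ci_tree;
 *		} ci_cache;
 *	};
 *
 * Small caches live in the inline array; once it fills up, entries are
 * migrated into the rbtree (see ocfs2_expand_cache()).
 */
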
static struct kmem_cache *ocfs2_uptodate_cachep = NULL;

void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
			       spinlock_t *cache_lock,
			       struct mutex *io_mutex)
{
	ci->ci_lock = cache_lock;
	ci->ci_io_mutex = io_mutex;
	ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
	ci->ci_num_cached = 0;
}

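/*
 * A minimal usage sketch (assuming the embedding in ocfs2_inode_info
 * and the field names used elsewhere in this file): the owner hands
 * its own spinlock and I/O mutex to the cache at initialization time,
 * which is what makes the caching info self-contained.
 *
 *	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 *
 *	ocfs2_metadata_cache_init(&oi->ip_metadata_cache,
 *				  &oi->ip_lock,
 *				  &oi->ip_io_mutex);
 */
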
/* No lock taken here as 'root' is not expected to be visible to other
 * processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
{
	unsigned int purged = 0;
	struct rb_node *node;
	struct ocfs2_meta_cache_item *item;

	while ((node = rb_last(root)) != NULL) {
		item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);

		mlog(0, "Purge item %llu\n",
		     (unsigned long long) item->c_block);

		rb_erase(&item->c_node, root);
		kmem_cache_free(ocfs2_uptodate_cachep, item);

		purged++;
	}
	return purged;
}

/* Called from the locking code and from ocfs2_clear_inode.  Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	unsigned int tree, to_purge, purged;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
	struct rb_root root = RB_ROOT;

	spin_lock(ci->ci_lock);
	tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
	to_purge = ci->ci_num_cached;

	mlog(0, "Purge %u %s items from Inode %llu\n", to_purge,
	     tree ? "tree" : "array", (unsigned long long)oi->ip_blkno);

	/* If we're a tree, save off the root so that we can safely
	 * initialize the cache.  We do the work to free tree members
	 * without the spinlock. */
	if (tree)
		root = ci->ci_cache.ci_tree;

	ocfs2_metadata_cache_init(ci, ci->ci_lock, ci->ci_io_mutex);
	spin_unlock(ci->ci_lock);

	purged = ocfs2_purge_copied_metadata_tree(&root);
	/* If possible, track the number wiped so that we can more
	 * easily detect counting errors.  Unfortunately, this is only
	 * meaningful for trees. */
	if (tree && purged != to_purge)
		mlog(ML_ERROR, "Inode %llu, count = %u, purged = %u\n",
		     (unsigned long long)oi->ip_blkno, to_purge, purged);
}

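/*
 * The purge above is an instance of the usual detach-then-free
 * pattern, sketched here for clarity (not additional code):
 *
 *	spin_lock(ci->ci_lock);
 *	root = ci->ci_cache.ci_tree;		(steal the whole tree)
 *	ocfs2_metadata_cache_init(ci, ...);	(cache is now empty)
 *	spin_unlock(ci->ci_lock);
 *	ocfs2_purge_copied_metadata_tree(&root);
 *
 * Copying the rb_root under the lock is cheap; the expensive
 * kmem_cache_free() calls then happen with ci_lock dropped.
 */
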
/* Returns the index in the cache array, -1 if not found.
 * Requires ci_lock. */
static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
				    sector_t item)
{
	int i;

	for (i = 0; i < ci->ci_num_cached; i++) {
		if (item == ci->ci_cache.ci_array[i])
			return i;
	}

	return -1;
}

/* Returns the cache item if found, otherwise NULL.
 * Requires ci_lock. */
static struct ocfs2_meta_cache_item *
ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
			sector_t block)
{
	struct rb_node *n = ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *item = NULL;

	while (n) {
		item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);

		if (block < item->c_block)
			n = n->rb_left;
		else if (block > item->c_block)
			n = n->rb_right;
		else
			return item;
	}

	return NULL;
}

static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
			       struct buffer_head *bh)
{
	int index = -1;
	struct ocfs2_meta_cache_item *item = NULL;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	spin_lock(ci->ci_lock);

	mlog(0, "Inode %llu, query block %llu (inline = %u)\n",
	     (unsigned long long)oi->ip_blkno,
	     (unsigned long long) bh->b_blocknr,
	     !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
		index = ocfs2_search_cache_array(ci, bh->b_blocknr);
	else
		item = ocfs2_search_cache_tree(ci, bh->b_blocknr);

	spin_unlock(ci->ci_lock);

	mlog(0, "index = %d, item = %p\n", index, item);

	return (index != -1) || (item != NULL);
}

/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct inode *inode,
			  struct buffer_head *bh)
{
	/* Doesn't matter if the bh is in our cache or not -- if it's
	 * not marked uptodate then we know it can't have correct
	 * data. */
	if (!buffer_uptodate(bh))
		return 0;

	/* OCFS2 does not allow multiple nodes to be changing the same
	 * block at the same time. */
	if (buffer_jbd(bh))
		return 1;

	/* Ok, locally the buffer is marked as up to date, now search
	 * our cache to see if we can trust that. */
	return ocfs2_buffer_cached(OCFS2_I(inode), bh);
}

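/*
 * A hypothetical caller-side sketch (illustrative only -- the real
 * read path lives in buffer_head_io.c): trust a locally-uptodate bh
 * only when ocfs2_buffer_uptodate() vouches for it, otherwise re-read
 * the block from disk and record it in the cache.
 */
#if 0
static int example_read_block(struct inode *inode, u64 blkno,
			      struct buffer_head **bhp)
{
	struct buffer_head *bh = sb_getblk(inode->i_sb, blkno);

	if (!bh)
		return -EIO;

	if (!ocfs2_buffer_uptodate(inode, bh)) {
		/* The cache can't vouch for this buffer -- go to disk. */
		lock_buffer(bh);
		clear_buffer_uptodate(bh);
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);
			return -EIO;
		}
		ocfs2_set_buffer_uptodate(inode, bh);
	}

	*bhp = bh;
	return 0;
}
#endif
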
/*
 * Determine whether a buffer is currently out on a read-ahead request.
 * ci_io_mutex should be held to serialize submitters with the logic here.
 */
int ocfs2_buffer_read_ahead(struct inode *inode,
			    struct buffer_head *bh)
{
	return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh);
}

/* Requires ci_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
				     sector_t block)
{
	BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);

	mlog(0, "block %llu takes position %u\n", (unsigned long long) block,
	     ci->ci_num_cached);

	ci->ci_cache.ci_array[ci->ci_num_cached] = block;
	ci->ci_num_cached++;
}

/* By now the caller should have checked that the item does *not*
 * exist in the tree.
 * Requires ci_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
				      struct ocfs2_meta_cache_item *new)
{
	sector_t block = new->c_block;
	struct rb_node *parent = NULL;
	struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *tmp;

	mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block,
	     ci->ci_num_cached);

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);

		if (block < tmp->c_block)
			p = &(*p)->rb_left;
		else if (block > tmp->c_block)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate block %llu cached!\n",
			     (unsigned long long) block);
			BUG();
		}
	}

	rb_link_node(&new->c_node, parent, p);
	rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached++;
}

static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi,
					     struct ocfs2_caching_info *ci)
{
	assert_spin_locked(ci->ci_lock);

	return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
		(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}

/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide.  NULL the
 * pointers in tree after we use them - this allows caller to detect
 * when to free in case of error. */
static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
			       struct ocfs2_meta_cache_item **tree)
{
	int i;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
			"Inode %llu, num cached = %u, should be %u\n",
			(unsigned long long)oi->ip_blkno, ci->ci_num_cached,
			OCFS2_CACHE_INFO_MAX_ARRAY);
	mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Inode %llu not marked as inline anymore!\n",
			(unsigned long long)oi->ip_blkno);
	assert_spin_locked(ci->ci_lock);

	/* Be careful to initialize the tree members *first* because
	 * once the ci_tree is used, the array is junk... */
	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
		tree[i]->c_block = ci->ci_cache.ci_array[i];

	ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
	ci->ci_cache.ci_tree = RB_ROOT;
	/* this will be set again by __ocfs2_insert_cache_tree */
	ci->ci_num_cached = 0;

	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
		__ocfs2_insert_cache_tree(ci, tree[i]);
		tree[i] = NULL;
	}

	mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n",
	     (unsigned long long)oi->ip_blkno, ci->ci_flags, ci->ci_num_cached);
}

/* Slow path function - memory allocation is necessary.  See the
 * comment above ocfs2_set_buffer_uptodate for more information. */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
					sector_t block,
					int expand_tree)
{
	int i;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
	struct ocfs2_meta_cache_item *new = NULL;
	struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
		{ NULL, };

	mlog(0, "Inode %llu, block %llu, expand = %d\n",
	     (unsigned long long)oi->ip_blkno,
	     (unsigned long long)block, expand_tree);

	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
	if (!new) {
		mlog_errno(-ENOMEM);
		return;
	}
	new->c_block = block;

	if (expand_tree) {
		/* Do *not* allocate an array here - the removal code
		 * has no way of tracking that. */
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
						   GFP_NOFS);
			if (!tree[i]) {
				mlog_errno(-ENOMEM);
				goto out_free;
			}

			/* These are initialized in ocfs2_expand_cache! */
		}
	}

	spin_lock(ci->ci_lock);
	if (ocfs2_insert_can_use_array(oi, ci)) {
		mlog(0, "Someone cleared the tree underneath us\n");
		/* Ok, items were removed from the cache in between
		 * locks.  Detect this and revert back to the fast path */
		ocfs2_append_cache_array(ci, block);
		spin_unlock(ci->ci_lock);
		goto out_free;
	}

	if (expand_tree)
		ocfs2_expand_cache(oi, tree);

	__ocfs2_insert_cache_tree(ci, new);
	spin_unlock(ci->ci_lock);

	new = NULL;
out_free:
	if (new)
		kmem_cache_free(ocfs2_uptodate_cachep, new);

	/* If these were used, then ocfs2_expand_cache re-set them to
	 * NULL for us. */
	if (tree[0]) {
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
			if (tree[i])
				kmem_cache_free(ocfs2_uptodate_cachep,
						tree[i]);
	}
}

/* Item insertion is guarded by ci_io_mutex, so the insertion path takes
 * advantage of this by not rechecking for a duplicate insert during
 * the slow case.  Additionally, if the cache needs to be bumped up to
 * a tree, the code will not recheck after acquiring the lock --
 * multiple paths cannot be expanding to a tree at the same time.
 *
 * The slow path takes into account that items can be removed
 * (including the whole tree wiped and reset) while this process is out
 * allocating memory.  In those cases, it reverts back to the fast
 * path.
 *
 * Note that this function may actually fail to insert the block if
 * memory cannot be allocated.  This is not fatal however (but may
 * result in a performance penalty).
 *
 * Readahead buffers can be passed in here before the I/O request is
 * completed.
 */
void ocfs2_set_buffer_uptodate(struct inode *inode,
			       struct buffer_head *bh)
{
	int expand;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	/* The block may very well exist in our cache already, so avoid
	 * doing any more work in that case. */
	if (ocfs2_buffer_cached(oi, bh))
		return;

	mlog(0, "Inode %llu, inserting block %llu\n",
	     (unsigned long long)oi->ip_blkno,
	     (unsigned long long)bh->b_blocknr);

	/* No need to recheck under spinlock - insertion is guarded by
	 * ci_io_mutex */
	spin_lock(ci->ci_lock);
	if (ocfs2_insert_can_use_array(oi, ci)) {
		/* Fast case - it's an array and there's a free
		 * spot. */
		ocfs2_append_cache_array(ci, bh->b_blocknr);
		spin_unlock(ci->ci_lock);
		return;
	}

	expand = 0;
	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		/* We need to bump things up to a tree. */
		expand = 1;
	}
	spin_unlock(ci->ci_lock);

	__ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand);
}

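/*
 * An illustrative timeline of the race the slow path tolerates:
 *
 *	inserter (holds ci_io_mutex)		another thread
 *	----------------------------		--------------
 *	array full, drop ci_lock
 *	kmem_cache_alloc(..., GFP_NOFS)		purges/removes entries
 *	retake ci_lock
 *	ocfs2_insert_can_use_array() true
 *	  -> append to array, free items
 *
 * ci_io_mutex only excludes other *inserters*; removal and purge run
 * under ci_lock alone, which is why the recheck above is required.
 */
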
/* Called against a newly allocated buffer.  Most likely nobody should
 * be able to read this sort of metadata while it's still being
 * allocated, but this is careful to take ci_io_mutex anyway. */
void ocfs2_set_new_buffer_uptodate(struct inode *inode,
				   struct buffer_head *bh)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	/* This should definitely *not* exist in our cache */
	BUG_ON(ocfs2_buffer_cached(oi, bh));

	set_buffer_uptodate(bh);

	mutex_lock(ci->ci_io_mutex);
	ocfs2_set_buffer_uptodate(inode, bh);
	mutex_unlock(ci->ci_io_mutex);
}

/* Requires ci_lock. */
static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
					int index)
{
	sector_t *array = ci->ci_cache.ci_array;
	int bytes;

	BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
	BUG_ON(index >= ci->ci_num_cached);
	BUG_ON(!ci->ci_num_cached);

	mlog(0, "remove index %d (num_cached = %u)\n", index,
	     ci->ci_num_cached);

	ci->ci_num_cached--;

	/* don't need to copy if the array is now empty, or if we
	 * removed at the tail */
	if (ci->ci_num_cached && index < ci->ci_num_cached) {
		bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
		memmove(&array[index], &array[index + 1], bytes);
	}
}

/* Requires ci_lock. */
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
				       struct ocfs2_meta_cache_item *item)
{
	mlog(0, "remove block %llu from tree\n",
	     (unsigned long long) item->c_block);

	rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached--;
}

static void ocfs2_remove_block_from_cache(struct inode *inode,
					  sector_t block)
{
	int index;
	struct ocfs2_meta_cache_item *item = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	spin_lock(ci->ci_lock);
	mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n",
	     (unsigned long long)oi->ip_blkno,
	     (unsigned long long) block, ci->ci_num_cached,
	     ci->ci_flags & OCFS2_CACHE_FL_INLINE);

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		index = ocfs2_search_cache_array(ci, block);
		if (index != -1)
			ocfs2_remove_metadata_array(ci, index);
	} else {
		item = ocfs2_search_cache_tree(ci, block);
		if (item)
			ocfs2_remove_metadata_tree(ci, item);
	}
	spin_unlock(ci->ci_lock);

	if (item)
		kmem_cache_free(ocfs2_uptodate_cachep, item);
}

/*
 * Called when we remove a chunk of metadata from an inode.  We don't
 * bother reverting things to an inlined array in the case of a remove
 * which moves us back under the limit.
 */
void ocfs2_remove_from_cache(struct inode *inode,
			     struct buffer_head *bh)
{
	sector_t block = bh->b_blocknr;

	ocfs2_remove_block_from_cache(inode, block);
}

/* Called when we remove xattr clusters from an inode. */
void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode,
					    sector_t block,
					    u32 c_len)
{
	unsigned int i, b_len = ocfs2_clusters_to_blocks(inode->i_sb, 1) * c_len;

	for (i = 0; i < b_len; i++, block++)
		ocfs2_remove_block_from_cache(inode, block);
}

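/*
 * Worked example with hypothetical geometry: for 4KB blocks and 64KB
 * clusters, ocfs2_clusters_to_blocks(sb, 1) == 16, so a removal of
 * c_len == 3 clusters strips b_len == 48 consecutive block numbers
 * from the cache, starting at 'block'.
 */
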
int __init init_ocfs2_uptodate_cache(void)
{
	ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
				  sizeof(struct ocfs2_meta_cache_item),
				  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ocfs2_uptodate_cachep)
		return -ENOMEM;

	mlog(0, "%u inlined cache items per inode.\n",
	     OCFS2_CACHE_INFO_MAX_ARRAY);

	return 0;
}

void exit_ocfs2_uptodate_cache(void)
{
	if (ocfs2_uptodate_cachep)
		kmem_cache_destroy(ocfs2_uptodate_cachep);
}