/*
 * mdt.c - meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"


#define NILFS_MDT_MAX_RA_BLOCKS	(16 - 1)

#define INIT_UNUSED_INODE_FIELDS

static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	void *kaddr;
	int ret;

	/* Caller excludes read accesses using the page lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
	if (init_block)
		init_block(inode, bh, kaddr);
	flush_dcache_page(bh->b_page);
	kunmap_atomic(kaddr, KM_USER0);

	set_buffer_uptodate(bh);
	nilfs_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	return 0;
}

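/*
 * nilfs_mdt_create_block() allocates the block at @block within a new
 * transaction and formats it with @init_block.  It returns -EEXIST as an
 * internal code when the buffer is already mapped or up to date, and
 * -EROFS when no writable super block is available.
 */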
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	if (!sb) {
		/*
		 * Make sure this function is not called from any
		 * read-only context.
		 */
		if (!nilfs->ns_writer) {
			WARN_ON(1);
			err = -EROFS;
			goto out;
		}
		sb = nilfs->ns_writer->s_super;
	}

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh) || buffer_mapped(bh))
		goto failed_bh;
#if 0
	/* The uptodate flag is not protected by the page lock, but
	   the mapped flag is. Thus, we don't have to wait for the buffer. */
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;
#endif

	bh->b_bdev = nilfs->ns_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);
 out:
	return err;
}

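/*
 * nilfs_mdt_submit_block() submits a read request for the block at @blkoff.
 * -EEXIST (an internal code) means the buffer was already up to date,
 * -EBUSY means a readahead buffer could not be locked, and -ENOENT is
 * returned for hole blocks with no mapping in the bmap.
 */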
static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	unsigned long blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (mode == READA) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}
	if (!buffer_mapped(bh)) { /* unused buffer */
		ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff,
					&blknum);
		if (unlikely(ret)) {
			unlock_buffer(bh);
			goto failed_bh;
		}
		bh->b_bdev = NILFS_MDT(inode)->mi_nilfs->ns_bdev;
		bh->b_blocknr = blknum;
		set_buffer_mapped(bh);
	}

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, bh);
	ret = 0;
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
 failed:
	return ret;
}

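/*
 * nilfs_mdt_read_block() reads the block at @block synchronously and also
 * issues readahead for up to NILFS_MDT_MAX_RA_BLOCKS following blocks.
 * Readahead stops early if a bmap lookup fails or if the read of the
 * first block has already completed.
 */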
static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

	blkoff = block + 1;
	for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
		err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
		if (likely(!err || err == -EEXIST))
			brelse(bh);
		else if (err != -EBUSY)
			break; /* abort readahead if bmap lookup failed */

		if (!buffer_locked(first_bh))
			goto out_no_wait;
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh))
		goto failed_bh;
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on the meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is not zero.  On success, the returned buffer is
 * assured to be either existing or formatted using a buffer lock.
 * @out_bh is substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten by merging with nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */ /* limit read-create loop retries */
		goto retry;
	}
	return ret;
}
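
/*
 * Illustrative usage (a hypothetical caller, not part of this file); the
 * callback and variable names below are only examples.  A NULL @init_block
 * simply leaves a newly created block zero-filled.
 *
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_mdt_get_block(inode, blkoff, 1, my_init_block, &bh);
 *	if (!err) {
 *		... access bh->b_data ...
 *		brelse(bh);
 *	}
 */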

/**
 * nilfs_mdt_delete_block - make a hole on the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}

/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
 * tries to release the page including the buffer from a page cache.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - page has an active buffer.
 *
 * %-ENOENT - page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = (pgoff_t)block >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct page *page;
	unsigned long first_block;
	int ret = 0;
	int still_dirty;

	page = find_lock_page(inode->i_mapping, index);
	if (!page)
		return -ENOENT;

	wait_on_page_writeback(page);

	first_block = (unsigned long)index <<
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	if (page_has_buffers(page)) {
		struct buffer_head *bh;

		bh = nilfs_page_get_nth_block(page, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = PageDirty(page);
	unlock_page(page);
	page_cache_release(page);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}

/**
 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
 */
int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_mdt_read_block(inode, block, &bh);
	if (unlikely(err))
		return err;
	nilfs_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	brelse(bh);
	return 0;
}

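/*
 * nilfs_mdt_fetch_dirty() returns nonzero if the metadata file has dirty
 * state to be written out; as a side effect it transfers a dirty bmap
 * state to the NILFS_I_DIRTY flag of the inode.
 */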
int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

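/*
 * Metadata pages are flushed by the segment constructor, so this
 * ->writepage implementation just re-dirties the page and, depending on
 * the writeback mode, requests segment construction or a flush instead of
 * writing the page itself.
 */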
static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = container_of(page->mapping,
					   struct inode, i_data);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *writer = NULL;
	int err = 0;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (page->mapping->assoc_mapping)
		return 0; /* Do not request flush for shadow page cache */
	if (!sb) {
		writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
		if (!writer)
			return -EROFS;
		sb = writer->s_super;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	if (writer)
		nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
	return err;
}


static struct address_space_operations def_mdt_aops = {
	.writepage = nilfs_mdt_write_page,
};

static struct inode_operations def_mdt_iops;
static struct file_operations def_mdt_fops;

/*
 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
 * ifile, or gcinodes.  This allows the B-tree code and segment constructor
 * to treat them like regular files, and this helps to simplify the
 * implementation.
 * On the other hand, some of the pseudo inodes have an irregular point:
 * they don't have a valid inode->i_sb pointer because their lifetimes are
 * longer than those of the super block structs; they may continue for
 * several consecutive mounts/umounts.  This would need further discussion.
 */
struct inode *
nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
		     ino_t ino, gfp_t gfp_mask)
{
	struct inode *inode = nilfs_alloc_inode(sb);

	if (!inode)
		return NULL;
	else {
		struct address_space * const mapping = &inode->i_data;
		struct nilfs_mdt_info *mi = kzalloc(sizeof(*mi), GFP_NOFS);

		if (!mi) {
			nilfs_destroy_inode(inode);
			return NULL;
		}
		mi->mi_nilfs = nilfs;
		init_rwsem(&mi->mi_sem);

		inode->i_sb = sb; /* sb may be NULL for some meta data files */
		inode->i_blkbits = nilfs->ns_blocksize_bits;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_nlink = 1;
		inode->i_ino = ino;
		inode->i_mode = S_IFREG;
		inode->i_private = mi;

#ifdef INIT_UNUSED_INODE_FIELDS
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_bytes = 0;
		inode->i_generation = 0;
#ifdef CONFIG_QUOTA
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;
		inode->i_rdev = 0;
#ifdef CONFIG_SECURITY
		inode->i_security = NULL;
#endif
		inode->dirtied_when = 0;

		INIT_LIST_HEAD(&inode->i_list);
		INIT_LIST_HEAD(&inode->i_sb_list);
		inode->i_state = 0;
#endif

		spin_lock_init(&inode->i_lock);
		mutex_init(&inode->i_mutex);
		init_rwsem(&inode->i_alloc_sem);

		mapping->host = NULL; /* instead of inode */
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, gfp_mask);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = nilfs->ns_bdi;

		inode->i_mapping = mapping;
	}

	return inode;
}

struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
			    ino_t ino, gfp_t gfp_mask)
{
	struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino, gfp_mask);

	if (!inode)
		return NULL;

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;
	return inode;
}

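/*
 * nilfs_mdt_set_entry_size() derives the per-block layout of a metadata
 * file from its entry size.  With illustrative numbers only: a 4096-byte
 * block and 64-byte entries give mi_entries_per_block = 64, and a 64-byte
 * header then occupies one entry slot, so mi_first_entry_offset = 1.
 */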
void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
			      unsigned header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}

void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
{
	shadow->i_mapping->assoc_mapping = orig->i_mapping;
	NILFS_I(shadow)->i_btnode_cache.assoc_mapping =
		&NILFS_I(orig)->i_btnode_cache;
}

void nilfs_mdt_clear(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	invalidate_mapping_pages(inode->i_mapping, 0, -1);
	truncate_inode_pages(inode->i_mapping, 0);

	nilfs_bmap_clear(ii->i_bmap);
	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
}

void nilfs_mdt_destroy(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
	kfree(mdi);
	nilfs_destroy_inode(inode);
}