/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"
33 static inline unsigned long
34 nilfs_cpfile_checkpoints_per_block(const struct inode
*cpfile
)
36 return NILFS_MDT(cpfile
)->mi_entries_per_block
;
39 /* block number from the beginning of the file */
41 nilfs_cpfile_get_blkoff(const struct inode
*cpfile
, __u64 cno
)
45 BUG_ON(cno
== 0); /* checkpoint number 0 is invalid */
46 tcno
= cno
+ NILFS_MDT(cpfile
)->mi_first_entry_offset
- 1;
47 do_div(tcno
, nilfs_cpfile_checkpoints_per_block(cpfile
));
48 return (unsigned long)tcno
;
53 nilfs_cpfile_get_offset(const struct inode
*cpfile
, __u64 cno
)
55 __u64 tcno
= cno
+ NILFS_MDT(cpfile
)->mi_first_entry_offset
- 1;
56 return do_div(tcno
, nilfs_cpfile_checkpoints_per_block(cpfile
));
60 nilfs_cpfile_checkpoints_in_block(const struct inode
*cpfile
,
65 nilfs_cpfile_checkpoints_per_block(cpfile
) -
66 nilfs_cpfile_get_offset(cpfile
, curr
),
70 static inline int nilfs_cpfile_is_in_first(const struct inode
*cpfile
,
73 return nilfs_cpfile_get_blkoff(cpfile
, cno
) == 0;
77 nilfs_cpfile_block_add_valid_checkpoints(const struct inode
*cpfile
,
78 struct buffer_head
*bh
,
82 struct nilfs_checkpoint
*cp
= kaddr
+ bh_offset(bh
);
85 count
= le32_to_cpu(cp
->cp_checkpoints_count
) + n
;
86 cp
->cp_checkpoints_count
= cpu_to_le32(count
);
91 nilfs_cpfile_block_sub_valid_checkpoints(const struct inode
*cpfile
,
92 struct buffer_head
*bh
,
96 struct nilfs_checkpoint
*cp
= kaddr
+ bh_offset(bh
);
99 BUG_ON(le32_to_cpu(cp
->cp_checkpoints_count
) < n
);
100 count
= le32_to_cpu(cp
->cp_checkpoints_count
) - n
;
101 cp
->cp_checkpoints_count
= cpu_to_le32(count
);
/* Kernel-address of the cpfile header inside the mapped buffer page. */
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}
113 static struct nilfs_checkpoint
*
114 nilfs_cpfile_block_get_checkpoint(const struct inode
*cpfile
, __u64 cno
,
115 struct buffer_head
*bh
,
118 return kaddr
+ bh_offset(bh
) + nilfs_cpfile_get_offset(cpfile
, cno
) *
119 NILFS_MDT(cpfile
)->mi_entry_size
;
122 static void nilfs_cpfile_block_init(struct inode
*cpfile
,
123 struct buffer_head
*bh
,
126 struct nilfs_checkpoint
*cp
= kaddr
+ bh_offset(bh
);
127 size_t cpsz
= NILFS_MDT(cpfile
)->mi_entry_size
;
128 int n
= nilfs_cpfile_checkpoints_per_block(cpfile
);
131 nilfs_checkpoint_set_invalid(cp
);
132 cp
= (void *)cp
+ cpsz
;
136 static inline int nilfs_cpfile_get_header_block(struct inode
*cpfile
,
137 struct buffer_head
**bhp
)
139 return nilfs_mdt_get_block(cpfile
, 0, 0, NULL
, bhp
);
142 static inline int nilfs_cpfile_get_checkpoint_block(struct inode
*cpfile
,
145 struct buffer_head
**bhp
)
147 return nilfs_mdt_get_block(cpfile
,
148 nilfs_cpfile_get_blkoff(cpfile
, cno
),
149 create
, nilfs_cpfile_block_init
, bhp
);
152 static inline int nilfs_cpfile_delete_checkpoint_block(struct inode
*cpfile
,
155 return nilfs_mdt_delete_block(cpfile
,
156 nilfs_cpfile_get_blkoff(cpfile
, cno
));
160 * nilfs_cpfile_get_checkpoint - get a checkpoint
161 * @cpfile: inode of checkpoint file
162 * @cno: checkpoint number
163 * @create: create flag
164 * @cpp: pointer to a checkpoint
165 * @bhp: pointer to a buffer head
167 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
168 * specified by @cno. A new checkpoint will be created if @cno is the current
169 * checkpoint number and @create is nonzero.
171 * Return Value: On success, 0 is returned, and the checkpoint and the
172 * buffer head of the buffer on which the checkpoint is located are stored in
173 * the place pointed by @cpp and @bhp, respectively. On error, one of the
174 * following negative error codes is returned.
178 * %-ENOMEM - Insufficient amount of memory available.
180 * %-ENOENT - No such checkpoint.
182 int nilfs_cpfile_get_checkpoint(struct inode
*cpfile
,
185 struct nilfs_checkpoint
**cpp
,
186 struct buffer_head
**bhp
)
188 struct buffer_head
*header_bh
, *cp_bh
;
189 struct nilfs_cpfile_header
*header
;
190 struct nilfs_checkpoint
*cp
;
194 BUG_ON(cno
< 1 || cno
> nilfs_mdt_cno(cpfile
) ||
195 (cno
< nilfs_mdt_cno(cpfile
) && create
));
197 down_write(&NILFS_MDT(cpfile
)->mi_sem
);
199 ret
= nilfs_cpfile_get_header_block(cpfile
, &header_bh
);
202 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, cno
, create
, &cp_bh
);
205 kaddr
= kmap(cp_bh
->b_page
);
206 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, cp_bh
, kaddr
);
207 if (nilfs_checkpoint_invalid(cp
)) {
209 kunmap(cp_bh
->b_page
);
214 /* a newly-created checkpoint */
215 nilfs_checkpoint_clear_invalid(cp
);
216 if (!nilfs_cpfile_is_in_first(cpfile
, cno
))
217 nilfs_cpfile_block_add_valid_checkpoints(cpfile
, cp_bh
,
219 nilfs_mdt_mark_buffer_dirty(cp_bh
);
221 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
222 header
= nilfs_cpfile_block_get_header(cpfile
, header_bh
,
224 le64_add_cpu(&header
->ch_ncheckpoints
, 1);
225 kunmap_atomic(kaddr
, KM_USER0
);
226 nilfs_mdt_mark_buffer_dirty(header_bh
);
227 nilfs_mdt_mark_dirty(cpfile
);
238 up_write(&NILFS_MDT(cpfile
)->mi_sem
);
243 * nilfs_cpfile_put_checkpoint - put a checkpoint
244 * @cpfile: inode of checkpoint file
245 * @cno: checkpoint number
248 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
249 * specified by @cno. @bh must be the buffer head which has been returned by
250 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
252 void nilfs_cpfile_put_checkpoint(struct inode
*cpfile
, __u64 cno
,
253 struct buffer_head
*bh
)
260 * nilfs_cpfile_delete_checkpoints - delete checkpoints
261 * @cpfile: inode of checkpoint file
262 * @start: start checkpoint number
263 * @end: end checkpoint numer
265 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
266 * the period from @start to @end, excluding @end itself. The checkpoints
267 * which have been already deleted are ignored.
269 * Return Value: On success, 0 is returned. On error, one of the following
270 * negative error codes is returned.
274 * %-ENOMEM - Insufficient amount of memory available.
276 * %-EINVAL - invalid checkpoints.
278 int nilfs_cpfile_delete_checkpoints(struct inode
*cpfile
,
282 struct buffer_head
*header_bh
, *cp_bh
;
283 struct nilfs_cpfile_header
*header
;
284 struct nilfs_checkpoint
*cp
;
285 size_t cpsz
= NILFS_MDT(cpfile
)->mi_entry_size
;
288 unsigned long tnicps
;
289 int ret
, ncps
, nicps
, count
, i
;
291 if ((start
== 0) || (start
> end
)) {
292 printk(KERN_CRIT
"%s: start = %llu, end = %llu\n",
294 (unsigned long long)start
,
295 (unsigned long long)end
);
299 /* cannot delete the latest checkpoint */
300 if (start
== nilfs_mdt_cno(cpfile
) - 1)
303 down_write(&NILFS_MDT(cpfile
)->mi_sem
);
305 ret
= nilfs_cpfile_get_header_block(cpfile
, &header_bh
);
310 for (cno
= start
; cno
< end
; cno
+= ncps
) {
311 ncps
= nilfs_cpfile_checkpoints_in_block(cpfile
, cno
, end
);
312 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, cno
, 0, &cp_bh
);
321 kaddr
= kmap_atomic(cp_bh
->b_page
, KM_USER0
);
322 cp
= nilfs_cpfile_block_get_checkpoint(
323 cpfile
, cno
, cp_bh
, kaddr
);
325 for (i
= 0; i
< ncps
; i
++, cp
= (void *)cp
+ cpsz
) {
326 BUG_ON(nilfs_checkpoint_snapshot(cp
));
327 if (!nilfs_checkpoint_invalid(cp
)) {
328 nilfs_checkpoint_set_invalid(cp
);
334 nilfs_mdt_mark_buffer_dirty(cp_bh
);
335 nilfs_mdt_mark_dirty(cpfile
);
336 if (!nilfs_cpfile_is_in_first(cpfile
, cno
) &&
337 (count
= nilfs_cpfile_block_sub_valid_checkpoints(
338 cpfile
, cp_bh
, kaddr
, nicps
)) == 0) {
340 kunmap_atomic(kaddr
, KM_USER0
);
342 ret
= nilfs_cpfile_delete_checkpoint_block(
346 printk(KERN_ERR
"%s: cannot delete block\n",
352 kunmap_atomic(kaddr
, KM_USER0
);
357 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
358 header
= nilfs_cpfile_block_get_header(cpfile
, header_bh
,
360 le64_add_cpu(&header
->ch_ncheckpoints
, -(u64
)tnicps
);
361 nilfs_mdt_mark_buffer_dirty(header_bh
);
362 nilfs_mdt_mark_dirty(cpfile
);
363 kunmap_atomic(kaddr
, KM_USER0
);
368 up_write(&NILFS_MDT(cpfile
)->mi_sem
);
372 static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode
*cpfile
,
373 struct nilfs_checkpoint
*cp
,
374 struct nilfs_cpinfo
*ci
)
376 ci
->ci_flags
= le32_to_cpu(cp
->cp_flags
);
377 ci
->ci_cno
= le64_to_cpu(cp
->cp_cno
);
378 ci
->ci_create
= le64_to_cpu(cp
->cp_create
);
379 ci
->ci_nblk_inc
= le64_to_cpu(cp
->cp_nblk_inc
);
380 ci
->ci_inodes_count
= le64_to_cpu(cp
->cp_inodes_count
);
381 ci
->ci_blocks_count
= le64_to_cpu(cp
->cp_blocks_count
);
382 ci
->ci_next
= le64_to_cpu(cp
->cp_snapshot_list
.ssl_next
);
385 static ssize_t
nilfs_cpfile_do_get_cpinfo(struct inode
*cpfile
, __u64 cno
,
386 struct nilfs_cpinfo
*ci
, size_t nci
)
388 struct nilfs_checkpoint
*cp
;
389 struct buffer_head
*bh
;
390 size_t cpsz
= NILFS_MDT(cpfile
)->mi_entry_size
;
391 __u64 cur_cno
= nilfs_mdt_cno(cpfile
);
396 down_read(&NILFS_MDT(cpfile
)->mi_sem
);
398 for (n
= 0; cno
< cur_cno
&& n
< nci
; cno
+= ncps
) {
399 ncps
= nilfs_cpfile_checkpoints_in_block(cpfile
, cno
, cur_cno
);
400 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, cno
, 0, &bh
);
404 continue; /* skip hole */
407 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
408 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, bh
, kaddr
);
409 for (i
= 0; i
< ncps
&& n
< nci
; i
++, cp
= (void *)cp
+ cpsz
) {
410 if (!nilfs_checkpoint_invalid(cp
))
411 nilfs_cpfile_checkpoint_to_cpinfo(
412 cpfile
, cp
, &ci
[n
++]);
414 kunmap_atomic(kaddr
, KM_USER0
);
421 up_read(&NILFS_MDT(cpfile
)->mi_sem
);
425 static ssize_t
nilfs_cpfile_do_get_ssinfo(struct inode
*cpfile
, __u64
*cnop
,
426 struct nilfs_cpinfo
*ci
, size_t nci
)
428 struct buffer_head
*bh
;
429 struct nilfs_cpfile_header
*header
;
430 struct nilfs_checkpoint
*cp
;
431 __u64 curr
= *cnop
, next
;
432 unsigned long curr_blkoff
, next_blkoff
;
436 down_read(&NILFS_MDT(cpfile
)->mi_sem
);
439 ret
= nilfs_cpfile_get_header_block(cpfile
, &bh
);
442 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
443 header
= nilfs_cpfile_block_get_header(cpfile
, bh
, kaddr
);
444 curr
= le64_to_cpu(header
->ch_snapshot_list
.ssl_next
);
445 kunmap_atomic(kaddr
, KM_USER0
);
451 } else if (unlikely(curr
== ~(__u64
)0)) {
456 curr_blkoff
= nilfs_cpfile_get_blkoff(cpfile
, curr
);
457 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, curr
, 0, &bh
);
460 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
461 for (n
= 0; n
< nci
; n
++) {
462 cp
= nilfs_cpfile_block_get_checkpoint(
463 cpfile
, curr
, bh
, kaddr
);
464 nilfs_cpfile_checkpoint_to_cpinfo(cpfile
, cp
, &ci
[n
]);
465 next
= le64_to_cpu(cp
->cp_snapshot_list
.ssl_next
);
467 curr
= ~(__u64
)0; /* Terminator */
471 next_blkoff
= nilfs_cpfile_get_blkoff(cpfile
, next
);
472 if (curr_blkoff
!= next_blkoff
) {
473 kunmap_atomic(kaddr
, KM_USER0
);
475 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, next
,
479 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
482 curr_blkoff
= next_blkoff
;
484 kunmap_atomic(kaddr
, KM_USER0
);
490 up_read(&NILFS_MDT(cpfile
)->mi_sem
);
495 * nilfs_cpfile_get_cpinfo -
502 ssize_t
nilfs_cpfile_get_cpinfo(struct inode
*cpfile
, __u64
*cnop
, int mode
,
503 struct nilfs_cpinfo
*ci
, size_t nci
)
506 case NILFS_CHECKPOINT
:
507 return nilfs_cpfile_do_get_cpinfo(cpfile
, *cnop
, ci
, nci
);
509 return nilfs_cpfile_do_get_ssinfo(cpfile
, cnop
, ci
, nci
);
516 * nilfs_cpfile_delete_checkpoint -
520 int nilfs_cpfile_delete_checkpoint(struct inode
*cpfile
, __u64 cno
)
522 struct nilfs_cpinfo ci
;
526 /* checkpoint number 0 is invalid */
529 nci
= nilfs_cpfile_do_get_cpinfo(cpfile
, cno
, &ci
, 1);
532 else if (nci
== 0 || ci
.ci_cno
!= cno
)
535 /* cannot delete the latest checkpoint nor snapshots */
536 ret
= nilfs_cpinfo_snapshot(&ci
);
539 else if (ret
> 0 || cno
== nilfs_mdt_cno(cpfile
) - 1)
542 return nilfs_cpfile_delete_checkpoints(cpfile
, cno
, cno
+ 1);
545 static struct nilfs_snapshot_list
*
546 nilfs_cpfile_block_get_snapshot_list(const struct inode
*cpfile
,
548 struct buffer_head
*bh
,
551 struct nilfs_cpfile_header
*header
;
552 struct nilfs_checkpoint
*cp
;
553 struct nilfs_snapshot_list
*list
;
556 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, bh
, kaddr
);
557 list
= &cp
->cp_snapshot_list
;
559 header
= nilfs_cpfile_block_get_header(cpfile
, bh
, kaddr
);
560 list
= &header
->ch_snapshot_list
;
565 static int nilfs_cpfile_set_snapshot(struct inode
*cpfile
, __u64 cno
)
567 struct buffer_head
*header_bh
, *curr_bh
, *prev_bh
, *cp_bh
;
568 struct nilfs_cpfile_header
*header
;
569 struct nilfs_checkpoint
*cp
;
570 struct nilfs_snapshot_list
*list
;
572 unsigned long curr_blkoff
, prev_blkoff
;
576 down_write(&NILFS_MDT(cpfile
)->mi_sem
);
578 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, cno
, 0, &cp_bh
);
581 kaddr
= kmap_atomic(cp_bh
->b_page
, KM_USER0
);
582 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, cp_bh
, kaddr
);
583 if (nilfs_checkpoint_invalid(cp
)) {
585 kunmap_atomic(kaddr
, KM_USER0
);
588 if (nilfs_checkpoint_snapshot(cp
)) {
590 kunmap_atomic(kaddr
, KM_USER0
);
593 kunmap_atomic(kaddr
, KM_USER0
);
595 ret
= nilfs_cpfile_get_header_block(cpfile
, &header_bh
);
598 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
599 header
= nilfs_cpfile_block_get_header(cpfile
, header_bh
, kaddr
);
600 list
= &header
->ch_snapshot_list
;
605 prev
= le64_to_cpu(list
->ssl_prev
);
607 prev_blkoff
= nilfs_cpfile_get_blkoff(cpfile
, prev
);
609 if (curr_blkoff
!= prev_blkoff
) {
610 kunmap_atomic(kaddr
, KM_USER0
);
612 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, curr
,
616 kaddr
= kmap_atomic(curr_bh
->b_page
, KM_USER0
);
618 curr_blkoff
= prev_blkoff
;
619 cp
= nilfs_cpfile_block_get_checkpoint(
620 cpfile
, curr
, curr_bh
, kaddr
);
621 list
= &cp
->cp_snapshot_list
;
622 prev
= le64_to_cpu(list
->ssl_prev
);
624 kunmap_atomic(kaddr
, KM_USER0
);
627 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, prev
, 0,
636 kaddr
= kmap_atomic(curr_bh
->b_page
, KM_USER0
);
637 list
= nilfs_cpfile_block_get_snapshot_list(
638 cpfile
, curr
, curr_bh
, kaddr
);
639 list
->ssl_prev
= cpu_to_le64(cno
);
640 kunmap_atomic(kaddr
, KM_USER0
);
642 kaddr
= kmap_atomic(cp_bh
->b_page
, KM_USER0
);
643 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, cp_bh
, kaddr
);
644 cp
->cp_snapshot_list
.ssl_next
= cpu_to_le64(curr
);
645 cp
->cp_snapshot_list
.ssl_prev
= cpu_to_le64(prev
);
646 nilfs_checkpoint_set_snapshot(cp
);
647 kunmap_atomic(kaddr
, KM_USER0
);
649 kaddr
= kmap_atomic(prev_bh
->b_page
, KM_USER0
);
650 list
= nilfs_cpfile_block_get_snapshot_list(
651 cpfile
, prev
, prev_bh
, kaddr
);
652 list
->ssl_next
= cpu_to_le64(cno
);
653 kunmap_atomic(kaddr
, KM_USER0
);
655 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
656 header
= nilfs_cpfile_block_get_header(cpfile
, header_bh
, kaddr
);
657 le64_add_cpu(&header
->ch_nsnapshots
, 1);
658 kunmap_atomic(kaddr
, KM_USER0
);
660 nilfs_mdt_mark_buffer_dirty(prev_bh
);
661 nilfs_mdt_mark_buffer_dirty(curr_bh
);
662 nilfs_mdt_mark_buffer_dirty(cp_bh
);
663 nilfs_mdt_mark_buffer_dirty(header_bh
);
664 nilfs_mdt_mark_dirty(cpfile
);
678 up_write(&NILFS_MDT(cpfile
)->mi_sem
);
682 static int nilfs_cpfile_clear_snapshot(struct inode
*cpfile
, __u64 cno
)
684 struct buffer_head
*header_bh
, *next_bh
, *prev_bh
, *cp_bh
;
685 struct nilfs_cpfile_header
*header
;
686 struct nilfs_checkpoint
*cp
;
687 struct nilfs_snapshot_list
*list
;
692 down_write(&NILFS_MDT(cpfile
)->mi_sem
);
694 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, cno
, 0, &cp_bh
);
697 kaddr
= kmap_atomic(cp_bh
->b_page
, KM_USER0
);
698 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, cp_bh
, kaddr
);
699 if (nilfs_checkpoint_invalid(cp
)) {
701 kunmap_atomic(kaddr
, KM_USER0
);
704 if (!nilfs_checkpoint_snapshot(cp
)) {
706 kunmap_atomic(kaddr
, KM_USER0
);
710 list
= &cp
->cp_snapshot_list
;
711 next
= le64_to_cpu(list
->ssl_next
);
712 prev
= le64_to_cpu(list
->ssl_prev
);
713 kunmap_atomic(kaddr
, KM_USER0
);
715 ret
= nilfs_cpfile_get_header_block(cpfile
, &header_bh
);
719 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, next
, 0,
728 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, prev
, 0,
737 kaddr
= kmap_atomic(next_bh
->b_page
, KM_USER0
);
738 list
= nilfs_cpfile_block_get_snapshot_list(
739 cpfile
, next
, next_bh
, kaddr
);
740 list
->ssl_prev
= cpu_to_le64(prev
);
741 kunmap_atomic(kaddr
, KM_USER0
);
743 kaddr
= kmap_atomic(prev_bh
->b_page
, KM_USER0
);
744 list
= nilfs_cpfile_block_get_snapshot_list(
745 cpfile
, prev
, prev_bh
, kaddr
);
746 list
->ssl_next
= cpu_to_le64(next
);
747 kunmap_atomic(kaddr
, KM_USER0
);
749 kaddr
= kmap_atomic(cp_bh
->b_page
, KM_USER0
);
750 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, cp_bh
, kaddr
);
751 cp
->cp_snapshot_list
.ssl_next
= cpu_to_le64(0);
752 cp
->cp_snapshot_list
.ssl_prev
= cpu_to_le64(0);
753 nilfs_checkpoint_clear_snapshot(cp
);
754 kunmap_atomic(kaddr
, KM_USER0
);
756 kaddr
= kmap_atomic(header_bh
->b_page
, KM_USER0
);
757 header
= nilfs_cpfile_block_get_header(cpfile
, header_bh
, kaddr
);
758 le64_add_cpu(&header
->ch_nsnapshots
, -1);
759 kunmap_atomic(kaddr
, KM_USER0
);
761 nilfs_mdt_mark_buffer_dirty(next_bh
);
762 nilfs_mdt_mark_buffer_dirty(prev_bh
);
763 nilfs_mdt_mark_buffer_dirty(cp_bh
);
764 nilfs_mdt_mark_buffer_dirty(header_bh
);
765 nilfs_mdt_mark_dirty(cpfile
);
779 up_write(&NILFS_MDT(cpfile
)->mi_sem
);
784 * nilfs_cpfile_is_snapshot -
785 * @cpfile: inode of checkpoint file
786 * @cno: checkpoint number
790 * Return Value: On success, 1 is returned if the checkpoint specified by
791 * @cno is a snapshot, or 0 if not. On error, one of the following negative
792 * error codes is returned.
796 * %-ENOMEM - Insufficient amount of memory available.
798 * %-ENOENT - No such checkpoint.
800 int nilfs_cpfile_is_snapshot(struct inode
*cpfile
, __u64 cno
)
802 struct buffer_head
*bh
;
803 struct nilfs_checkpoint
*cp
;
807 down_read(&NILFS_MDT(cpfile
)->mi_sem
);
809 ret
= nilfs_cpfile_get_checkpoint_block(cpfile
, cno
, 0, &bh
);
812 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
813 cp
= nilfs_cpfile_block_get_checkpoint(cpfile
, cno
, bh
, kaddr
);
814 ret
= nilfs_checkpoint_snapshot(cp
);
815 kunmap_atomic(kaddr
, KM_USER0
);
819 up_read(&NILFS_MDT(cpfile
)->mi_sem
);
824 * nilfs_cpfile_change_cpmode - change checkpoint mode
825 * @cpfile: inode of checkpoint file
826 * @cno: checkpoint number
827 * @status: mode of checkpoint
829 * Description: nilfs_change_cpmode() changes the mode of the checkpoint
830 * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
832 * Return Value: On success, 0 is returned. On error, one of the following
833 * negative error codes is returned.
837 * %-ENOMEM - Insufficient amount of memory available.
839 * %-ENOENT - No such checkpoint.
841 int nilfs_cpfile_change_cpmode(struct inode
*cpfile
, __u64 cno
, int mode
)
843 struct the_nilfs
*nilfs
;
846 nilfs
= NILFS_MDT(cpfile
)->mi_nilfs
;
849 case NILFS_CHECKPOINT
:
851 * Check for protecting existing snapshot mounts:
852 * bd_mount_sem is used to make this operation atomic and
853 * exclusive with a new mount job. Though it doesn't cover
854 * umount, it's enough for the purpose.
856 down(&nilfs
->ns_bdev
->bd_mount_sem
);
857 if (nilfs_checkpoint_is_mounted(nilfs
, cno
, 1)) {
858 /* Current implementation does not have to protect
859 plain read-only mounts since they are exclusive
860 with a read/write mount and are protected from the
864 ret
= nilfs_cpfile_clear_snapshot(cpfile
, cno
);
865 up(&nilfs
->ns_bdev
->bd_mount_sem
);
868 return nilfs_cpfile_set_snapshot(cpfile
, cno
);
875 * nilfs_cpfile_get_stat - get checkpoint statistics
876 * @cpfile: inode of checkpoint file
877 * @stat: pointer to a structure of checkpoint statistics
879 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
881 * Return Value: On success, 0 is returned, and checkpoints information is
882 * stored in the place pointed by @stat. On error, one of the following
883 * negative error codes is returned.
887 * %-ENOMEM - Insufficient amount of memory available.
889 int nilfs_cpfile_get_stat(struct inode
*cpfile
, struct nilfs_cpstat
*cpstat
)
891 struct buffer_head
*bh
;
892 struct nilfs_cpfile_header
*header
;
896 down_read(&NILFS_MDT(cpfile
)->mi_sem
);
898 ret
= nilfs_cpfile_get_header_block(cpfile
, &bh
);
901 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
902 header
= nilfs_cpfile_block_get_header(cpfile
, bh
, kaddr
);
903 cpstat
->cs_cno
= nilfs_mdt_cno(cpfile
);
904 cpstat
->cs_ncps
= le64_to_cpu(header
->ch_ncheckpoints
);
905 cpstat
->cs_nsss
= le64_to_cpu(header
->ch_nsnapshots
);
906 kunmap_atomic(kaddr
, KM_USER0
);
910 up_read(&NILFS_MDT(cpfile
)->mi_sem
);
This page took 0.063116 seconds and 6 git commands to generate.