nilfs2: unfold nilfs_sufile_block_get_header function
[deliverable/linux.git] / fs / nilfs2 / sufile.c
CommitLineData
6c98cd4e
KS
1/*
2 * sufile.c - NILFS segment usage file.
3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
6c98cd4e
KS
22 */
23
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/string.h>
27#include <linux/buffer_head.h>
28#include <linux/errno.h>
29#include <linux/nilfs2_fs.h>
30#include "mdt.h"
31#include "sufile.h"
32
33
/* Return the number of segment usage entries that fit in one sufile block. */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
39
40static unsigned long
41nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
42{
43 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
44 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
45 return (unsigned long)t;
46}
47
48static unsigned long
49nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
50{
51 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
52 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
53}
54
55static unsigned long
56nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
57 __u64 max)
58{
59 return min_t(unsigned long,
60 nilfs_sufile_segment_usages_per_block(sufile) -
61 nilfs_sufile_get_offset(sufile, curr),
62 max - curr + 1);
63}
64
6c98cd4e
KS
65static struct nilfs_segment_usage *
66nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
67 struct buffer_head *bh, void *kaddr)
68{
69 return kaddr + bh_offset(bh) +
70 nilfs_sufile_get_offset(sufile, segnum) *
71 NILFS_MDT(sufile)->mi_entry_size;
72}
73
/*
 * Read the sufile header block (block 0 of the metadata file) without
 * creating it.  On success *bhp holds the buffer head, which the caller
 * must release with brelse().
 */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
79
/*
 * Read the sufile block that contains the usage entry of @segnum.
 * @create: nonzero to allocate the block if it is a hole.
 * On success *bhp holds the buffer head; the caller must brelse() it.
 */
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
88
a703018f
RK
89static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
90 u64 ncleanadd, u64 ndirtyadd)
91{
92 struct nilfs_sufile_header *header;
93 void *kaddr;
94
95 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
96 header = kaddr + bh_offset(header_bh);
97 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
98 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
99 kunmap_atomic(kaddr, KM_USER0);
100
101 nilfs_mdt_mark_buffer_dirty(header_bh);
102}
103
dda54f4b
RK
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate all segment numbers up front so that bad input is
	 * rejected before any segment usage has been modified.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* reuse the currently held block */

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;	/* number of entries processed so far */
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
199
a703018f
RK
200int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
201 void (*dofunc)(struct inode *, __u64,
202 struct buffer_head *,
203 struct buffer_head *))
204{
205 struct buffer_head *header_bh, *bh;
206 int ret;
207
208 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
209 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
210 __func__, (unsigned long long)segnum);
211 return -EINVAL;
212 }
213 down_write(&NILFS_MDT(sufile)->mi_sem);
214
215 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
216 if (ret < 0)
217 goto out_sem;
218
219 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
220 if (!ret) {
221 dofunc(sufile, segnum, header_bh, bh);
222 brelse(bh);
223 }
224 brelse(header_bh);
225
226 out_sem:
227 up_write(&NILFS_MDT(sufile)->mi_sem);
228 return ret;
229}
230
6c98cd4e
KS
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus;
	int ret, i, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	/*
	 * Scan forward from the segment after the last allocation,
	 * wrapping once to segment 0, so every segment is examined at
	 * most once per call.
	 */
	nsegments = nilfs_sufile_get_nsegments(sufile);
	segnum = last_alloc + 1;
	maxsegnum = nsegments - 1;
	for (i = 0; i < nsegments; i += nsus) {
		if (segnum >= nsegments) {
			/* wrap around */
			segnum = 0;
			maxsegnum = last_alloc;
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		/* walk the usage entries that share this block */
		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr, KM_USER0);

			/* update the header counters and allocation cursor */
			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
325
a703018f
RK
326void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
327 struct buffer_head *header_bh,
328 struct buffer_head *su_bh)
6c98cd4e 329{
6c98cd4e
KS
330 struct nilfs_segment_usage *su;
331 void *kaddr;
6c98cd4e
KS
332
333 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
a703018f 334 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
1f5abe7e
RK
335 if (unlikely(!nilfs_segment_usage_clean(su))) {
336 printk(KERN_WARNING "%s: segment %llu must be clean\n",
6c98cd4e 337 __func__, (unsigned long long)segnum);
1f5abe7e 338 kunmap_atomic(kaddr, KM_USER0);
a703018f 339 return;
6c98cd4e
KS
340 }
341 nilfs_segment_usage_set_dirty(su);
342 kunmap_atomic(kaddr, KM_USER0);
343
a703018f 344 nilfs_sufile_mod_counter(header_bh, -1, 1);
6c98cd4e
KS
345 nilfs_mdt_mark_buffer_dirty(su_bh);
346 nilfs_mdt_mark_dirty(sufile);
6c98cd4e
KS
347}
348
c85399c2
RK
/*
 * Reset the usage entry of @segnum to "garbage": dirty flag only, zero
 * blocks, zero last-modified time, and fix up the clean/dirty counters
 * in the header to match the state transition.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/* already in the target state (only the dirty flag, no blocks)? */
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* remember the prior state for the counter adjustment below */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	/* was clean: one fewer clean; was not dirty: one more dirty */
	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
377
a703018f
RK
378void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
379 struct buffer_head *header_bh,
380 struct buffer_head *su_bh)
6c98cd4e 381{
6c98cd4e
KS
382 struct nilfs_segment_usage *su;
383 void *kaddr;
a703018f 384 int sudirty;
6c98cd4e 385
a703018f
RK
386 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
387 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
388 if (nilfs_segment_usage_clean(su)) {
389 printk(KERN_WARNING "%s: segment %llu is already clean\n",
390 __func__, (unsigned long long)segnum);
6c98cd4e 391 kunmap_atomic(kaddr, KM_USER0);
a703018f 392 return;
6c98cd4e 393 }
a703018f
RK
394 WARN_ON(nilfs_segment_usage_error(su));
395 WARN_ON(!nilfs_segment_usage_dirty(su));
6c98cd4e 396
a703018f
RK
397 sudirty = nilfs_segment_usage_dirty(su);
398 nilfs_segment_usage_set_clean(su);
399 kunmap_atomic(kaddr, KM_USER0);
400 nilfs_mdt_mark_buffer_dirty(su_bh);
6c98cd4e 401
a703018f
RK
402 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
403 nilfs_mdt_mark_dirty(sufile);
6c98cd4e
KS
404}
405
/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.  The page mapping and the buffer reference remain
 * held after a successful return; release them with
 * nilfs_sufile_put_segment_usage().
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the place pointed by @sup and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
				   struct nilfs_segment_usage **sup,
				   struct buffer_head **bhp)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	/* segnum is 0 origin */
	if (segnum >= nilfs_sufile_get_nsegments(sufile))
		return -EINVAL;
	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
	if (ret < 0)
		goto out_sem;
	/* non-atomic kmap: the mapping stays valid past return and is
	   undone in nilfs_sufile_put_segment_usage() */
	kaddr = kmap(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap(bh->b_page);
		brelse(bh);
		ret = -EINVAL;
		goto out_sem;
	}

	if (sup != NULL)
		*sup = su;
	*bhp = bh;

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
460
/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head which have been returned
 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
				    struct buffer_head *bh)
{
	/* undo the kmap() and buffer reference taken by the _get_ call */
	kunmap(bh->b_page);
	brelse(bh);
}
477
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is guarded by its own spinlock */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
525
526/**
527 * nilfs_sufile_get_ncleansegs - get the number of clean segments
528 * @sufile: inode of segment usage file
529 * @nsegsp: pointer to the number of clean segments
530 *
531 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
532 * segments.
533 *
534 * Return Value: On success, 0 is returned and the number of clean segments is
535 * stored in the place pointed by @nsegsp. On error, one of the following
536 * negative error codes is returned.
537 *
538 * %-EIO - I/O error.
539 *
540 * %-ENOMEM - Insufficient amount of memory available.
541 */
542int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
543{
544 struct nilfs_sustat sustat;
545 int ret;
546
547 ret = nilfs_sufile_get_stat(sufile, &sustat);
548 if (ret == 0)
549 *nsegsp = sustat.ss_ncleansegs;
550 return ret;
551}
552
a703018f
RK
553void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
554 struct buffer_head *header_bh,
555 struct buffer_head *su_bh)
6c98cd4e 556{
6c98cd4e 557 struct nilfs_segment_usage *su;
6c98cd4e 558 void *kaddr;
a703018f 559 int suclean;
6c98cd4e
KS
560
561 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
562 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
563 if (nilfs_segment_usage_error(su)) {
564 kunmap_atomic(kaddr, KM_USER0);
a703018f 565 return;
6c98cd4e 566 }
88072faf 567 suclean = nilfs_segment_usage_clean(su);
6c98cd4e
KS
568 nilfs_segment_usage_set_error(su);
569 kunmap_atomic(kaddr, KM_USER0);
6c98cd4e 570
a703018f
RK
571 if (suclean)
572 nilfs_sufile_mod_counter(header_bh, -1, 0);
6c98cd4e
KS
573 nilfs_mdt_mark_buffer_dirty(su_bh);
574 nilfs_mdt_mark_dirty(sufile);
6c98cd4e
KS
575}
576
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies the usage information of
 * up to @nsi segments starting at @segnum into @buf.  Entries located in
 * hole blocks are returned zero-filled, and the active flag is
 * recomputed from the current nilfs state rather than taken from disk.
 *
 * Return Value: On success, the number of segment usages retrieved is
 * returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the number of existing segments */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		/* number of entries to take from the current block */
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/* the on-disk active bit is stale; recompute it */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
79739565 651
8707df38
RK
/**
 * nilfs_sufile_read - read sufile inode
 * @sufile: sufile inode
 * @raw_inode: on-disk sufile inode
 *
 * Thin wrapper around nilfs_read_inode_common(); returns its result.
 */
int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
{
	return nilfs_read_inode_common(sufile, raw_inode);
}
661
79739565
RK
662/**
663 * nilfs_sufile_new - create sufile
664 * @nilfs: nilfs object
665 * @susize: size of a segment usage entry
666 */
667struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
668{
669 struct inode *sufile;
670
671 sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO, 0);
672 if (sufile)
673 nilfs_mdt_set_entry_size(sufile, susize,
674 sizeof(struct nilfs_sufile_header));
675 return sufile;
676}
This page took 0.0960530000000001 seconds and 5 git commands to generate.