/*
 * linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

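/*
 * Translate a block offset within an extent record into an allocation
 * block number by walking the record's eight descriptors.  Returns 0 if
 * the offset is not mapped by this record (note the "panic?" below).
 */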
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

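/* Total number of allocation blocks mapped by one extent record. */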
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

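/*
 * Allocation block just past the last block mapped by an extent record
 * (start + count of the last non-empty descriptor), used as the goal for
 * contiguous allocation.
 */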
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

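/*
 * Write the cached extent record back into the extents tree: insert a
 * fresh record if it is flagged HFSPLUS_EXT_NEW, otherwise overwrite the
 * existing record in place.  The caller must hold hip->extents_lock.
 */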
static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
}

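/*
 * Write back the cached extent record if it is dirty; the caller must
 * already hold hip->extents_lock.
 */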
static void hfsplus_ext_write_extent_locked(struct inode *inode)
{
	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		__hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}

void hfsplus_ext_write_extent(struct inode *inode)
{
	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
}

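/*
 * Read the extent record covering @block of file @cnid from the extents
 * tree into @extent.  Returns -ENOENT when the tree holds no record for
 * this cnid/fork, -EIO on a malformed record.
 */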
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
	return 0;
}

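/*
 * Repoint the per-inode extent cache at the record covering @block:
 * write back the currently cached record if it is dirty, then read the
 * new one and remember its starting block and total block count.
 */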
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY)
		__hfsplus_ext_write_extent(inode, fd);

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}

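/*
 * Make sure the per-inode extent cache covers @block, loading the
 * matching record from the extents tree if it does not.
 */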
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	res = __hfsplus_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}

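/*
 * Note on units: iblock counts blocks of sb->s_blocksize, while extents
 * are stored in allocation blocks; sbi->fs_shift is the shift that
 * converts between the two (see the mapping below).
 */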
/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	int was_dirty = 0;
	int shift;

	/* Convert inode block to disk allocation block */
	shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (iblock > hip->fs_blocks || !create)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
	mask = (1 << sbi->fs_shift) - 1;
	map_bh(bh_result, sb, (dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask));
	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}

static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, " ");
	for (i = 0; i < 8; i++)
		dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block),
				 be32_to_cpu(extent[i].block_count));
	dprint(DBG_EXTENT, "\n");
}

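/*
 * Append @block_count newly allocated blocks starting at @alloc_block to
 * an extent record at offset @offset: either grow the descriptor they are
 * contiguous with, or start the next free descriptor.  Returns -ENOSPC if
 * a new descriptor is needed but all eight are already in use.
 */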
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

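/*
 * Free the last @block_nr allocation blocks mapped by an extent record,
 * given that @offset (relative to the record) is the current end of the
 * allocation; descriptors that become completely free are cleared.
 */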
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			hfsplus_block_free(sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfsplus_block_free(sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

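/*
 * Release every allocation block belonging to a fork: first the blocks
 * mapped by the extent record embedded in the catalog fork data, then any
 * overflow records in the extents tree, which are removed as they are
 * drained.
 */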
int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

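/*
 * Grow the file's allocation by up to one clump of blocks, trying first
 * for space contiguous with the current last extent.  The new space is
 * merged into the in-inode or cached extent record, or becomes the first
 * descriptor of a brand new record when the current one is full.
 */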
int hfsplus_file_extend(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		// extend alloc file
		printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n",
			sbi->alloc_file->i_size * 8,
			sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&hip->extents_lock);
	if (!res) {
		hip->alloc_blocks += len;
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	hfsplus_ext_write_extent_locked(inode);

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}

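/*
 * Adjust the block allocation to match inode->i_size.  Growing is done
 * through a zero-length pagecache write at the new EOF (letting the
 * pagecache zero-fill the gap); shrinking frees tail extents, walking
 * back from the current allocation until the new block count is reached.
 */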
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n",
		inode->i_ino, (long long)hip->phys_size, inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		u32 size = inode->i_size;
		int res;

		res = pagecache_write_begin(NULL, mapping, size, 0,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;
	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&hip->extents_lock);
	hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&hip->extents_lock);

	hip->alloc_blocks = blk_cnt;
out:
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}