/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work.  For simple filesystems
 * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
 * submit IO for these buffers via __sync_blockdev().  This also speeds up the
 * wait == 1 case since in that case write_inode() functions do
 * sync_dirty_buffer() and thus effectively write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
	/*
	 * This should be safe, as we require bdi backing to actually
	 * write out data in the first place.
	 */
	if (!sb->s_bdi)
		return 0;

	/* Avoid doing the quota sync and cache pruning twice */
	if (!wait) {
		writeout_quota_sb(sb, -1);
		writeback_inodes_sb(sb);
	} else {
		sync_quota_sb(sb, -1);
		sync_inodes_sb(sb);
	}
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock: filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int sync_filesystem(struct super_block *sb)
{
	int ret;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	/*
	 * No point in syncing out anything if the filesystem is read-only.
	 */
	if (sb->s_flags & MS_RDONLY)
		return 0;

	ret = __sync_filesystem(sb, 0);
	if (ret < 0)
		return ret;
	return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);
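/*
 * Usage sketch (illustrative only, not part of this file): callers are
 * expected to hold ->s_umount while syncing a single filesystem, which
 * is what the WARN_ON() in sync_filesystem() checks for:
 *
 *	down_read(&sb->s_umount);
 *	ret = sync_filesystem(sb);
 *	up_read(&sb->s_umount);
 *
 * The two __sync_filesystem() passes first kick off writeback without
 * waiting (wait == 0) and then repeat it waiting for completion
 * (wait == 1), so most of the IO is already in flight by the time the
 * blocking pass runs.
 */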
/*
 * Sync all the data for all the filesystems (called by sys_sync() and
 * emergency sync).
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied.  s_need_sync
 * is used only here.  We set it against all filesystems and then clear it as
 * we sync them.  So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync
 * flags again, which will cause process A to resync everything.  Fix that with
 * a local mutex.
 */
static void sync_filesystems(int wait)
{
	struct super_block *sb;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);		/* Could be down_interruptible */
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list)
		sb->s_need_sync = 1;

restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_need_sync)
			continue;
		sb->s_need_sync = 0;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
			__sync_filesystem(sb, wait);
		up_read(&sb->s_umount);

		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&mutex);
}
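/*
 * Illustration (not compiled): the local mutex above prevents the
 * cross-tagging scenario described in the comment.  Without it, an
 * interleaving such as
 *
 *	A: tags every sb, clears sb1's flag and starts syncing it
 *	B: tags every sb again, including the ones A already cleared
 *	A: finds s_need_sync set once more and resyncs them
 *
 * would make A redo work it had already finished; with the mutex, B
 * simply waits for A's pass to complete before starting its own.
 */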
/*
 * Sync everything.  Start out by waking the flusher threads, because
 * that writes back all queues in parallel.
 */
SYSCALL_DEFINE0(sync)
{
	wakeup_flusher_threads(0);
	sync_filesystems(0);
	sync_filesystems(1);
	if (unlikely(laptop_mode))
		laptop_sync_completion();
	return 0;
}
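/*
 * Userspace view (illustrative sketch, not part of this file): a plain
 * sync(2) call lands in the SYSCALL_DEFINE0(sync) above:
 *
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sync();		<-- enters the syscall defined above
 *		return 0;
 *	}
 *
 * The call returns only after the second, waiting sync_filesystems()
 * pass has finished, not merely after writeback has been scheduled.
 */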
static void do_sync_work(struct work_struct *work)
{
	/*
	 * Sync twice to reduce the possibility we skipped some inodes / pages
	 * because they were temporarily locked
	 */
	sync_filesystems(0);
	sync_filesystems(0);
	printk("Emergency Sync complete\n");
	kfree(work);
}

void emergency_sync(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_sync_work);
		schedule_work(work);
	}
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(file_fsync);
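/*
 * Wiring sketch (illustrative only, not part of this file): a simple
 * buffer-head based filesystem can use file_fsync() directly as its
 * ->fsync method, e.g. in a hypothetical "examplefs":
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.read	= do_sync_read,
 *		.write	= do_sync_write,
 *		.fsync	= file_fsync,
 *	};
 *
 * The helper then writes the inode, lets the superblock update itself
 * via ->write_super() if it is dirty, and finally flushes the block
 * device.
 */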
/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @start:		offset in bytes of the beginning of data range to sync
 * @end:		offset in bytes of the end of data range (inclusive)
 * @datasync:		perform only datasync
 *
 * Write back data in range @start..@end and metadata for @file to disk.  If
 * @datasync is set only metadata needed to access modified file data is
 * written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync_range(struct file *file, struct dentry *dentry, loff_t start,
		    loff_t end, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get the mapping and operations from the file if we have one,
	 * or fall back to the inode's defaults if we don't have a
	 * struct file available.  Damn nfsd..
	 */
	if (file) {
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);

out:
	return ret;
}
EXPORT_SYMBOL(vfs_fsync_range);
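/*
 * Call sketch (illustrative only, not part of this file): an in-kernel
 * user that only has a dentry, as in the nfsd case described above,
 * can sync the whole file with
 *
 *	err = vfs_fsync_range(NULL, dentry, 0, LLONG_MAX, 0);
 *
 * while a caller holding a struct file passes it as the first argument
 * so the mapping and ->fsync method are taken from the file itself.
 */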
/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @datasync:		only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	return vfs_fsync_range(file, dentry, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);

static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	file = fget(fd);
	if (file) {
		ret = vfs_fsync(file, file->f_path.dentry, datasync);
		fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
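/*
 * Userspace view (illustrative sketch, not part of this file): both
 * fsync(2) and fdatasync(2) funnel into do_fsync() above, differing
 * only in the datasync flag handed down to ->fsync():
 *
 *	if (write(fd, buf, len) != (ssize_t)len)
 *		...handle the short write or error...
 *	if (fdatasync(fd) != 0)		(fsync(fd) if metadata matters too)
 *		...handle the sync error...
 */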
/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file:	file to which the write happened
 * @pos:	offset where the write started
 * @count:	length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
		return 0;
	return vfs_fsync_range(file, file->f_path.dentry, pos,
			       pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
}
EXPORT_SYMBOL(generic_write_sync);
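/*
 * Usage sketch (illustrative only, not part of this file): a write path
 * doing its own buffered write would mirror the generic code by syncing
 * only the bytes it actually wrote:
 *
 *	written = ...perform the buffered write at pos...;
 *	if (written > 0) {
 *		ssize_t err = generic_write_sync(file, pos, written);
 *		if (err < 0)
 *			written = err;
 *	}
 *
 * For files opened without O_SYNC/O_DSYNC on a non-IS_SYNC() inode this
 * is a no-op, so the call is cheap in the common case.
 */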
/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback.  Note that this may block
 * for significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after
 * an earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to
 * wait for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity
 * operation which will ensure that all pages in the range which were dirty on
 * entry to sys_sync_file_range() are committed to disk.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
				unsigned int flags)
{
	int ret;
	struct file *file;
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = file->f_path.dentry->d_inode->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	mapping = file->f_mapping;
	if (!mapping) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = filemap_fdatawait_range(mapping, offset, endbyte);
		if (ret < 0)
			goto out_put;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = filemap_fdatawrite_range(mapping, offset, endbyte);
		if (ret < 0)
			goto out_put;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = filemap_fdatawait_range(mapping, offset, endbyte);

out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
				    long flags)
{
	return SYSC_sync_file_range((int) fd, offset, nbytes,
				    (unsigned int) flags);
}
SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
#endif
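/*
 * Userspace view (illustrative sketch, not part of this file): the
 * "write-behind" pattern described in the comment above, applied while
 * streaming a large file out chunk by chunk:
 *
 *	sync_file_range(fd, chunk_off, chunk_len,
 *			SYNC_FILE_RANGE_WRITE);		start writeback, async
 *
 *	...later, when that chunk must be stable...
 *
 *	sync_file_range(fd, chunk_off, chunk_len,
 *			SYNC_FILE_RANGE_WAIT_BEFORE |
 *			SYNC_FILE_RANGE_WRITE |
 *			SYNC_FILE_RANGE_WAIT_AFTER);
 *
 * As the comment stresses, this never writes the file's metadata, so it
 * is not a replacement for fsync()/fdatasync() on newly allocated
 * blocks.
 */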
/* It would be nice if people remembered that not all the world's an i386
   when introducing new system calls. */
SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
				 loff_t offset, loff_t nbytes)
{
	return sys_sync_file_range(fd, offset, nbytes, flags);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range2(long fd, long flags,
				     loff_t offset, loff_t nbytes)
{
	return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
				     offset, nbytes);
}
SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
#endif