/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
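/**
 * generic_fillattr - fill in the basic attributes from an inode
 * @inode:	inode to pull the attributes from
 * @stat:	kstat structure to fill in
 *
 * Copy the standard attributes (device, inode number, mode, link count,
 * uid/gid, size, timestamps, block size and block count) straight out of
 * the in-core inode.
 */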
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = (1 << inode->i_blkbits);
	stat->blocks = inode->i_blocks;
}

EXPORT_SYMBOL(generic_fillattr);
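/**
 * vfs_getattr - get attributes of the object at a path
 * @path:	path to report attributes of
 * @stat:	kstat structure to fill in
 *
 * Ask the security module for permission first; then let the filesystem's
 * ->getattr() method fill in @stat, falling back to generic_fillattr()
 * for filesystems that do not provide one.
 */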
int vfs_getattr(struct path *path, struct kstat *stat)
{
	struct inode *inode = path->dentry->d_inode;
	int retval;

	retval = security_inode_getattr(path->mnt, path->dentry);
	if (retval)
		return retval;

	if (inode->i_op->getattr)
		return inode->i_op->getattr(path->mnt, path->dentry, stat);

	generic_fillattr(inode, stat);
	return 0;
}

EXPORT_SYMBOL(vfs_getattr);
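/*
 * Stat an open file descriptor.  fdget_raw() is used so that O_PATH
 * descriptors can be stat'ed as well; an invalid descriptor yields -EBADF.
 */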
int vfs_fstat(unsigned int fd, struct kstat *stat)
{
	struct fd f = fdget_raw(fd);
	int error = -EBADF;

	if (f.file) {
		error = vfs_getattr(&f.file->f_path, stat);
		fdput(f);
	}
	return error;
}
EXPORT_SYMBOL(vfs_fstat);
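/*
 * Stat a path relative to a directory file descriptor.  Unknown flags are
 * rejected with -EINVAL; AT_SYMLINK_NOFOLLOW and AT_EMPTY_PATH are
 * translated into lookup flags.  If a cached lookup turns out to be stale
 * (-ESTALE), the lookup is retried once with LOOKUP_REVAL forcing
 * revalidation.
 */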
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
		int flag)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = 0;

	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		      AT_EMPTY_PATH)) != 0)
		goto out;

	if (!(flag & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (flag & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;
retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_fstatat);

int vfs_stat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);

int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);
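/*
 * Userspace sketch (assumes the usual libc wrappers; not part of this
 * file): stat() follows symlinks, lstat() stats the link itself.
 *
 *	struct stat st;
 *
 *	if (stat("/etc/passwd", &st) == 0)
 *		printf("size=%lld\n", (long long)st.st_size);
 *	if (lstat("/etc/mtab", &st) == 0 && S_ISLNK(st.st_mode))
 *		printf("symlink\n");
 */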

#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user *statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev,new_valid_dev)(x)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
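/*
 * On 32-bit kernels valid_dev()/encode_dev() use the old 16-bit dev_t
 * encoding (8-bit major, 8-bit minor); on 64-bit kernels they use the
 * new 32-bit encoding.  For example, with BITS_PER_LONG == 64:
 *
 *	encode_dev(x) == new_encode_dev(x)
 */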

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
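/*
 * Copy a kstat into the user-visible struct stat, returning -EOVERFLOW
 * whenever a value (device numbers, inode number, link count, or the
 * file size on 32-bit kernels) cannot be represented in the target field.
 */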
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
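/*
 * LOOKUP_EMPTY allows an empty pathname to resolve to the dfd object
 * itself; if that object is not a symlink, an empty name reports
 * -ENOENT while a non-empty one reports -EINVAL.  A successful readlink
 * also updates the symlink's atime, and stale cached lookups are
 * retried with LOOKUP_REVAL.
 */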
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = path.dentry->d_inode;

		error = empty ? -ENOENT : -EINVAL;
		if (inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = inode->i_op->readlink(path.dentry,
							      buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
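/*
 * Userspace sketch (assumes the usual libc wrapper; not part of this
 * file): readlink(2) returns the number of bytes placed in the buffer
 * and does not NUL-terminate it, so the caller must:
 *
 *	char buf[PATH_MAX];
 *	ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
 *
 *	if (n >= 0)
 *		buf[n] = '\0';
 */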

/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif
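/*
 * As cp_new_stat(), but for the LFS struct stat64: 64-bit sizes and
 * device numbers fit, so only the inode-number check (and the MIPS
 * dev_t quirk below) can return -EOVERFLOW.
 */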
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
/* The caller is responsible for sufficient locking (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
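/*
 * Example: adding 1300 bytes to an inode with i_bytes == 200 does
 * i_blocks += 2 (1300 >> 9) and i_bytes += 276 (1300 & 511), giving
 * i_bytes == 476 with no carry, since 476 < 512.
 */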

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
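/*
 * Subtraction mirrors the addition above: when the sub-block byte count
 * would go negative, borrow one 512-byte block (i_blocks--, i_bytes += 512).
 */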
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/*
	 * The caller is responsible for sufficient locking
	 * (i.e. inode->i_lock).
	 */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);