ceph: fix comment, remove extraneous args
fs/ceph/inode.c
1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/module.h>
4#include <linux/fs.h>
5#include <linux/smp_lock.h>
6#include <linux/slab.h>
7#include <linux/string.h>
8#include <linux/uaccess.h>
9#include <linux/kernel.h>
10#include <linux/namei.h>
11#include <linux/writeback.h>
12#include <linux/vmalloc.h>
13#include <linux/pagevec.h>
14
15#include "super.h"
16#include "mds_client.h"
17#include <linux/ceph/decode.h>
18
19/*
20 * Ceph inode operations
21 *
22 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
23 * setattr, etc.), xattr helpers, and helpers for assimilating
24 * metadata returned by the MDS into our cache.
25 *
26 * Also define helpers for doing asynchronous writeback, invalidation,
27 * and truncation for the benefit of those who can't afford to block
28 * (typically because they are in the message handler path).
29 */
30
31static const struct inode_operations ceph_symlink_iops;
32
33static void ceph_invalidate_work(struct work_struct *work);
34static void ceph_writeback_work(struct work_struct *work);
35static void ceph_vmtruncate_work(struct work_struct *work);
36
37/*
38 * find or create an inode, given the ceph ino number
39 */
40struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
41{
42 struct inode *inode;
43 ino_t t = ceph_vino_to_ino(vino);
44
45 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
46 if (inode == NULL)
47 return ERR_PTR(-ENOMEM);
48 if (inode->i_state & I_NEW) {
49 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
50 inode, ceph_vinop(inode), (u64)inode->i_ino);
51 unlock_new_inode(inode);
52 }
53
54 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
55 vino.snap, inode);
56 return inode;
57}
58
59/*
60 * get/construct snapdir inode for a given directory
61 */
62struct inode *ceph_get_snapdir(struct inode *parent)
63{
64 struct ceph_vino vino = {
65 .ino = ceph_ino(parent),
66 .snap = CEPH_SNAPDIR,
67 };
68 struct inode *inode = ceph_get_inode(parent->i_sb, vino);
69 struct ceph_inode_info *ci = ceph_inode(inode);
70
71 BUG_ON(!S_ISDIR(parent->i_mode));
72 if (IS_ERR(inode))
73 return inode;
74 inode->i_mode = parent->i_mode;
75 inode->i_uid = parent->i_uid;
76 inode->i_gid = parent->i_gid;
77 inode->i_op = &ceph_dir_iops;
78 inode->i_fop = &ceph_dir_fops;
79 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
80 ci->i_rbytes = 0;
81 return inode;
82}
83
84const struct inode_operations ceph_file_iops = {
85 .permission = ceph_permission,
86 .setattr = ceph_setattr,
87 .getattr = ceph_getattr,
88 .setxattr = ceph_setxattr,
89 .getxattr = ceph_getxattr,
90 .listxattr = ceph_listxattr,
91 .removexattr = ceph_removexattr,
92};
93
94
95/*
96 * We use a 'frag tree' to keep track of the MDS's directory fragments
97 * for a given inode (usually there is just a single fragment). We
98 * need to know when a child frag is delegated to a new MDS, or when
99 * it is flagged as replicated, so we can direct our requests
100 * accordingly.
101 */
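/*
 * Illustrative note (added, not part of the original file): a caller that
 * needs to route a request for a dentry name hash 'h' would typically do
 *
 *	u32 fg = ceph_choose_frag(ci, h, NULL, NULL);
 *
 * ceph_choose_frag() below walks the recorded splits from the root
 * fragment down to the leaf fragment containing 'h'; the NULL arguments
 * simply skip the optional copy-out of delegation info.
 */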
102
103/*
104 * find/create a frag in the tree
105 */
106static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
107 u32 f)
108{
109 struct rb_node **p;
110 struct rb_node *parent = NULL;
111 struct ceph_inode_frag *frag;
112 int c;
113
114 p = &ci->i_fragtree.rb_node;
115 while (*p) {
116 parent = *p;
117 frag = rb_entry(parent, struct ceph_inode_frag, node);
118 c = ceph_frag_compare(f, frag->frag);
119 if (c < 0)
120 p = &(*p)->rb_left;
121 else if (c > 0)
122 p = &(*p)->rb_right;
123 else
124 return frag;
125 }
126
127 frag = kmalloc(sizeof(*frag), GFP_NOFS);
128 if (!frag) {
129 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
130 "frag %x\n", &ci->vfs_inode,
131 ceph_vinop(&ci->vfs_inode), f);
132 return ERR_PTR(-ENOMEM);
133 }
134 frag->frag = f;
135 frag->split_by = 0;
136 frag->mds = -1;
137 frag->ndist = 0;
138
139 rb_link_node(&frag->node, parent, p);
140 rb_insert_color(&frag->node, &ci->i_fragtree);
141
142 dout("get_or_create_frag added %llx.%llx frag %x\n",
143 ceph_vinop(&ci->vfs_inode), f);
144 return frag;
145}
146
147/*
148 * find a specific frag @f
149 */
150struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
151{
152 struct rb_node *n = ci->i_fragtree.rb_node;
153
154 while (n) {
155 struct ceph_inode_frag *frag =
156 rb_entry(n, struct ceph_inode_frag, node);
157 int c = ceph_frag_compare(f, frag->frag);
158 if (c < 0)
159 n = n->rb_left;
160 else if (c > 0)
161 n = n->rb_right;
162 else
163 return frag;
164 }
165 return NULL;
166}
167
168/*
169 * Choose frag containing the given value @v. If @pfrag is
170 * specified, copy the frag delegation info to the caller if
171 * it is present.
172 */
173u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
174 struct ceph_inode_frag *pfrag,
175 int *found)
176{
177 u32 t = ceph_frag_make(0, 0);
178 struct ceph_inode_frag *frag;
179 unsigned nway, i;
180 u32 n;
181
182 if (found)
183 *found = 0;
184
185 mutex_lock(&ci->i_fragtree_mutex);
186 while (1) {
187 WARN_ON(!ceph_frag_contains_value(t, v));
188 frag = __ceph_find_frag(ci, t);
189 if (!frag)
190 break; /* t is a leaf */
191 if (frag->split_by == 0) {
192 if (pfrag)
193 memcpy(pfrag, frag, sizeof(*pfrag));
194 if (found)
195 *found = 1;
196 break;
197 }
198
199 /* choose child */
200 nway = 1 << frag->split_by;
201 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
202 frag->split_by, nway);
203 for (i = 0; i < nway; i++) {
204 n = ceph_frag_make_child(t, frag->split_by, i);
205 if (ceph_frag_contains_value(n, v)) {
206 t = n;
207 break;
208 }
209 }
210 BUG_ON(i == nway);
211 }
212 dout("choose_frag(%x) = %x\n", v, t);
213
214 mutex_unlock(&ci->i_fragtree_mutex);
215 return t;
216}
217
218/*
219 * Process dirfrag (delegation) info from the mds. Include leaf
220 * fragment in tree ONLY if ndist > 0. Otherwise, only
221 * branches/splits are included in i_fragtree.
222 */
223static int ceph_fill_dirfrag(struct inode *inode,
224 struct ceph_mds_reply_dirfrag *dirinfo)
225{
226 struct ceph_inode_info *ci = ceph_inode(inode);
227 struct ceph_inode_frag *frag;
228 u32 id = le32_to_cpu(dirinfo->frag);
229 int mds = le32_to_cpu(dirinfo->auth);
230 int ndist = le32_to_cpu(dirinfo->ndist);
231 int i;
232 int err = 0;
233
234 mutex_lock(&ci->i_fragtree_mutex);
235 if (ndist == 0) {
236 /* no delegation info needed. */
237 frag = __ceph_find_frag(ci, id);
238 if (!frag)
239 goto out;
240 if (frag->split_by == 0) {
241 /* tree leaf, remove */
242 dout("fill_dirfrag removed %llx.%llx frag %x"
243 " (no ref)\n", ceph_vinop(inode), id);
244 rb_erase(&frag->node, &ci->i_fragtree);
245 kfree(frag);
246 } else {
247 /* tree branch, keep and clear */
248 dout("fill_dirfrag cleared %llx.%llx frag %x"
249 " referral\n", ceph_vinop(inode), id);
250 frag->mds = -1;
251 frag->ndist = 0;
252 }
253 goto out;
254 }
255
256
257 /* find/add this frag to store mds delegation info */
258 frag = __get_or_create_frag(ci, id);
259 if (IS_ERR(frag)) {
260 /* this is not the end of the world; we can continue
261 with bad/inaccurate delegation info */
262 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
263 ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
264 err = -ENOMEM;
265 goto out;
266 }
267
268 frag->mds = mds;
269 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
270 for (i = 0; i < frag->ndist; i++)
271 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
272 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
273 ceph_vinop(inode), frag->frag, frag->ndist);
274
275out:
276 mutex_unlock(&ci->i_fragtree_mutex);
277 return err;
278}
279
280
281/*
282 * initialize a newly allocated inode.
283 */
284struct inode *ceph_alloc_inode(struct super_block *sb)
285{
286 struct ceph_inode_info *ci;
287 int i;
288
289 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
290 if (!ci)
291 return NULL;
292
293 dout("alloc_inode %p\n", &ci->vfs_inode);
294
295 ci->i_version = 0;
296 ci->i_time_warp_seq = 0;
297 ci->i_ceph_flags = 0;
298 ci->i_release_count = 0;
299 ci->i_symlink = NULL;
300
301 ci->i_fragtree = RB_ROOT;
302 mutex_init(&ci->i_fragtree_mutex);
303
304 ci->i_xattrs.blob = NULL;
305 ci->i_xattrs.prealloc_blob = NULL;
306 ci->i_xattrs.dirty = false;
307 ci->i_xattrs.index = RB_ROOT;
308 ci->i_xattrs.count = 0;
309 ci->i_xattrs.names_size = 0;
310 ci->i_xattrs.vals_size = 0;
311 ci->i_xattrs.version = 0;
312 ci->i_xattrs.index_version = 0;
313
314 ci->i_caps = RB_ROOT;
315 ci->i_auth_cap = NULL;
316 ci->i_dirty_caps = 0;
317 ci->i_flushing_caps = 0;
318 INIT_LIST_HEAD(&ci->i_dirty_item);
319 INIT_LIST_HEAD(&ci->i_flushing_item);
320 ci->i_cap_flush_seq = 0;
321 ci->i_cap_flush_last_tid = 0;
322 memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
323 init_waitqueue_head(&ci->i_cap_wq);
324 ci->i_hold_caps_min = 0;
325 ci->i_hold_caps_max = 0;
326 INIT_LIST_HEAD(&ci->i_cap_delay_list);
327 ci->i_cap_exporting_mds = 0;
328 ci->i_cap_exporting_mseq = 0;
329 ci->i_cap_exporting_issued = 0;
330 INIT_LIST_HEAD(&ci->i_cap_snaps);
331 ci->i_head_snapc = NULL;
332 ci->i_snap_caps = 0;
333
334 for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
335 ci->i_nr_by_mode[i] = 0;
336
337 ci->i_truncate_seq = 0;
338 ci->i_truncate_size = 0;
339 ci->i_truncate_pending = 0;
340
341 ci->i_max_size = 0;
342 ci->i_reported_size = 0;
343 ci->i_wanted_max_size = 0;
344 ci->i_requested_max_size = 0;
345
346 ci->i_pin_ref = 0;
347 ci->i_rd_ref = 0;
348 ci->i_rdcache_ref = 0;
349 ci->i_wr_ref = 0;
350 ci->i_wrbuffer_ref = 0;
351 ci->i_wrbuffer_ref_head = 0;
352 ci->i_shared_gen = 0;
353 ci->i_rdcache_gen = 0;
354 ci->i_rdcache_revoking = 0;
355
356 INIT_LIST_HEAD(&ci->i_unsafe_writes);
357 INIT_LIST_HEAD(&ci->i_unsafe_dirops);
358 spin_lock_init(&ci->i_unsafe_lock);
359
360 ci->i_snap_realm = NULL;
361 INIT_LIST_HEAD(&ci->i_snap_realm_item);
362 INIT_LIST_HEAD(&ci->i_snap_flush_item);
363
364 INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
365 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
366
367 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
368
369 return &ci->vfs_inode;
370}
371
372void ceph_destroy_inode(struct inode *inode)
373{
374 struct ceph_inode_info *ci = ceph_inode(inode);
375 struct ceph_inode_frag *frag;
376 struct rb_node *n;
377
378 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
379
380 ceph_queue_caps_release(inode);
381
382 /*
383 * we may still have a snap_realm reference if there are stray
384 * caps in i_cap_exporting_issued or i_snap_caps.
385 */
386 if (ci->i_snap_realm) {
387 struct ceph_mds_client *mdsc =
388 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
389 struct ceph_snap_realm *realm = ci->i_snap_realm;
390
391 dout(" dropping residual ref to snap realm %p\n", realm);
392 spin_lock(&realm->inodes_with_caps_lock);
393 list_del_init(&ci->i_snap_realm_item);
394 spin_unlock(&realm->inodes_with_caps_lock);
395 ceph_put_snap_realm(mdsc, realm);
396 }
397
398 kfree(ci->i_symlink);
399 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
400 frag = rb_entry(n, struct ceph_inode_frag, node);
401 rb_erase(n, &ci->i_fragtree);
402 kfree(frag);
403 }
404
405 __ceph_destroy_xattrs(ci);
406 if (ci->i_xattrs.blob)
407 ceph_buffer_put(ci->i_xattrs.blob);
408 if (ci->i_xattrs.prealloc_blob)
409 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
410
411 kmem_cache_free(ceph_inode_cachep, ci);
412}
413
414
415/*
416 * Helpers to fill in size, ctime, mtime, and atime. We have to be
417 * careful because either the client or MDS may have more up to date
418 * info, depending on which capabilities are held, and whether
419 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
420 * and size are monotonically increasing, except when utimes() or
421 * truncate() increments the corresponding _seq values.)
422 */
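/*
 * Added note: in practice this means a stale, racing MDS reply cannot
 * shrink i_size -- a smaller size is only accepted together with a newer
 * truncate_seq, while at an unchanged truncate_seq only growth is applied.
 */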
423int ceph_fill_file_size(struct inode *inode, int issued,
424 u32 truncate_seq, u64 truncate_size, u64 size)
425{
426 struct ceph_inode_info *ci = ceph_inode(inode);
427 int queue_trunc = 0;
428
429 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
430 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
431 dout("size %lld -> %llu\n", inode->i_size, size);
432 inode->i_size = size;
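 /* i_blocks is counted in 512-byte sectors; round the byte size up */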
433 inode->i_blocks = (size + (1<<9) - 1) >> 9;
434 ci->i_reported_size = size;
435 if (truncate_seq != ci->i_truncate_seq) {
436 dout("truncate_seq %u -> %u\n",
437 ci->i_truncate_seq, truncate_seq);
438 ci->i_truncate_seq = truncate_seq;
439 /*
440 * If we hold relevant caps, or in the case where we're
441 * not the only client referencing this file and we
442 * don't hold those caps, then we need to check whether
443 * the file is either opened or mmaped
444 */
445 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
446 CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
447 CEPH_CAP_FILE_EXCL|
448 CEPH_CAP_FILE_LAZYIO)) ||
449 mapping_mapped(inode->i_mapping) ||
450 __ceph_caps_file_wanted(ci)) {
451 ci->i_truncate_pending++;
452 queue_trunc = 1;
453 }
454 }
455 }
456 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
457 ci->i_truncate_size != truncate_size) {
458 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
459 truncate_size);
460 ci->i_truncate_size = truncate_size;
461 }
462 return queue_trunc;
463}
464
465void ceph_fill_file_time(struct inode *inode, int issued,
466 u64 time_warp_seq, struct timespec *ctime,
467 struct timespec *mtime, struct timespec *atime)
468{
469 struct ceph_inode_info *ci = ceph_inode(inode);
470 int warn = 0;
471
472 if (issued & (CEPH_CAP_FILE_EXCL|
473 CEPH_CAP_FILE_WR|
474 CEPH_CAP_FILE_BUFFER|
475 CEPH_CAP_AUTH_EXCL|
476 CEPH_CAP_XATTR_EXCL)) {
477 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
478 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
479 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
480 ctime->tv_sec, ctime->tv_nsec);
481 inode->i_ctime = *ctime;
482 }
483 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
484 /* the MDS did a utimes() */
485 dout("mtime %ld.%09ld -> %ld.%09ld "
486 "tw %d -> %d\n",
487 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
488 mtime->tv_sec, mtime->tv_nsec,
489 ci->i_time_warp_seq, (int)time_warp_seq);
490
491 inode->i_mtime = *mtime;
492 inode->i_atime = *atime;
493 ci->i_time_warp_seq = time_warp_seq;
494 } else if (time_warp_seq == ci->i_time_warp_seq) {
495 /* nobody did utimes(); take the max */
496 if (timespec_compare(mtime, &inode->i_mtime) > 0) {
497 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
498 inode->i_mtime.tv_sec,
499 inode->i_mtime.tv_nsec,
500 mtime->tv_sec, mtime->tv_nsec);
501 inode->i_mtime = *mtime;
502 }
503 if (timespec_compare(atime, &inode->i_atime) > 0) {
504 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
505 inode->i_atime.tv_sec,
506 inode->i_atime.tv_nsec,
507 atime->tv_sec, atime->tv_nsec);
508 inode->i_atime = *atime;
509 }
510 } else if (issued & CEPH_CAP_FILE_EXCL) {
511 /* we did a utimes(); ignore mds values */
512 } else {
513 warn = 1;
514 }
515 } else {
516 /* we have no write|excl caps; whatever the MDS says is true */
517 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
518 inode->i_ctime = *ctime;
519 inode->i_mtime = *mtime;
520 inode->i_atime = *atime;
521 ci->i_time_warp_seq = time_warp_seq;
522 } else {
523 warn = 1;
524 }
525 }
526 if (warn) /* time_warp_seq shouldn't go backwards */
527 dout("%p mds time_warp_seq %llu < %u\n",
528 inode, time_warp_seq, ci->i_time_warp_seq);
529}
530
531/*
532 * Populate an inode based on info from mds. May be called on new or
533 * existing inodes.
534 */
535static int fill_inode(struct inode *inode,
536 struct ceph_mds_reply_info_in *iinfo,
537 struct ceph_mds_reply_dirfrag *dirinfo,
538 struct ceph_mds_session *session,
539 unsigned long ttl_from, int cap_fmode,
540 struct ceph_cap_reservation *caps_reservation)
541{
542 struct ceph_mds_reply_inode *info = iinfo->in;
543 struct ceph_inode_info *ci = ceph_inode(inode);
544 int i;
545 int issued, implemented;
546 struct timespec mtime, atime, ctime;
547 u32 nsplits;
548 struct ceph_buffer *xattr_blob = NULL;
549 int err = 0;
550 int queue_trunc = 0;
551
552 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
553 inode, ceph_vinop(inode), le64_to_cpu(info->version),
554 ci->i_version);
555
556 /*
557 * prealloc xattr data, if it looks like we'll need it. only
558 * if len > 4 (meaning there are actually xattrs; the first 4
559 * bytes are the xattr count).
560 */
561 if (iinfo->xattr_len > 4) {
562 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
563 if (!xattr_blob)
564 pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
565 iinfo->xattr_len);
566 }
567
568 spin_lock(&inode->i_lock);
569
570 /*
571 * provided version will be odd if inode value is projected,
572 * even if stable. skip the update if we have newer stable
573 * info (ours>=theirs, e.g. due to racing mds replies), unless
574 * we are getting projected (unstable) info (in which case the
575 * version is odd, and we want ours>theirs).
576 * us them
577 * 2 2 skip
578 * 3 2 skip
579 * 3 3 update
580 */
581 if (le64_to_cpu(info->version) > 0 &&
582 (ci->i_version & ~1) >= le64_to_cpu(info->version))
583 goto no_change;
584
585 issued = __ceph_caps_issued(ci, &implemented);
586 issued |= implemented | __ceph_caps_dirty(ci);
587
588 /* update inode */
589 ci->i_version = le64_to_cpu(info->version);
590 inode->i_version++;
591 inode->i_rdev = le32_to_cpu(info->rdev);
592
593 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
594 inode->i_mode = le32_to_cpu(info->mode);
595 inode->i_uid = le32_to_cpu(info->uid);
596 inode->i_gid = le32_to_cpu(info->gid);
597 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
598 inode->i_uid, inode->i_gid);
599 }
600
601 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
602 inode->i_nlink = le32_to_cpu(info->nlink);
603
604 /* be careful with mtime, atime, size */
605 ceph_decode_timespec(&atime, &info->atime);
606 ceph_decode_timespec(&mtime, &info->mtime);
607 ceph_decode_timespec(&ctime, &info->ctime);
608 queue_trunc = ceph_fill_file_size(inode, issued,
609 le32_to_cpu(info->truncate_seq),
610 le64_to_cpu(info->truncate_size),
611 le64_to_cpu(info->size));
612 ceph_fill_file_time(inode, issued,
613 le32_to_cpu(info->time_warp_seq),
614 &ctime, &mtime, &atime);
615
616 /* only update max_size on auth cap */
617 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
618 ci->i_max_size != le64_to_cpu(info->max_size)) {
619 dout("max_size %lld -> %llu\n", ci->i_max_size,
620 le64_to_cpu(info->max_size));
621 ci->i_max_size = le64_to_cpu(info->max_size);
622 }
623
624 ci->i_layout = info->layout;
625 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
626
627 /* xattrs */
628 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
629 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
630 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
631 if (ci->i_xattrs.blob)
632 ceph_buffer_put(ci->i_xattrs.blob);
633 ci->i_xattrs.blob = xattr_blob;
634 if (xattr_blob)
635 memcpy(ci->i_xattrs.blob->vec.iov_base,
636 iinfo->xattr_data, iinfo->xattr_len);
637 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
638 xattr_blob = NULL;
639 }
640
641 inode->i_mapping->a_ops = &ceph_aops;
642 inode->i_mapping->backing_dev_info =
643 &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
644
645 switch (inode->i_mode & S_IFMT) {
646 case S_IFIFO:
647 case S_IFBLK:
648 case S_IFCHR:
649 case S_IFSOCK:
650 init_special_inode(inode, inode->i_mode, inode->i_rdev);
651 inode->i_op = &ceph_file_iops;
652 break;
653 case S_IFREG:
654 inode->i_op = &ceph_file_iops;
655 inode->i_fop = &ceph_file_fops;
656 break;
657 case S_IFLNK:
658 inode->i_op = &ceph_symlink_iops;
659 if (!ci->i_symlink) {
660 int symlen = iinfo->symlink_len;
661 char *sym;
662
663 BUG_ON(symlen != inode->i_size);
664 spin_unlock(&inode->i_lock);
665
666 err = -ENOMEM;
667 sym = kmalloc(symlen+1, GFP_NOFS);
668 if (!sym)
669 goto out;
670 memcpy(sym, iinfo->symlink, symlen);
671 sym[symlen] = 0;
672
673 spin_lock(&inode->i_lock);
674 if (!ci->i_symlink)
675 ci->i_symlink = sym;
676 else
677 kfree(sym); /* lost a race */
678 }
679 break;
680 case S_IFDIR:
681 inode->i_op = &ceph_dir_iops;
682 inode->i_fop = &ceph_dir_fops;
683
684 ci->i_files = le64_to_cpu(info->files);
685 ci->i_subdirs = le64_to_cpu(info->subdirs);
686 ci->i_rbytes = le64_to_cpu(info->rbytes);
687 ci->i_rfiles = le64_to_cpu(info->rfiles);
688 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
689 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
690
691 /* set dir completion flag? */
692 if (ci->i_files == 0 && ci->i_subdirs == 0 &&
693 ceph_snap(inode) == CEPH_NOSNAP &&
694 (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
695 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
696 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
697 dout(" marking %p complete (empty)\n", inode);
698 ci->i_ceph_flags |= CEPH_I_COMPLETE;
699 ci->i_max_offset = 2;
700 }
701
702 /* it may be better to set st_size in getattr instead? */
703 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
704 inode->i_size = ci->i_rbytes;
705 break;
706 default:
707 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
708 ceph_vinop(inode), inode->i_mode);
709 }
710
711no_change:
712 spin_unlock(&inode->i_lock);
713
714 /* queue truncate if we saw i_size decrease */
715 if (queue_trunc)
716 ceph_queue_vmtruncate(inode);
717
718 /* populate frag tree */
719 /* FIXME: move me up, if/when version reflects fragtree changes */
720 nsplits = le32_to_cpu(info->fragtree.nsplits);
721 mutex_lock(&ci->i_fragtree_mutex);
722 for (i = 0; i < nsplits; i++) {
723 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
724 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
725
726 if (IS_ERR(frag))
727 continue;
728 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
729 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
730 }
731 mutex_unlock(&ci->i_fragtree_mutex);
732
733 /* were we issued a capability? */
734 if (info->cap.caps) {
735 if (ceph_snap(inode) == CEPH_NOSNAP) {
736 ceph_add_cap(inode, session,
737 le64_to_cpu(info->cap.cap_id),
738 cap_fmode,
739 le32_to_cpu(info->cap.caps),
740 le32_to_cpu(info->cap.wanted),
741 le32_to_cpu(info->cap.seq),
742 le32_to_cpu(info->cap.mseq),
743 le64_to_cpu(info->cap.realm),
744 info->cap.flags,
745 caps_reservation);
746 } else {
747 spin_lock(&inode->i_lock);
748 dout(" %p got snap_caps %s\n", inode,
749 ceph_cap_string(le32_to_cpu(info->cap.caps)));
750 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
751 if (cap_fmode >= 0)
752 __ceph_get_fmode(ci, cap_fmode);
753 spin_unlock(&inode->i_lock);
754 }
755 } else if (cap_fmode >= 0) {
756 pr_warning("mds issued no caps on %llx.%llx\n",
757 ceph_vinop(inode));
758 __ceph_get_fmode(ci, cap_fmode);
759 }
760
761 /* update delegation info? */
762 if (dirinfo)
763 ceph_fill_dirfrag(inode, dirinfo);
764
765 err = 0;
766
767out:
768 if (xattr_blob)
769 ceph_buffer_put(xattr_blob);
770 return err;
771}
772
773/*
774 * caller should hold session s_mutex.
775 */
776static void update_dentry_lease(struct dentry *dentry,
777 struct ceph_mds_reply_lease *lease,
778 struct ceph_mds_session *session,
779 unsigned long from_time)
780{
781 struct ceph_dentry_info *di = ceph_dentry(dentry);
782 long unsigned duration = le32_to_cpu(lease->duration_ms);
783 long unsigned ttl = from_time + (duration * HZ) / 1000;
784 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
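 /* duration_ms is supplied by the MDS in milliseconds; ttl and half_ttl
  * are absolute deadlines in jiffies measured from from_time */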
785 struct inode *dir;
786
787 /* only track leases on regular dentries */
788 if (dentry->d_op != &ceph_dentry_ops)
789 return;
790
791 spin_lock(&dentry->d_lock);
792 dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
793 dentry, le16_to_cpu(lease->mask), duration, ttl);
794
795 /* make lease_rdcache_gen match directory */
796 dir = dentry->d_parent->d_inode;
797 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
798
799 if (lease->mask == 0)
800 goto out_unlock;
801
802 if (di->lease_gen == session->s_cap_gen &&
803 time_before(ttl, dentry->d_time))
804 goto out_unlock; /* we already have a newer lease. */
805
806 if (di->lease_session && di->lease_session != session)
807 goto out_unlock;
808
809 ceph_dentry_lru_touch(dentry);
810
811 if (!di->lease_session)
812 di->lease_session = ceph_get_mds_session(session);
813 di->lease_gen = session->s_cap_gen;
814 di->lease_seq = le32_to_cpu(lease->seq);
815 di->lease_renew_after = half_ttl;
816 di->lease_renew_from = 0;
817 dentry->d_time = ttl;
818out_unlock:
819 spin_unlock(&dentry->d_lock);
820 return;
821}
822
823/*
824 * Set dentry's directory position based on the current dir's max, and
825 * order it in d_subdirs, so that dcache_readdir behaves.
826 */
827static void ceph_set_dentry_offset(struct dentry *dn)
828{
829 struct dentry *dir = dn->d_parent;
830 struct inode *inode = dn->d_parent->d_inode;
831 struct ceph_dentry_info *di;
832
833 BUG_ON(!inode);
834
835 di = ceph_dentry(dn);
836
837 spin_lock(&inode->i_lock);
838 if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
839 spin_unlock(&inode->i_lock);
840 return;
841 }
842 di->offset = ceph_inode(inode)->i_max_offset++;
843 spin_unlock(&inode->i_lock);
844
845 spin_lock(&dcache_lock);
846 spin_lock(&dn->d_lock);
847 list_move(&dn->d_u.d_child, &dir->d_subdirs);
848 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
849 dn->d_u.d_child.prev, dn->d_u.d_child.next);
850 spin_unlock(&dn->d_lock);
851 spin_unlock(&dcache_lock);
852}
853
854/*
855 * splice a dentry to an inode.
856 * caller must hold directory i_mutex for this to be safe.
857 *
858 * we will only rehash the resulting dentry if @prehash is
859 * true; @prehash will be set to false (for the benefit of
860 * the caller) if we fail.
861 */
862static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
863 bool *prehash, bool set_offset)
864{
865 struct dentry *realdn;
866
867 BUG_ON(dn->d_inode);
868
869 /* dn must be unhashed */
870 if (!d_unhashed(dn))
871 d_drop(dn);
872 realdn = d_materialise_unique(dn, in);
873 if (IS_ERR(realdn)) {
874 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
875 PTR_ERR(realdn), dn, in, ceph_vinop(in));
876 if (prehash)
877 *prehash = false; /* don't rehash on error */
878 dn = realdn; /* note realdn contains the error */
879 goto out;
880 } else if (realdn) {
881 dout("dn %p (%d) spliced with %p (%d) "
882 "inode %p ino %llx.%llx\n",
883 dn, atomic_read(&dn->d_count),
884 realdn, atomic_read(&realdn->d_count),
885 realdn->d_inode, ceph_vinop(realdn->d_inode));
886 dput(dn);
887 dn = realdn;
888 } else {
889 BUG_ON(!ceph_dentry(dn));
890 dout("dn %p attached to %p ino %llx.%llx\n",
891 dn, dn->d_inode, ceph_vinop(dn->d_inode));
892 }
893 if ((!prehash || *prehash) && d_unhashed(dn))
894 d_rehash(dn);
895 if (set_offset)
896 ceph_set_dentry_offset(dn);
897out:
898 return dn;
899}
900
901/*
902 * Incorporate results into the local cache. This is either just
903 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
904 * after a lookup).
905 *
906 * A reply may contain
907 * a directory inode along with a dentry.
908 * and/or a target inode
909 *
910 * Called with snap_rwsem (read).
911 */
912int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
913 struct ceph_mds_session *session)
914{
915 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
916 struct inode *in = NULL;
917 struct ceph_mds_reply_inode *ininfo;
918 struct ceph_vino vino;
919 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
920 int i = 0;
921 int err = 0;
922
923 dout("fill_trace %p is_dentry %d is_target %d\n", req,
924 rinfo->head->is_dentry, rinfo->head->is_target);
925
926#if 0
927 /*
928 * Debugging hook:
929 *
930 * If we resend completed ops to a recovering mds, we get no
931 * trace. Since that is very rare, pretend this is the case
932 * to ensure the 'no trace' handlers in the callers behave.
933 *
934 * Fill in inodes unconditionally to avoid breaking cap
935 * invariants.
936 */
937 if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
938 pr_info("fill_trace faking empty trace on %lld %s\n",
939 req->r_tid, ceph_mds_op_name(rinfo->head->op));
940 if (rinfo->head->is_dentry) {
941 rinfo->head->is_dentry = 0;
942 err = fill_inode(req->r_locked_dir,
943 &rinfo->diri, rinfo->dirfrag,
944 session, req->r_request_started, -1);
945 }
946 if (rinfo->head->is_target) {
947 rinfo->head->is_target = 0;
948 ininfo = rinfo->targeti.in;
949 vino.ino = le64_to_cpu(ininfo->ino);
950 vino.snap = le64_to_cpu(ininfo->snapid);
951 in = ceph_get_inode(sb, vino);
952 err = fill_inode(in, &rinfo->targeti, NULL,
953 session, req->r_request_started,
954 req->r_fmode);
955 iput(in);
956 }
957 }
958#endif
959
960 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
961 dout("fill_trace reply is empty!\n");
962 if (rinfo->head->result == 0 && req->r_locked_dir)
963 ceph_invalidate_dir_request(req);
964 return 0;
965 }
966
967 if (rinfo->head->is_dentry) {
968 struct inode *dir = req->r_locked_dir;
969
970 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
971 session, req->r_request_started, -1,
972 &req->r_caps_reservation);
973 if (err < 0)
974 return err;
975 }
976
977 /*
978 * ignore null lease/binding on snapdir ENOENT, or else we
979 * will have trouble splicing in the virtual snapdir later
980 */
981 if (rinfo->head->is_dentry && !req->r_aborted &&
982 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
983 fsc->mount_options->snapdir_name,
984 req->r_dentry->d_name.len))) {
985 /*
986 * lookup link rename : null -> possibly existing inode
987 * mknod symlink mkdir : null -> new inode
988 * unlink : linked -> null
989 */
990 struct inode *dir = req->r_locked_dir;
991 struct dentry *dn = req->r_dentry;
992 bool have_dir_cap, have_lease;
993
994 BUG_ON(!dn);
995 BUG_ON(!dir);
996 BUG_ON(dn->d_parent->d_inode != dir);
997 BUG_ON(ceph_ino(dir) !=
998 le64_to_cpu(rinfo->diri.in->ino));
999 BUG_ON(ceph_snap(dir) !=
1000 le64_to_cpu(rinfo->diri.in->snapid));
1001
1002 /* do we have a lease on the whole dir? */
1003 have_dir_cap =
1004 (le32_to_cpu(rinfo->diri.in->cap.caps) &
1005 CEPH_CAP_FILE_SHARED);
1006
1007 /* do we have a dn lease? */
1008 have_lease = have_dir_cap ||
1009 (le16_to_cpu(rinfo->dlease->mask) &
1010 CEPH_LOCK_DN);
1011
1012 if (!have_lease)
1013 dout("fill_trace no dentry lease or dir cap\n");
1014
1015 /* rename? */
1016 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1017 dout(" src %p '%.*s' dst %p '%.*s'\n",
1018 req->r_old_dentry,
1019 req->r_old_dentry->d_name.len,
1020 req->r_old_dentry->d_name.name,
1021 dn, dn->d_name.len, dn->d_name.name);
1022 dout("fill_trace doing d_move %p -> %p\n",
1023 req->r_old_dentry, dn);
1024
1025 /* d_move screws up d_subdirs order */
1026 ceph_i_clear(dir, CEPH_I_COMPLETE);
1027
1028 d_move(req->r_old_dentry, dn);
1029 dout(" src %p '%.*s' dst %p '%.*s'\n",
1030 req->r_old_dentry,
1031 req->r_old_dentry->d_name.len,
1032 req->r_old_dentry->d_name.name,
1033 dn, dn->d_name.len, dn->d_name.name);
1034
1035 /* ensure target dentry is invalidated, despite
1036 rehashing bug in vfs_rename_dir */
1037 ceph_invalidate_dentry_lease(dn);
1038
1039 /* take overwritten dentry's readdir offset */
1040 dout("dn %p gets %p offset %lld (old offset %lld)\n",
1041 req->r_old_dentry, dn, ceph_dentry(dn)->offset,
1042 ceph_dentry(req->r_old_dentry)->offset);
1043 ceph_dentry(req->r_old_dentry)->offset =
1044 ceph_dentry(dn)->offset;
1045
1046 dn = req->r_old_dentry; /* use old_dentry */
1047 in = dn->d_inode;
1048 }
1049
1050 /* null dentry? */
1051 if (!rinfo->head->is_target) {
1052 dout("fill_trace null dentry\n");
1053 if (dn->d_inode) {
1054 dout("d_delete %p\n", dn);
1055 d_delete(dn);
1056 } else {
1057 dout("d_instantiate %p NULL\n", dn);
1058 d_instantiate(dn, NULL);
1059 if (have_lease && d_unhashed(dn))
1060 d_rehash(dn);
1061 update_dentry_lease(dn, rinfo->dlease,
1062 session,
1063 req->r_request_started);
1064 }
1065 goto done;
1066 }
1067
1068 /* attach proper inode */
1069 ininfo = rinfo->targeti.in;
1070 vino.ino = le64_to_cpu(ininfo->ino);
1071 vino.snap = le64_to_cpu(ininfo->snapid);
1072 in = dn->d_inode;
1073 if (!in) {
1074 in = ceph_get_inode(sb, vino);
1075 if (IS_ERR(in)) {
1076 pr_err("fill_trace bad get_inode "
1077 "%llx.%llx\n", vino.ino, vino.snap);
1078 err = PTR_ERR(in);
1079 d_delete(dn);
1080 goto done;
1081 }
1082 dn = splice_dentry(dn, in, &have_lease, true);
1083 if (IS_ERR(dn)) {
1084 err = PTR_ERR(dn);
1085 goto done;
1086 }
1087 req->r_dentry = dn; /* may have spliced */
1088 igrab(in);
1089 } else if (ceph_ino(in) == vino.ino &&
1090 ceph_snap(in) == vino.snap) {
1091 igrab(in);
1092 } else {
1093 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1094 dn, in, ceph_ino(in), ceph_snap(in),
1095 vino.ino, vino.snap);
1096 have_lease = false;
1097 in = NULL;
1098 }
1099
1100 if (have_lease)
1101 update_dentry_lease(dn, rinfo->dlease, session,
1102 req->r_request_started);
1103 dout(" final dn %p\n", dn);
1104 i++;
1105 } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1106 req->r_op == CEPH_MDS_OP_MKSNAP) {
1107 struct dentry *dn = req->r_dentry;
1108
1109 /* fill out a snapdir LOOKUPSNAP dentry */
1110 BUG_ON(!dn);
1111 BUG_ON(!req->r_locked_dir);
1112 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
1113 ininfo = rinfo->targeti.in;
1114 vino.ino = le64_to_cpu(ininfo->ino);
1115 vino.snap = le64_to_cpu(ininfo->snapid);
1116 in = ceph_get_inode(sb, vino);
1117 if (IS_ERR(in)) {
1118 pr_err("fill_inode get_inode badness %llx.%llx\n",
1119 vino.ino, vino.snap);
1120 err = PTR_ERR(in);
1121 d_delete(dn);
1122 goto done;
1123 }
1124 dout(" linking snapped dir %p to dn %p\n", in, dn);
1125 dn = splice_dentry(dn, in, NULL, true);
1126 if (IS_ERR(dn)) {
1127 err = PTR_ERR(dn);
1128 goto done;
1129 }
1130 req->r_dentry = dn; /* may have spliced */
1131 igrab(in);
1132 rinfo->head->is_dentry = 1; /* fool notrace handlers */
1133 }
1134
1135 if (rinfo->head->is_target) {
1136 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1137 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1138
1139 if (in == NULL || ceph_ino(in) != vino.ino ||
1140 ceph_snap(in) != vino.snap) {
1141 in = ceph_get_inode(sb, vino);
1142 if (IS_ERR(in)) {
1143 err = PTR_ERR(in);
1144 goto done;
1145 }
1146 }
1147 req->r_target_inode = in;
1148
1149 err = fill_inode(in,
1150 &rinfo->targeti, NULL,
1151 session, req->r_request_started,
1152 (le32_to_cpu(rinfo->head->result) == 0) ?
1153 req->r_fmode : -1,
1154 &req->r_caps_reservation);
1155 if (err < 0) {
1156 pr_err("fill_inode badness %p %llx.%llx\n",
1157 in, ceph_vinop(in));
1158 goto done;
1159 }
1160 }
1161
1162done:
1163 dout("fill_trace done err=%d\n", err);
1164 return err;
1165}
1166
1167/*
1168 * Prepopulate our cache with readdir results, leases, etc.
1169 */
1170int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1171 struct ceph_mds_session *session)
1172{
1173 struct dentry *parent = req->r_dentry;
1174 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1175 struct qstr dname;
1176 struct dentry *dn;
1177 struct inode *in;
1178 int err = 0, i;
1179 struct inode *snapdir = NULL;
1180 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1181 u64 frag = le32_to_cpu(rhead->args.readdir.frag);
1182 struct ceph_dentry_info *di;
1183
1184 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1185 snapdir = ceph_get_snapdir(parent->d_inode);
1186 parent = d_find_alias(snapdir);
1187 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1188 rinfo->dir_nr, parent);
1189 } else {
1190 dout("readdir_prepopulate %d items under dn %p\n",
1191 rinfo->dir_nr, parent);
1192 if (rinfo->dir_dir)
1193 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1194 }
1195
1196 for (i = 0; i < rinfo->dir_nr; i++) {
1197 struct ceph_vino vino;
1198
1199 dname.name = rinfo->dir_dname[i];
1200 dname.len = rinfo->dir_dname_len[i];
1201 dname.hash = full_name_hash(dname.name, dname.len);
1202
1203 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1204 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1205
1206retry_lookup:
1207 dn = d_lookup(parent, &dname);
1208 dout("d_lookup on parent=%p name=%.*s got %p\n",
1209 parent, dname.len, dname.name, dn);
1210
1211 if (!dn) {
1212 dn = d_alloc(parent, &dname);
1213 dout("d_alloc %p '%.*s' = %p\n", parent,
1214 dname.len, dname.name, dn);
1215 if (dn == NULL) {
1216 dout("d_alloc badness\n");
1217 err = -ENOMEM;
1218 goto out;
1219 }
1220 err = ceph_init_dentry(dn);
1221 if (err < 0) {
1222 dput(dn);
1223 goto out;
1224 }
1225 } else if (dn->d_inode &&
1226 (ceph_ino(dn->d_inode) != vino.ino ||
1227 ceph_snap(dn->d_inode) != vino.snap)) {
1228 dout(" dn %p points to wrong inode %p\n",
1229 dn, dn->d_inode);
1230 d_delete(dn);
1231 dput(dn);
1232 goto retry_lookup;
1233 } else {
1234 /* reorder parent's d_subdirs */
1235 spin_lock(&dcache_lock);
1236 spin_lock(&dn->d_lock);
1237 list_move(&dn->d_u.d_child, &parent->d_subdirs);
1238 spin_unlock(&dn->d_lock);
1239 spin_unlock(&dcache_lock);
1240 }
1241
1242 di = dn->d_fsdata;
1243 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
1244
1245 /* inode */
1246 if (dn->d_inode) {
1247 in = dn->d_inode;
1248 } else {
1249 in = ceph_get_inode(parent->d_sb, vino);
1250 if (IS_ERR(in)) {
1251 dout("new_inode badness\n");
1252 d_delete(dn);
1253 dput(dn);
1254 err = PTR_ERR(in);
1255 goto out;
1256 }
1257 dn = splice_dentry(dn, in, NULL, false);
1258 if (IS_ERR(dn))
1259 dn = NULL;
1260 }
1261
1262 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1263 req->r_request_started, -1,
1264 &req->r_caps_reservation) < 0) {
1265 pr_err("fill_inode badness on %p\n", in);
1266 goto next_item;
1267 }
1268 if (dn)
1269 update_dentry_lease(dn, rinfo->dir_dlease[i],
1270 req->r_session,
1271 req->r_request_started);
1272next_item:
1273 if (dn)
1274 dput(dn);
1275 }
1276 req->r_did_prepopulate = true;
1277
1278out:
1279 if (snapdir) {
1280 iput(snapdir);
1281 dput(parent);
1282 }
1283 dout("readdir_prepopulate done\n");
1284 return err;
1285}
1286
1287int ceph_inode_set_size(struct inode *inode, loff_t size)
1288{
1289 struct ceph_inode_info *ci = ceph_inode(inode);
1290 int ret = 0;
1291
1292 spin_lock(&inode->i_lock);
1293 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1294 inode->i_size = size;
1295 inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1296
1297 /* tell the MDS if we are approaching max_size */
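 /* added note: i.e. once the new size passes half of max_size and we have
  * not yet reported a size beyond that halfway point */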
1298 if ((size << 1) >= ci->i_max_size &&
1299 (ci->i_reported_size << 1) < ci->i_max_size)
1300 ret = 1;
1301
1302 spin_unlock(&inode->i_lock);
1303 return ret;
1304}
1305
1306/*
1307 * Write back inode data in a worker thread. (This can't be done
1308 * in the message handler context.)
1309 */
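/*
 * Added note: each of the ceph_queue_* helpers below pins the inode with
 * igrab() only when queue_work() actually queued new work; the matching
 * iput() is done at the end of the corresponding worker function.
 */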
1310void ceph_queue_writeback(struct inode *inode)
1311{
1312 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1313 &ceph_inode(inode)->i_wb_work)) {
1314 dout("ceph_queue_writeback %p\n", inode);
1315 igrab(inode);
1316 } else {
1317 dout("ceph_queue_writeback %p failed\n", inode);
1318 }
1319}
1320
1321static void ceph_writeback_work(struct work_struct *work)
1322{
1323 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1324 i_wb_work);
1325 struct inode *inode = &ci->vfs_inode;
1326
1327 dout("writeback %p\n", inode);
1328 filemap_fdatawrite(&inode->i_data);
1329 iput(inode);
1330}
1331
1332/*
1333 * queue an async invalidation
1334 */
1335void ceph_queue_invalidate(struct inode *inode)
1336{
1337 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1338 &ceph_inode(inode)->i_pg_inv_work)) {
1339 dout("ceph_queue_invalidate %p\n", inode);
1340 igrab(inode);
1341 } else {
1342 dout("ceph_queue_invalidate %p failed\n", inode);
1343 }
1344}
1345
1346/*
1347 * invalidate any pages that are not dirty or under writeback. this
1348 * includes pages that are clean and mapped.
1349 */
1350static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
1351{
1352 struct pagevec pvec;
1353 pgoff_t next = 0;
1354 int i;
1355
1356 pagevec_init(&pvec, 0);
1357 while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
1358 for (i = 0; i < pagevec_count(&pvec); i++) {
1359 struct page *page = pvec.pages[i];
1360 pgoff_t index;
1361 int skip_page =
1362 (PageDirty(page) || PageWriteback(page));
1363
1364 if (!skip_page)
1365 skip_page = !trylock_page(page);
1366
1367 /*
1368 * We really shouldn't be looking at the ->index of an
1369 * unlocked page. But we're not allowed to lock these
1370 * pages. So we rely upon nobody altering the ->index
1371 * of this (pinned-by-us) page.
1372 */
1373 index = page->index;
1374 if (index > next)
1375 next = index;
1376 next++;
1377
1378 if (skip_page)
1379 continue;
1380
1381 generic_error_remove_page(mapping, page);
1382 unlock_page(page);
1383 }
1384 pagevec_release(&pvec);
1385 cond_resched();
1386 }
1387}
1388
1389/*
1390 * Invalidate inode pages in a worker thread. (This can't be done
1391 * in the message handler context.)
1392 */
1393 static void ceph_invalidate_work(struct work_struct *work)
1394{
1395 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1396 i_pg_inv_work);
1397 struct inode *inode = &ci->vfs_inode;
1398 u32 orig_gen;
1399 int check = 0;
1400
1401 spin_lock(&inode->i_lock);
1402 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1403 ci->i_rdcache_gen, ci->i_rdcache_revoking);
1404 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1405 /* nevermind! */
1406 spin_unlock(&inode->i_lock);
1407 goto out;
1408 }
1409 orig_gen = ci->i_rdcache_gen;
1410 spin_unlock(&inode->i_lock);
1411
1412 ceph_invalidate_nondirty_pages(inode->i_mapping);
1413
1414 spin_lock(&inode->i_lock);
1415 if (orig_gen == ci->i_rdcache_gen &&
1416 orig_gen == ci->i_rdcache_revoking) {
1417 dout("invalidate_pages %p gen %d successful\n", inode,
1418 ci->i_rdcache_gen);
1419 ci->i_rdcache_revoking--;
1420 check = 1;
1421 } else {
1422 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1423 inode, orig_gen, ci->i_rdcache_gen,
1424 ci->i_rdcache_revoking);
1425 }
1426 spin_unlock(&inode->i_lock);
1427
1428 if (check)
1429 ceph_check_caps(ci, 0, NULL);
1430out:
1431 iput(inode);
1432}
1433
1434
1435/*
1436 * called by trunc_wq; take i_mutex ourselves
1437 *
1438 * We also truncate in a separate thread as well.
1439 */
1440 static void ceph_vmtruncate_work(struct work_struct *work)
1441{
1442 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1443 i_vmtruncate_work);
1444 struct inode *inode = &ci->vfs_inode;
1445
1446 dout("vmtruncate_work %p\n", inode);
1447 mutex_lock(&inode->i_mutex);
1448 __ceph_do_pending_vmtruncate(inode);
1449 mutex_unlock(&inode->i_mutex);
1450 iput(inode);
1451}
1452
1453/*
1454 * Queue an async vmtruncate. If we fail to queue work, we will handle
1455 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1456 */
1457void ceph_queue_vmtruncate(struct inode *inode)
1458{
1459 struct ceph_inode_info *ci = ceph_inode(inode);
1460
1461 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1462 &ci->i_vmtruncate_work)) {
1463 dout("ceph_queue_vmtruncate %p\n", inode);
1464 igrab(inode);
1465 } else {
1466 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1467 inode, ci->i_truncate_pending);
1468 }
1469}
1470
1471/*
1472 * called with i_mutex held.
1473 *
1474 * Make sure any pending truncation is applied before doing anything
1475 * that may depend on it.
1476 */
1477void __ceph_do_pending_vmtruncate(struct inode *inode)
1478{
1479 struct ceph_inode_info *ci = ceph_inode(inode);
1480 u64 to;
1481 int wrbuffer_refs, wake = 0;
1482
1483retry:
1484 spin_lock(&inode->i_lock);
1485 if (ci->i_truncate_pending == 0) {
1486 dout("__do_pending_vmtruncate %p none pending\n", inode);
1487 spin_unlock(&inode->i_lock);
1488 return;
1489 }
1490
1491 /*
1492 * make sure any dirty snapped pages are flushed before we
1493 * possibly truncate them.. so write AND block!
1494 */
1495 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1496 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1497 inode);
1498 spin_unlock(&inode->i_lock);
1499 filemap_write_and_wait_range(&inode->i_data, 0,
1500 inode->i_sb->s_maxbytes);
1501 goto retry;
1502 }
1503
1504 to = ci->i_truncate_size;
1505 wrbuffer_refs = ci->i_wrbuffer_ref;
1506 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1507 ci->i_truncate_pending, to);
1508 spin_unlock(&inode->i_lock);
1509
1510 truncate_inode_pages(inode->i_mapping, to);
1511
1512 spin_lock(&inode->i_lock);
1513 ci->i_truncate_pending--;
1514 if (ci->i_truncate_pending == 0)
1515 wake = 1;
1516 spin_unlock(&inode->i_lock);
1517
1518 if (wrbuffer_refs == 0)
1519 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1520 if (wake)
1521 wake_up_all(&ci->i_cap_wq);
1522}
1523
1524
1525/*
1526 * symlinks
1527 */
1528static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1529{
1530 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1531 nd_set_link(nd, ci->i_symlink);
1532 return NULL;
1533}
1534
1535static const struct inode_operations ceph_symlink_iops = {
1536 .readlink = generic_readlink,
1537 .follow_link = ceph_sym_follow_link,
1538};
1539
1540/*
1541 * setattr
1542 */
1543int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1544{
1545 struct inode *inode = dentry->d_inode;
1546 struct ceph_inode_info *ci = ceph_inode(inode);
1547 struct inode *parent_inode = dentry->d_parent->d_inode;
1548 const unsigned int ia_valid = attr->ia_valid;
1549 struct ceph_mds_request *req;
1550 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1551 int issued;
1552 int release = 0, dirtied = 0;
1553 int mask = 0;
1554 int err = 0;
1555
1556 if (ceph_snap(inode) != CEPH_NOSNAP)
1557 return -EROFS;
1558
1559 __ceph_do_pending_vmtruncate(inode);
1560
1561 err = inode_change_ok(inode, attr);
1562 if (err != 0)
1563 return err;
1564
1565 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1566 USE_AUTH_MDS);
1567 if (IS_ERR(req))
1568 return PTR_ERR(req);
1569
1570 spin_lock(&inode->i_lock);
1571 issued = __ceph_caps_issued(ci, NULL);
1572 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1573
1574 if (ia_valid & ATTR_UID) {
1575 dout("setattr %p uid %d -> %d\n", inode,
1576 inode->i_uid, attr->ia_uid);
1577 if (issued & CEPH_CAP_AUTH_EXCL) {
1578 inode->i_uid = attr->ia_uid;
1579 dirtied |= CEPH_CAP_AUTH_EXCL;
1580 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1581 attr->ia_uid != inode->i_uid) {
1582 req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
1583 mask |= CEPH_SETATTR_UID;
1584 release |= CEPH_CAP_AUTH_SHARED;
1585 }
1586 }
1587 if (ia_valid & ATTR_GID) {
1588 dout("setattr %p gid %d -> %d\n", inode,
1589 inode->i_gid, attr->ia_gid);
1590 if (issued & CEPH_CAP_AUTH_EXCL) {
1591 inode->i_gid = attr->ia_gid;
1592 dirtied |= CEPH_CAP_AUTH_EXCL;
1593 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1594 attr->ia_gid != inode->i_gid) {
1595 req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
1596 mask |= CEPH_SETATTR_GID;
1597 release |= CEPH_CAP_AUTH_SHARED;
1598 }
1599 }
1600 if (ia_valid & ATTR_MODE) {
1601 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1602 attr->ia_mode);
1603 if (issued & CEPH_CAP_AUTH_EXCL) {
1604 inode->i_mode = attr->ia_mode;
1605 dirtied |= CEPH_CAP_AUTH_EXCL;
1606 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1607 attr->ia_mode != inode->i_mode) {
1608 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1609 mask |= CEPH_SETATTR_MODE;
1610 release |= CEPH_CAP_AUTH_SHARED;
1611 }
1612 }
1613
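 /*
  * Added note: the timestamp and size cases below follow one pattern: with
  * the relevant EXCL cap held we apply the change locally and mark that cap
  * dirty; otherwise the new value is encoded into the setattr request and
  * the shared caps covering it are released to the MDS.
  */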
1614 if (ia_valid & ATTR_ATIME) {
1615 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1616 inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1617 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1618 if (issued & CEPH_CAP_FILE_EXCL) {
1619 ci->i_time_warp_seq++;
1620 inode->i_atime = attr->ia_atime;
1621 dirtied |= CEPH_CAP_FILE_EXCL;
1622 } else if ((issued & CEPH_CAP_FILE_WR) &&
1623 timespec_compare(&inode->i_atime,
1624 &attr->ia_atime) < 0) {
1625 inode->i_atime = attr->ia_atime;
1626 dirtied |= CEPH_CAP_FILE_WR;
1627 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1628 !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1629 ceph_encode_timespec(&req->r_args.setattr.atime,
1630 &attr->ia_atime);
1631 mask |= CEPH_SETATTR_ATIME;
1632 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1633 CEPH_CAP_FILE_WR;
1634 }
1635 }
1636 if (ia_valid & ATTR_MTIME) {
1637 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1638 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1639 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1640 if (issued & CEPH_CAP_FILE_EXCL) {
1641 ci->i_time_warp_seq++;
1642 inode->i_mtime = attr->ia_mtime;
1643 dirtied |= CEPH_CAP_FILE_EXCL;
1644 } else if ((issued & CEPH_CAP_FILE_WR) &&
1645 timespec_compare(&inode->i_mtime,
1646 &attr->ia_mtime) < 0) {
1647 inode->i_mtime = attr->ia_mtime;
1648 dirtied |= CEPH_CAP_FILE_WR;
1649 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1650 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1651 ceph_encode_timespec(&req->r_args.setattr.mtime,
1652 &attr->ia_mtime);
1653 mask |= CEPH_SETATTR_MTIME;
1654 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1655 CEPH_CAP_FILE_WR;
1656 }
1657 }
1658 if (ia_valid & ATTR_SIZE) {
1659 dout("setattr %p size %lld -> %lld\n", inode,
1660 inode->i_size, attr->ia_size);
1661 if (attr->ia_size > inode->i_sb->s_maxbytes) {
1662 err = -EINVAL;
1663 goto out;
1664 }
1665 if ((issued & CEPH_CAP_FILE_EXCL) &&
1666 attr->ia_size > inode->i_size) {
1667 inode->i_size = attr->ia_size;
1668 inode->i_blocks =
1669 (attr->ia_size + (1 << 9) - 1) >> 9;
1670 inode->i_ctime = attr->ia_ctime;
1671 ci->i_reported_size = attr->ia_size;
1672 dirtied |= CEPH_CAP_FILE_EXCL;
1673 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1674 attr->ia_size != inode->i_size) {
1675 req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1676 req->r_args.setattr.old_size =
1677 cpu_to_le64(inode->i_size);
1678 mask |= CEPH_SETATTR_SIZE;
1679 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1680 CEPH_CAP_FILE_WR;
1681 }
1682 }
1683
1684 /* these do nothing */
1685 if (ia_valid & ATTR_CTIME) {
1686 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1687 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1688 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1689 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1690 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1691 only ? "ctime only" : "ignored");
1692 inode->i_ctime = attr->ia_ctime;
1693 if (only) {
1694 /*
1695 * if the kernel wants to dirty ctime but nothing else,
1696 * we need to choose a cap to dirty under, or do
1697 * an almost-no-op setattr
1698 */
1699 if (issued & CEPH_CAP_AUTH_EXCL)
1700 dirtied |= CEPH_CAP_AUTH_EXCL;
1701 else if (issued & CEPH_CAP_FILE_EXCL)
1702 dirtied |= CEPH_CAP_FILE_EXCL;
1703 else if (issued & CEPH_CAP_XATTR_EXCL)
1704 dirtied |= CEPH_CAP_XATTR_EXCL;
1705 else
1706 mask |= CEPH_SETATTR_CTIME;
1707 }
1708 }
1709 if (ia_valid & ATTR_FILE)
1710 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1711
1712 if (dirtied) {
1713 __ceph_mark_dirty_caps(ci, dirtied);
1714 inode->i_ctime = CURRENT_TIME;
1715 }
1716
1717 release &= issued;
1718 spin_unlock(&inode->i_lock);
1719
1720 if (mask) {
1721 req->r_inode = igrab(inode);
1722 req->r_inode_drop = release;
1723 req->r_args.setattr.mask = cpu_to_le32(mask);
1724 req->r_num_caps = 1;
1725 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
1726 }
1727 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1728 ceph_cap_string(dirtied), mask);
1729
1730 ceph_mdsc_put_request(req);
1731 __ceph_do_pending_vmtruncate(inode);
1732 return err;
1733out:
1734 spin_unlock(&inode->i_lock);
1735 ceph_mdsc_put_request(req);
1736 return err;
1737}
1738
1739/*
1740 * Verify that we have a lease on the given mask. If not,
1741 * do a getattr against an mds.
1742 */
1743int ceph_do_getattr(struct inode *inode, int mask)
1744{
1745 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1746 struct ceph_mds_client *mdsc = fsc->mdsc;
1747 struct ceph_mds_request *req;
1748 int err;
1749
1750 if (ceph_snap(inode) == CEPH_SNAPDIR) {
1751 dout("do_getattr inode %p SNAPDIR\n", inode);
1752 return 0;
1753 }
1754
1755 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
1756 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1757 return 0;
1758
1759 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1760 if (IS_ERR(req))
1761 return PTR_ERR(req);
1762 req->r_inode = igrab(inode);
1763 req->r_num_caps = 1;
1764 req->r_args.getattr.mask = cpu_to_le32(mask);
1765 err = ceph_mdsc_do_request(mdsc, NULL, req);
1766 ceph_mdsc_put_request(req);
1767 dout("do_getattr result=%d\n", err);
1768 return err;
1769}
1770
1771
1772/*
1773 * Check inode permissions. We verify we have a valid value for
1774 * the AUTH cap, then call the generic handler.
1775 */
1776int ceph_permission(struct inode *inode, int mask)
1777{
1778 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1779
1780 if (!err)
1781 err = generic_permission(inode, mask, NULL);
1782 return err;
1783}
1784
1785/*
1786 * Get all attributes. Hopefully someday we'll have a statlite()
1787 * and can limit the fields we require to be accurate.
1788 */
1789int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1790 struct kstat *stat)
1791{
1792 struct inode *inode = dentry->d_inode;
1793 struct ceph_inode_info *ci = ceph_inode(inode);
1794 int err;
1795
1796 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1797 if (!err) {
1798 generic_fillattr(inode, stat);
1799 stat->ino = inode->i_ino;
1800 if (ceph_snap(inode) != CEPH_NOSNAP)
1801 stat->dev = ceph_snap(inode);
1802 else
1803 stat->dev = 0;
1804 if (S_ISDIR(inode->i_mode)) {
1805 stat->size = ci->i_rbytes;
1806 stat->blocks = 0;
1807 stat->blksize = 65536;
1808 }
1809 }
1810 return err;
1811}