ceph: don't assume frag tree splits in mds reply are sorted
fs/ceph/inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
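/*
 * (A frag is a u32 identifying a prefix of the 24-bit dentry name hash
 *  space: the upper 8 bits hold the number of split bits and the lower
 *  24 bits the value, so ceph_frag_make(1, 0x800000), for example,
 *  covers the upper half of the hash space.  See
 *  include/linux/ceph/ceph_frag.h for the exact encoding.)
 */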

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
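/*
 * (Example: if the root frag is split once and its upper child is split
 *  again, __ceph_choose_frag(ci, 0xc00000, ...) descends
 *  frag(0, 0) -> frag(1, 0x800000) -> frag(2, 0xc00000) and returns
 *  that leaf.)
 */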
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(ls->frag, rs->frag);
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	int i;
	u32 id, nsplits;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

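	/*
	 * The splits in the MDS reply are not guaranteed to be sorted
	 * by frag, so sort them first; the merge below walks the rbtree
	 * and the split list in lockstep, in ascending frag order.
	 */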
	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

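	/*
	 * Merge the (now sorted) splits into i_fragtree: existing frags
	 * smaller than the next reply entry are stale and get erased,
	 * matching frags have their split_by refreshed, and frags only
	 * present in the reply are created.  Anything still left in the
	 * tree after the loop is likewise stale.
	 */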
	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(fragtree->splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	ci->i_pool_ns_len = 0;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	ci->i_cap_flush_tree = RB_ROOT;
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * A positive dentry and its corresponding inode always arrive
	 * together in an MDS reply, so there is no need to keep the
	 * inode in the cache after all its aliases are dropped.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
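/*
 * (For example, if another client truncates the file, truncate_seq is
 *  bumped; a size reported by a stale MDS reply that still carries the
 *  old truncate_seq is then ignored rather than "growing" the file
 *  back.)
 */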
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				le32_to_cpu(info->time_warp_seq),
				&ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
		ci->i_layout = info->layout;
		ci->i_pool_ns_len = iinfo->pool_ns_len;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != i_size_read(inode)))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				session, req->r_request_started,
				(!req->r_aborted && rinfo->head->result == 0) ?
				req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

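/*
 * The readdir cache stores dentry pointers in the directory inode's own
 * page cache: each page holds PAGE_SIZE / sizeof(struct dentry *) slots
 * indexed by readdir position, letting a complete, ordered directory be
 * listed from the dcache without another MDS round trip.
 */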
static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order && req->r_path2) {
		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
					  req->r_path2, strlen(req->r_path2));
		last_hash = ceph_frag_value(last_hash);
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

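		/*
		 * Encode this entry's readdir position: the high bits of
		 * rde->offset carry the frag (or the name hash, for
		 * hash-ordered replies) and the low bits a counter that
		 * starts at 2 because offsets 0 and 1 are reserved for
		 * "." and "..".
		 */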
		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}


/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

355da1eb 1746/*
355da1eb
SW
1747 * Make sure any pending truncation is applied before doing anything
1748 * that may depend on it.
1749 */
b415bf4f 1750void __ceph_do_pending_vmtruncate(struct inode *inode)
355da1eb
SW
1751{
1752 struct ceph_inode_info *ci = ceph_inode(inode);
1753 u64 to;
a85f50b6 1754 int wrbuffer_refs, finish = 0;
355da1eb 1755
b0d7c223 1756 mutex_lock(&ci->i_truncate_mutex);
355da1eb 1757retry:
be655596 1758 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1759 if (ci->i_truncate_pending == 0) {
1760 dout("__do_pending_vmtruncate %p none pending\n", inode);
be655596 1761 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1762 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1763 return;
1764 }
1765
1766 /*
1767 * make sure any dirty snapped pages are flushed before we
1768 * possibly truncate them.. so write AND block!
1769 */
1770 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1771 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1772 inode);
be655596 1773 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1774 filemap_write_and_wait_range(&inode->i_data, 0,
1775 inode->i_sb->s_maxbytes);
1776 goto retry;
1777 }
1778
b0d7c223
YZ
1779 /* there should be no reader or writer */
1780 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1781
355da1eb
SW
1782 to = ci->i_truncate_size;
1783 wrbuffer_refs = ci->i_wrbuffer_ref;
1784 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1785 ci->i_truncate_pending, to);
be655596 1786 spin_unlock(&ci->i_ceph_lock);
355da1eb 1787
4e217b5d 1788 truncate_pagecache(inode, to);
355da1eb 1789
be655596 1790 spin_lock(&ci->i_ceph_lock);
a85f50b6
YZ
1791 if (to == ci->i_truncate_size) {
1792 ci->i_truncate_pending = 0;
1793 finish = 1;
1794 }
be655596 1795 spin_unlock(&ci->i_ceph_lock);
a85f50b6
YZ
1796 if (!finish)
1797 goto retry;
355da1eb 1798
b0d7c223
YZ
1799 mutex_unlock(&ci->i_truncate_mutex);
1800
355da1eb
SW
1801 if (wrbuffer_refs == 0)
1802 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
a85f50b6
YZ
1803
1804 wake_up_all(&ci->i_cap_wq);
355da1eb
SW
1805}
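
/*
 * The retry loop above reduces to the following shape (pseudo-C,
 * simplified): keep truncating until i_truncate_size stops moving,
 * since truncate_pagecache() runs with i_ceph_lock dropped and a newer
 * truncate may be recorded in the meantime:
 *
 *	do {
 *		to = ci->i_truncate_size;	sampled under i_ceph_lock
 *		truncate_pagecache(inode, to);	blocking, lock dropped
 *	} while (to != ci->i_truncate_size);	re-checked under the lock
 */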

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_fs_time(inode->i_sb);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}
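
/*
 * Each attribute above broadly follows one pattern (the timestamps add
 * a middle CEPH_CAP_FILE_WR branch); a condensed sketch, not literal
 * code, may help when reading it:
 *
 *	if (issued & <EXCL cap>) {
 *		apply the change locally; dirtied |= <EXCL cap>;
 *	} else if (!(issued & <SHARED cap>) || value actually changes) {
 *		encode it into req->r_args.setattr;
 *		mask |= CEPH_SETATTR_...; release |= <SHARED cap>;
 *	}	(otherwise the cached value already matches: no-op)
 *
 * That is, with an exclusive cap the client mutates its cache and lets
 * the dirty cap flush later; without one it sends a SETATTR to the MDS
 * and releases the shared cap the change invalidates.
 */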

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an MDS.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
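
/*
 * Typical use is through the ceph_do_getattr() wrapper with a NULL
 * locked_page, e.g. (as in ceph_getattr() below):
 *
 *	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
 *
 * The locked_page argument exists for the inline-data read path; when
 * it is non-NULL and the request succeeds, the return value is the
 * inline data length rather than 0.
 */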


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}
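
/*
 * Note on the -ECHILD above: MAY_NOT_BLOCK is set during RCU-walk path
 * lookup, where sleeping is forbidden.  Fetching CEPH_CAP_AUTH_SHARED
 * may require a round trip to an MDS, so we punt and let the VFS retry
 * the lookup in ref-walk mode, where blocking is allowed.
 */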

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}
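
/*
 * What userspace observes for a directory's st_size depends on the
 * mount options; a sketch (path and sizes invented for illustration):
 *
 *	$ stat -c %s /mnt/ceph/some/dir		default: entry count
 *	3					(i_files + i_subdirs)
 *	$ stat -c %s /mnt/ceph/some/dir		mounted with -o rbytes
 *	1048576					(recursive bytes, i_rbytes)
 */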