libceph: define new ceph_file_layout structure
fs/ceph/xattr.c
1 #include <linux/ceph/ceph_debug.h>
2 #include <linux/ceph/pagelist.h>
3
4 #include "super.h"
5 #include "mds_client.h"
6
7 #include <linux/ceph/decode.h>
8
9 #include <linux/xattr.h>
10 #include <linux/posix_acl_xattr.h>
11 #include <linux/slab.h>
12
13 #define XATTR_CEPH_PREFIX "ceph."
14 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
15
16 static int __remove_xattr(struct ceph_inode_info *ci,
17 struct ceph_inode_xattr *xattr);
18
19 const struct xattr_handler ceph_other_xattr_handler;
20
21 /*
22 * List of xattr handlers: POSIX ACL attributes go to the generic ACL
23 * handlers; all other names are passed to ceph_other_xattr_handler.
24 */
25 const struct xattr_handler *ceph_xattr_handlers[] = {
26 #ifdef CONFIG_CEPH_FS_POSIX_ACL
27 &posix_acl_access_xattr_handler,
28 &posix_acl_default_xattr_handler,
29 #endif
30 &ceph_other_xattr_handler,
31 NULL,
32 };
33
34 static bool ceph_is_valid_xattr(const char *name)
35 {
36 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
37 !strncmp(name, XATTR_SECURITY_PREFIX,
38 XATTR_SECURITY_PREFIX_LEN) ||
39 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
40 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
41 }
42
43 /*
44 * These define virtual xattrs exposing the recursive directory
45 * statistics and layout metadata.
46 */
47 struct ceph_vxattr {
48 char *name;
49 size_t name_size; /* strlen(name) + 1 (for '\0') */
50 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
51 size_t size);
52 bool readonly, hidden; /* hidden: not reported by listxattr */
53 bool (*exists_cb)(struct ceph_inode_info *ci); /* NULL: always exists */
54 };
55
56 /* layouts */
57
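/*
 * The layout vxattrs are only reported when the inode carries an
 * explicit file layout, i.e. when any byte of i_layout is non-zero.
 */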
58 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
59 {
60 size_t s;
61 char *p = (char *)&ci->i_layout;
62
63 for (s = 0; s < sizeof(ci->i_layout); s++, p++)
64 if (*p)
65 return true;
66 return false;
67 }
68
69 static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
70 size_t size)
71 {
72 int ret;
73 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
74 struct ceph_osd_client *osdc = &fsc->client->osdc;
75 s64 pool = ci->i_layout.pool_id;
76 const char *pool_name;
77 char buf[128];
78
79 dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
80 down_read(&osdc->lock);
81 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
82 if (pool_name) {
83 size_t len = strlen(pool_name);
84 ret = snprintf(buf, sizeof(buf),
85 "stripe_unit=%u stripe_count=%u object_size=%u pool=",
86 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
87 ci->i_layout.object_size);
88 if (!size) {
89 ret += len;
90 } else if (ret + len > size) {
91 ret = -ERANGE;
92 } else {
93 memcpy(val, buf, ret);
94 memcpy(val + ret, pool_name, len);
95 ret += len;
96 }
97 } else {
98 ret = snprintf(buf, sizeof(buf),
99 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
100 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
101 ci->i_layout.object_size, (unsigned long long)pool);
102 if (size) {
103 if (ret <= size)
104 memcpy(val, buf, ret);
105 else
106 ret = -ERANGE;
107 }
108 }
109 up_read(&osdc->lock);
110 return ret;
111 }
112
113 static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
114 char *val, size_t size)
115 {
116 return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
117 }
118
119 static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
120 char *val, size_t size)
121 {
122 return snprintf(val, size, "%u", ci->i_layout.stripe_count);
123 }
124
125 static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
126 char *val, size_t size)
127 {
128 return snprintf(val, size, "%u", ci->i_layout.object_size);
129 }
130
131 static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
132 char *val, size_t size)
133 {
134 int ret;
135 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
136 struct ceph_osd_client *osdc = &fsc->client->osdc;
137 s64 pool = ci->i_layout.pool_id;
138 const char *pool_name;
139
140 down_read(&osdc->lock);
141 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
142 if (pool_name)
143 ret = snprintf(val, size, "%s", pool_name);
144 else
145 ret = snprintf(val, size, "%lld", (unsigned long long)pool);
146 up_read(&osdc->lock);
147 return ret;
148 }
149
150 /* directories */
151
152 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
153 size_t size)
154 {
155 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
156 }
157
158 static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
159 size_t size)
160 {
161 return snprintf(val, size, "%lld", ci->i_files);
162 }
163
164 static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
165 size_t size)
166 {
167 return snprintf(val, size, "%lld", ci->i_subdirs);
168 }
169
170 static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
171 size_t size)
172 {
173 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
174 }
175
176 static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
177 size_t size)
178 {
179 return snprintf(val, size, "%lld", ci->i_rfiles);
180 }
181
182 static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
183 size_t size)
184 {
185 return snprintf(val, size, "%lld", ci->i_rsubdirs);
186 }
187
188 static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
189 size_t size)
190 {
191 return snprintf(val, size, "%lld", ci->i_rbytes);
192 }
193
194 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
195 size_t size)
196 {
197 return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
198 (long)ci->i_rctime.tv_nsec);
199 }
200
201
202 #define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
203 #define CEPH_XATTR_NAME2(_type, _name, _name2) \
204 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
205
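/*
 * XATTR_NAME_CEPH defines a read-only vxattr that is always listed;
 * XATTR_LAYOUT_FIELD defines a hidden per-field layout vxattr that is
 * only reported when the inode has an explicit layout.
 */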
206 #define XATTR_NAME_CEPH(_type, _name) \
207 { \
208 .name = CEPH_XATTR_NAME(_type, _name), \
209 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
210 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
211 .readonly = true, \
212 .hidden = false, \
213 .exists_cb = NULL, \
214 }
215 #define XATTR_LAYOUT_FIELD(_type, _name, _field) \
216 { \
217 .name = CEPH_XATTR_NAME2(_type, _name, _field), \
218 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
219 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
220 .readonly = false, \
221 .hidden = true, \
222 .exists_cb = ceph_vxattrcb_layout_exists, \
223 }
224
225 static struct ceph_vxattr ceph_dir_vxattrs[] = {
226 {
227 .name = "ceph.dir.layout",
228 .name_size = sizeof("ceph.dir.layout"),
229 .getxattr_cb = ceph_vxattrcb_layout,
230 .readonly = false,
231 .hidden = true,
232 .exists_cb = ceph_vxattrcb_layout_exists,
233 },
234 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
235 XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
236 XATTR_LAYOUT_FIELD(dir, layout, object_size),
237 XATTR_LAYOUT_FIELD(dir, layout, pool),
238 XATTR_NAME_CEPH(dir, entries),
239 XATTR_NAME_CEPH(dir, files),
240 XATTR_NAME_CEPH(dir, subdirs),
241 XATTR_NAME_CEPH(dir, rentries),
242 XATTR_NAME_CEPH(dir, rfiles),
243 XATTR_NAME_CEPH(dir, rsubdirs),
244 XATTR_NAME_CEPH(dir, rbytes),
245 XATTR_NAME_CEPH(dir, rctime),
246 { .name = NULL, 0 } /* Required table terminator */
247 };
248 static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
249
250 /* files */
251
252 static struct ceph_vxattr ceph_file_vxattrs[] = {
253 {
254 .name = "ceph.file.layout",
255 .name_size = sizeof("ceph.file.layout"),
256 .getxattr_cb = ceph_vxattrcb_layout,
257 .readonly = false,
258 .hidden = true,
259 .exists_cb = ceph_vxattrcb_layout_exists,
260 },
261 XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
262 XATTR_LAYOUT_FIELD(file, layout, stripe_count),
263 XATTR_LAYOUT_FIELD(file, layout, object_size),
264 XATTR_LAYOUT_FIELD(file, layout, pool),
265 { .name = NULL, 0 } /* Required table terminator */
266 };
267 static size_t ceph_file_vxattrs_name_size; /* total size of all names */
268
269 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
270 {
271 if (S_ISDIR(inode->i_mode))
272 return ceph_dir_vxattrs;
273 else if (S_ISREG(inode->i_mode))
274 return ceph_file_vxattrs;
275 return NULL;
276 }
277
278 static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
279 {
280 if (vxattrs == ceph_dir_vxattrs)
281 return ceph_dir_vxattrs_name_size;
282 if (vxattrs == ceph_file_vxattrs)
283 return ceph_file_vxattrs_name_size;
284 BUG_ON(vxattrs);
285 return 0;
286 }
287
288 /*
289 * Compute the aggregate size (including terminating '\0') of all
290 * virtual extended attribute names in the given vxattr table.
291 */
292 static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
293 {
294 struct ceph_vxattr *vxattr;
295 size_t size = 0;
296
297 for (vxattr = vxattrs; vxattr->name; vxattr++)
298 if (!vxattr->hidden)
299 size += vxattr->name_size;
300
301 return size;
302 }
303
304 /* Routines called at initialization and exit time */
305
306 void __init ceph_xattr_init(void)
307 {
308 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
309 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
310 }
311
312 void ceph_xattr_exit(void)
313 {
314 ceph_dir_vxattrs_name_size = 0;
315 ceph_file_vxattrs_name_size = 0;
316 }
317
318 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
319 const char *name)
320 {
321 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
322
323 if (vxattr) {
324 while (vxattr->name) {
325 if (!strcmp(vxattr->name, name))
326 return vxattr;
327 vxattr++;
328 }
329 }
330
331 return NULL;
332 }
333
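/*
 * Insert or update an xattr in the per-inode rb-tree index.
 * update_xattr > 0 is a local setxattr: honor XATTR_CREATE/XATTR_REPLACE
 * and take ownership of the name/val buffers.  update_xattr < 0 removes
 * the named xattr if present.  update_xattr == 0 is a rebuild from the
 * MDS blob, where name and val point into the blob and are not freed.
 */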
334 static int __set_xattr(struct ceph_inode_info *ci,
335 const char *name, int name_len,
336 const char *val, int val_len,
337 int flags, int update_xattr,
338 struct ceph_inode_xattr **newxattr)
339 {
340 struct rb_node **p;
341 struct rb_node *parent = NULL;
342 struct ceph_inode_xattr *xattr = NULL;
343 int c;
344 int new = 0;
345
346 p = &ci->i_xattrs.index.rb_node;
347 while (*p) {
348 parent = *p;
349 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
350 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
351 if (c < 0)
352 p = &(*p)->rb_left;
353 else if (c > 0)
354 p = &(*p)->rb_right;
355 else {
356 if (name_len == xattr->name_len)
357 break;
358 else if (name_len < xattr->name_len)
359 p = &(*p)->rb_left;
360 else
361 p = &(*p)->rb_right;
362 }
363 xattr = NULL;
364 }
365
366 if (update_xattr) {
367 int err = 0;
368 if (xattr && (flags & XATTR_CREATE))
369 err = -EEXIST;
370 else if (!xattr && (flags & XATTR_REPLACE))
371 err = -ENODATA;
372 if (err) {
373 kfree(name);
374 kfree(val);
375 return err;
376 }
377 if (update_xattr < 0) {
378 if (xattr)
379 __remove_xattr(ci, xattr);
380 kfree(name);
381 return 0;
382 }
383 }
384
385 if (!xattr) {
386 new = 1;
387 xattr = *newxattr;
388 xattr->name = name;
389 xattr->name_len = name_len;
390 xattr->should_free_name = update_xattr;
391
392 ci->i_xattrs.count++;
393 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
394 } else {
395 kfree(*newxattr);
396 *newxattr = NULL;
397 if (xattr->should_free_val)
398 kfree((void *)xattr->val);
399
400 if (update_xattr) {
401 kfree((void *)name);
402 name = xattr->name;
403 }
404 ci->i_xattrs.names_size -= xattr->name_len;
405 ci->i_xattrs.vals_size -= xattr->val_len;
406 }
407 ci->i_xattrs.names_size += name_len;
408 ci->i_xattrs.vals_size += val_len;
409 if (val)
410 xattr->val = val;
411 else
412 xattr->val = "";
413
414 xattr->val_len = val_len;
415 xattr->dirty = update_xattr;
416 xattr->should_free_val = (val && update_xattr);
417
418 if (new) {
419 rb_link_node(&xattr->node, parent, p);
420 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
421 dout("__set_xattr_val p=%p\n", p);
422 }
423
424 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
425 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
426
427 return 0;
428 }
429
430 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
431 const char *name)
432 {
433 struct rb_node **p;
434 struct rb_node *parent = NULL;
435 struct ceph_inode_xattr *xattr = NULL;
436 int name_len = strlen(name);
437 int c;
438
439 p = &ci->i_xattrs.index.rb_node;
440 while (*p) {
441 parent = *p;
442 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
443 c = strncmp(name, xattr->name, xattr->name_len);
444 if (c == 0 && name_len > xattr->name_len)
445 c = 1;
446 if (c < 0)
447 p = &(*p)->rb_left;
448 else if (c > 0)
449 p = &(*p)->rb_right;
450 else {
451 dout("__get_xattr %s: found %.*s\n", name,
452 xattr->val_len, xattr->val);
453 return xattr;
454 }
455 }
456
457 dout("__get_xattr %s: not found\n", name);
458
459 return NULL;
460 }
461
462 static void __free_xattr(struct ceph_inode_xattr *xattr)
463 {
464 BUG_ON(!xattr);
465
466 if (xattr->should_free_name)
467 kfree((void *)xattr->name);
468 if (xattr->should_free_val)
469 kfree((void *)xattr->val);
470
471 kfree(xattr);
472 }
473
474 static int __remove_xattr(struct ceph_inode_info *ci,
475 struct ceph_inode_xattr *xattr)
476 {
477 if (!xattr)
478 return -ENODATA;
479
480 rb_erase(&xattr->node, &ci->i_xattrs.index);
481
482 if (xattr->should_free_name)
483 kfree((void *)xattr->name);
484 if (xattr->should_free_val)
485 kfree((void *)xattr->val);
486
487 ci->i_xattrs.names_size -= xattr->name_len;
488 ci->i_xattrs.vals_size -= xattr->val_len;
489 ci->i_xattrs.count--;
490 kfree(xattr);
491
492 return 0;
493 }
494
495 static char *__copy_xattr_names(struct ceph_inode_info *ci,
496 char *dest)
497 {
498 struct rb_node *p;
499 struct ceph_inode_xattr *xattr = NULL;
500
501 p = rb_first(&ci->i_xattrs.index);
502 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
503
504 while (p) {
505 xattr = rb_entry(p, struct ceph_inode_xattr, node);
506 memcpy(dest, xattr->name, xattr->name_len);
507 dest[xattr->name_len] = '\0';
508
509 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
510 xattr->name_len, ci->i_xattrs.names_size);
511
512 dest += xattr->name_len + 1;
513 p = rb_next(p);
514 }
515
516 return dest;
517 }
518
519 void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
520 {
521 struct rb_node *p, *tmp;
522 struct ceph_inode_xattr *xattr = NULL;
523
524 p = rb_first(&ci->i_xattrs.index);
525
526 dout("__ceph_destroy_xattrs p=%p\n", p);
527
528 while (p) {
529 xattr = rb_entry(p, struct ceph_inode_xattr, node);
530 tmp = p;
531 p = rb_next(tmp);
532 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
533 xattr->name_len, xattr->name);
534 rb_erase(tmp, &ci->i_xattrs.index);
535
536 __free_xattr(xattr);
537 }
538
539 ci->i_xattrs.names_size = 0;
540 ci->i_xattrs.vals_size = 0;
541 ci->i_xattrs.index_version = 0;
542 ci->i_xattrs.count = 0;
543 ci->i_xattrs.index = RB_ROOT;
544 }
545
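/*
 * Decode the raw xattr blob received from the MDS into the rb-tree
 * index.  i_ceph_lock is dropped while allocating the node array; if
 * the xattr version changed in the meantime, start over.
 */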
546 static int __build_xattrs(struct inode *inode)
547 __releases(ci->i_ceph_lock)
548 __acquires(ci->i_ceph_lock)
549 {
550 u32 namelen;
551 u32 numattr = 0;
552 void *p, *end;
553 u32 len;
554 const char *name, *val;
555 struct ceph_inode_info *ci = ceph_inode(inode);
556 int xattr_version;
557 struct ceph_inode_xattr **xattrs = NULL;
558 int err = 0;
559 int i;
560
561 dout("__build_xattrs() len=%d\n",
562 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
563
564 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
565 return 0; /* already built */
566
567 __ceph_destroy_xattrs(ci);
568
569 start:
570 /* update the internal xattr rb tree */
571 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
572 p = ci->i_xattrs.blob->vec.iov_base;
573 end = p + ci->i_xattrs.blob->vec.iov_len;
574 ceph_decode_32_safe(&p, end, numattr, bad);
575 xattr_version = ci->i_xattrs.version;
576 spin_unlock(&ci->i_ceph_lock);
577
578 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
579 GFP_NOFS);
580 err = -ENOMEM;
581 if (!xattrs)
582 goto bad_lock;
583
584 for (i = 0; i < numattr; i++) {
585 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
586 GFP_NOFS);
587 if (!xattrs[i])
588 goto bad_lock;
589 }
590
591 spin_lock(&ci->i_ceph_lock);
592 if (ci->i_xattrs.version != xattr_version) {
593 /* lost a race, retry */
594 for (i = 0; i < numattr; i++)
595 kfree(xattrs[i]);
596 kfree(xattrs);
597 xattrs = NULL;
598 goto start;
599 }
600 err = -EIO;
601 while (numattr--) {
602 ceph_decode_32_safe(&p, end, len, bad);
603 namelen = len;
604 name = p;
605 p += len;
606 ceph_decode_32_safe(&p, end, len, bad);
607 val = p;
608 p += len;
609
610 err = __set_xattr(ci, name, namelen, val, len,
611 0, 0, &xattrs[numattr]);
612
613 if (err < 0)
614 goto bad;
615 }
616 kfree(xattrs);
617 }
618 ci->i_xattrs.index_version = ci->i_xattrs.version;
619 ci->i_xattrs.dirty = false;
620
621 return err;
622 bad_lock:
623 spin_lock(&ci->i_ceph_lock);
624 bad:
625 if (xattrs) {
626 for (i = 0; i < numattr; i++)
627 kfree(xattrs[i]);
628 kfree(xattrs);
629 }
630 ci->i_xattrs.names_size = 0;
631 return err;
632 }
633
634 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
635 int val_size)
636 {
637 /*
638 * 4 bytes for the xattr count, plus a 4-byte length prefix for
639 * each name and each value
640 */
641 int size = 4 + ci->i_xattrs.count*(4 + 4) +
642 ci->i_xattrs.names_size +
643 ci->i_xattrs.vals_size;
644 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
645 ci->i_xattrs.count, ci->i_xattrs.names_size,
646 ci->i_xattrs.vals_size);
647
648 if (name_size)
649 size += 4 + 4 + name_size + val_size;
650
651 return size;
652 }
653
654 /*
655 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
656 * and swap into place.
657 */
658 void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
659 {
660 struct rb_node *p;
661 struct ceph_inode_xattr *xattr = NULL;
662 void *dest;
663
664 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
665 if (ci->i_xattrs.dirty) {
666 int need = __get_required_blob_size(ci, 0, 0);
667
668 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
669
670 p = rb_first(&ci->i_xattrs.index);
671 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
672
673 ceph_encode_32(&dest, ci->i_xattrs.count);
674 while (p) {
675 xattr = rb_entry(p, struct ceph_inode_xattr, node);
676
677 ceph_encode_32(&dest, xattr->name_len);
678 memcpy(dest, xattr->name, xattr->name_len);
679 dest += xattr->name_len;
680 ceph_encode_32(&dest, xattr->val_len);
681 memcpy(dest, xattr->val, xattr->val_len);
682 dest += xattr->val_len;
683
684 p = rb_next(p);
685 }
686
687 /* adjust buffer len; it may be larger than we need */
688 ci->i_xattrs.prealloc_blob->vec.iov_len =
689 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
690
691 if (ci->i_xattrs.blob)
692 ceph_buffer_put(ci->i_xattrs.blob);
693 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
694 ci->i_xattrs.prealloc_blob = NULL;
695 ci->i_xattrs.dirty = false;
696 ci->i_xattrs.version++;
697 }
698 }
699
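/*
 * If the current task is filling in an MDS reply trace
 * (current->journal_info holds the request for this inode), return the
 * caps mask that request asked for, so the just-received xattrs can be
 * used without issuing another getattr.
 */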
700 static inline int __get_request_mask(struct inode *in) {
701 struct ceph_mds_request *req = current->journal_info;
702 int mask = 0;
703 if (req && req->r_target_inode == in) {
704 if (req->r_op == CEPH_MDS_OP_LOOKUP ||
705 req->r_op == CEPH_MDS_OP_LOOKUPINO ||
706 req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
707 req->r_op == CEPH_MDS_OP_GETATTR) {
708 mask = le32_to_cpu(req->r_args.getattr.mask);
709 } else if (req->r_op == CEPH_MDS_OP_OPEN ||
710 req->r_op == CEPH_MDS_OP_CREATE) {
711 mask = le32_to_cpu(req->r_args.open.mask);
712 }
713 }
714 return mask;
715 }
716
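/*
 * Get one xattr value.  Virtual xattrs are answered from the in-core
 * inode; otherwise make sure the xattr blob is available (via
 * CEPH_CAP_XATTR_SHARED or a getattr to the MDS), build the index and
 * look the name up there.
 */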
717 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
718 size_t size)
719 {
720 struct ceph_inode_info *ci = ceph_inode(inode);
721 struct ceph_inode_xattr *xattr;
722 struct ceph_vxattr *vxattr = NULL;
723 int req_mask;
724 int err;
725
726 /* let's see if a virtual xattr was requested */
727 vxattr = ceph_match_vxattr(inode, name);
728 if (vxattr) {
729 err = -ENODATA;
730 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
731 err = vxattr->getxattr_cb(ci, value, size);
732 return err;
733 }
734
735 req_mask = __get_request_mask(inode);
736
737 spin_lock(&ci->i_ceph_lock);
738 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
739 ci->i_xattrs.version, ci->i_xattrs.index_version);
740
741 if (ci->i_xattrs.version == 0 ||
742 !((req_mask & CEPH_CAP_XATTR_SHARED) ||
743 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
744 spin_unlock(&ci->i_ceph_lock);
745
746 /* security module gets xattr while filling trace */
747 if (current->journal_info != NULL) {
748 pr_warn_ratelimited("sync getxattr %p "
749 "during filling trace\n", inode);
750 return -EBUSY;
751 }
752
753 /* get xattrs from mds (if we don't already have them) */
754 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
755 if (err)
756 return err;
757 spin_lock(&ci->i_ceph_lock);
758 }
759
760 err = __build_xattrs(inode);
761 if (err < 0)
762 goto out;
763
764 err = -ENODATA; /* == ENOATTR */
765 xattr = __get_xattr(ci, name);
766 if (!xattr)
767 goto out;
768
769 err = -ERANGE;
770 if (size && size < xattr->val_len)
771 goto out;
772
773 err = xattr->val_len;
774 if (size == 0)
775 goto out;
776
777 memcpy(value, xattr->val, xattr->val_len);
778
779 if (current->journal_info != NULL &&
780 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
781 ci->i_ceph_flags |= CEPH_I_SEC_INITED;
782 out:
783 spin_unlock(&ci->i_ceph_lock);
784 return err;
785 }
786
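/*
 * List xattr names: the real names from the rb-tree index first,
 * followed by any non-hidden virtual xattrs that exist for this inode.
 */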
787 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
788 {
789 struct inode *inode = d_inode(dentry);
790 struct ceph_inode_info *ci = ceph_inode(inode);
791 struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
792 u32 vir_namelen = 0;
793 u32 namelen;
794 int err;
795 u32 len;
796 int i;
797
798 spin_lock(&ci->i_ceph_lock);
799 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
800 ci->i_xattrs.version, ci->i_xattrs.index_version);
801
802 if (ci->i_xattrs.version == 0 ||
803 !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
804 spin_unlock(&ci->i_ceph_lock);
805 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
806 if (err)
807 return err;
808 spin_lock(&ci->i_ceph_lock);
809 }
810
811 err = __build_xattrs(inode);
812 if (err < 0)
813 goto out;
814 /*
815 * Start with virtual dir xattr names (if any) (including
816 * terminating '\0' characters for each).
817 */
818 vir_namelen = ceph_vxattrs_name_size(vxattrs);
819
820 /* add 1 byte per name for the terminating '\0' */
821 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
822 err = -ERANGE;
823 if (size && vir_namelen + namelen > size)
824 goto out;
825
826 err = namelen + vir_namelen;
827 if (size == 0)
828 goto out;
829
830 names = __copy_xattr_names(ci, names);
831
832 /* virtual xattr names, too */
833 err = namelen;
834 if (vxattrs) {
835 for (i = 0; vxattrs[i].name; i++) {
836 if (!vxattrs[i].hidden &&
837 !(vxattrs[i].exists_cb &&
838 !vxattrs[i].exists_cb(ci))) {
839 len = sprintf(names, "%s", vxattrs[i].name);
840 names += len + 1;
841 err += len + 1;
842 }
843 }
844 }
845
846 out:
847 spin_unlock(&ci->i_ceph_lock);
848 return err;
849 }
850
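/*
 * Send the xattr change synchronously to the auth MDS.  A removal with
 * CEPH_XATTR_REPLACE is sent as an RMXATTR request; any value is
 * carried in a pagelist attached to the SETXATTR request.
 */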
851 static int ceph_sync_setxattr(struct inode *inode, const char *name,
852 const char *value, size_t size, int flags)
853 {
854 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
855 struct ceph_inode_info *ci = ceph_inode(inode);
856 struct ceph_mds_request *req;
857 struct ceph_mds_client *mdsc = fsc->mdsc;
858 struct ceph_pagelist *pagelist = NULL;
859 int op = CEPH_MDS_OP_SETXATTR;
860 int err;
861
862 if (size > 0) {
863 /* copy value into pagelist */
864 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
865 if (!pagelist)
866 return -ENOMEM;
867
868 ceph_pagelist_init(pagelist);
869 err = ceph_pagelist_append(pagelist, value, size);
870 if (err)
871 goto out;
872 } else if (!value) {
873 if (flags & CEPH_XATTR_REPLACE)
874 op = CEPH_MDS_OP_RMXATTR;
875 else
876 flags |= CEPH_XATTR_REMOVE;
877 }
878
879 dout("setxattr value=%.*s\n", (int)size, value);
880
881 /* do request */
882 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
883 if (IS_ERR(req)) {
884 err = PTR_ERR(req);
885 goto out;
886 }
887
888 req->r_path2 = kstrdup(name, GFP_NOFS);
889 if (!req->r_path2) {
890 ceph_mdsc_put_request(req);
891 err = -ENOMEM;
892 goto out;
893 }
894
895 if (op == CEPH_MDS_OP_SETXATTR) {
896 req->r_args.setxattr.flags = cpu_to_le32(flags);
897 req->r_pagelist = pagelist;
898 pagelist = NULL;
899 }
900
901 req->r_inode = inode;
902 ihold(inode);
903 req->r_num_caps = 1;
904 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
905
906 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
907 err = ceph_mdsc_do_request(mdsc, NULL, req);
908 ceph_mdsc_put_request(req);
909 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
910
911 out:
912 if (pagelist)
913 ceph_pagelist_release(pagelist);
914 return err;
915 }
916
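/*
 * Set (or, when value is NULL, remove) an xattr.  If we hold
 * CEPH_CAP_XATTR_EXCL the change is applied to the local index and the
 * caps are marked dirty; otherwise, and for unhandled ceph.* names, it
 * is sent synchronously to the MDS.
 */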
917 int __ceph_setxattr(struct inode *inode, const char *name,
918 const void *value, size_t size, int flags)
919 {
920 struct ceph_vxattr *vxattr;
921 struct ceph_inode_info *ci = ceph_inode(inode);
922 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
923 struct ceph_cap_flush *prealloc_cf = NULL;
924 int issued;
925 int err;
926 int dirty = 0;
927 int name_len = strlen(name);
928 int val_len = size;
929 char *newname = NULL;
930 char *newval = NULL;
931 struct ceph_inode_xattr *xattr = NULL;
932 int required_blob_size;
933 bool lock_snap_rwsem = false;
934
935 if (ceph_snap(inode) != CEPH_NOSNAP)
936 return -EROFS;
937
938 vxattr = ceph_match_vxattr(inode, name);
939 if (vxattr && vxattr->readonly)
940 return -EOPNOTSUPP;
941
942 /* pass any unhandled ceph.* xattrs through to the MDS */
943 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
944 goto do_sync_unlocked;
945
946 /* preallocate memory for xattr name, value, index node */
947 err = -ENOMEM;
948 newname = kmemdup(name, name_len + 1, GFP_NOFS);
949 if (!newname)
950 goto out;
951
952 if (val_len) {
953 newval = kmemdup(value, val_len, GFP_NOFS);
954 if (!newval)
955 goto out;
956 }
957
958 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
959 if (!xattr)
960 goto out;
961
962 prealloc_cf = ceph_alloc_cap_flush();
963 if (!prealloc_cf)
964 goto out;
965
966 spin_lock(&ci->i_ceph_lock);
967 retry:
968 issued = __ceph_caps_issued(ci, NULL);
969 if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
970 goto do_sync;
971
972 if (!lock_snap_rwsem && !ci->i_head_snapc) {
973 lock_snap_rwsem = true;
974 if (!down_read_trylock(&mdsc->snap_rwsem)) {
975 spin_unlock(&ci->i_ceph_lock);
976 down_read(&mdsc->snap_rwsem);
977 spin_lock(&ci->i_ceph_lock);
978 goto retry;
979 }
980 }
981
982 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
983 __build_xattrs(inode);
984
985 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
986
987 if (!ci->i_xattrs.prealloc_blob ||
988 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
989 struct ceph_buffer *blob;
990
991 spin_unlock(&ci->i_ceph_lock);
992 dout(" preaallocating new blob size=%d\n", required_blob_size);
993 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
994 if (!blob)
995 goto do_sync_unlocked;
996 spin_lock(&ci->i_ceph_lock);
997 if (ci->i_xattrs.prealloc_blob)
998 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
999 ci->i_xattrs.prealloc_blob = blob;
1000 goto retry;
1001 }
1002
1003 err = __set_xattr(ci, newname, name_len, newval, val_len,
1004 flags, value ? 1 : -1, &xattr);
1005
1006 if (!err) {
1007 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
1008 &prealloc_cf);
1009 ci->i_xattrs.dirty = true;
1010 inode->i_ctime = current_fs_time(inode->i_sb);
1011 }
1012
1013 spin_unlock(&ci->i_ceph_lock);
1014 if (lock_snap_rwsem)
1015 up_read(&mdsc->snap_rwsem);
1016 if (dirty)
1017 __mark_inode_dirty(inode, dirty);
1018 ceph_free_cap_flush(prealloc_cf);
1019 return err;
1020
1021 do_sync:
1022 spin_unlock(&ci->i_ceph_lock);
1023 do_sync_unlocked:
1024 if (lock_snap_rwsem)
1025 up_read(&mdsc->snap_rwsem);
1026
1027 /* security module sets xattr while filling trace */
1028 if (current->journal_info != NULL) {
1029 pr_warn_ratelimited("sync setxattr %p "
1030 "during filling trace\n", inode);
1031 err = -EBUSY;
1032 } else {
1033 err = ceph_sync_setxattr(inode, name, value, size, flags);
1034 }
1035 out:
1036 ceph_free_cap_flush(prealloc_cf);
1037 kfree(newname);
1038 kfree(newval);
1039 kfree(xattr);
1040 return err;
1041 }
1042
1043 static int ceph_get_xattr_handler(const struct xattr_handler *handler,
1044 struct dentry *dentry, struct inode *inode,
1045 const char *name, void *value, size_t size)
1046 {
1047 if (!ceph_is_valid_xattr(name))
1048 return -EOPNOTSUPP;
1049 return __ceph_getxattr(inode, name, value, size);
1050 }
1051
1052 static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1053 struct dentry *unused, struct inode *inode,
1054 const char *name, const void *value,
1055 size_t size, int flags)
1056 {
1057 if (!ceph_is_valid_xattr(name))
1058 return -EOPNOTSUPP;
1059 return __ceph_setxattr(inode, name, value, size, flags);
1060 }
1061
1062 const struct xattr_handler ceph_other_xattr_handler = {
1063 .prefix = "", /* match any name => handlers called with full name */
1064 .get = ceph_get_xattr_handler,
1065 .set = ceph_set_xattr_handler,
1066 };
1067
1068 #ifdef CONFIG_SECURITY
1069 bool ceph_security_xattr_wanted(struct inode *in)
1070 {
1071 return in->i_security != NULL;
1072 }
1073
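/*
 * Return true if fetching security xattrs for this inode would need a
 * synchronous getattr, i.e. security xattrs are not yet initialized and
 * we do not hold CEPH_CAP_XATTR_SHARED with a valid xattr version.
 */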
1074 bool ceph_security_xattr_deadlock(struct inode *in)
1075 {
1076 struct ceph_inode_info *ci;
1077 bool ret;
1078 if (in->i_security == NULL)
1079 return false;
1080 ci = ceph_inode(in);
1081 spin_lock(&ci->i_ceph_lock);
1082 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
1083 !(ci->i_xattrs.version > 0 &&
1084 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
1085 spin_unlock(&ci->i_ceph_lock);
1086 return ret;
1087 }
1088 #endif