1 #include <linux/ceph/ceph_debug.h>
2 #include <linux/ceph/pagelist.h>
5 #include "mds_client.h"
7 #include <linux/ceph/decode.h>
9 #include <linux/xattr.h>
10 #include <linux/posix_acl_xattr.h>
11 #include <linux/slab.h>
13 #define XATTR_CEPH_PREFIX "ceph."
14 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
16 static int __remove_xattr(struct ceph_inode_info
*ci
,
17 struct ceph_inode_xattr
*xattr
);
19 const struct xattr_handler ceph_other_xattr_handler
;
22 * List of handlers for synthetic system.* attributes. Other
23 * attributes are handled directly.
25 const struct xattr_handler
*ceph_xattr_handlers
[] = {
26 #ifdef CONFIG_CEPH_FS_POSIX_ACL
27 &posix_acl_access_xattr_handler
,
28 &posix_acl_default_xattr_handler
,
30 &ceph_other_xattr_handler
,
34 static bool ceph_is_valid_xattr(const char *name
)
36 return !strncmp(name
, XATTR_CEPH_PREFIX
, XATTR_CEPH_PREFIX_LEN
) ||
37 !strncmp(name
, XATTR_SECURITY_PREFIX
,
38 XATTR_SECURITY_PREFIX_LEN
) ||
39 !strncmp(name
, XATTR_TRUSTED_PREFIX
, XATTR_TRUSTED_PREFIX_LEN
) ||
40 !strncmp(name
, XATTR_USER_PREFIX
, XATTR_USER_PREFIX_LEN
);
44 * These define virtual xattrs exposing the recursive directory
45 * statistics and layout metadata.
49 size_t name_size
; /* strlen(name) + 1 (for '\0') */
50 size_t (*getxattr_cb
)(struct ceph_inode_info
*ci
, char *val
,
52 bool readonly
, hidden
;
53 bool (*exists_cb
)(struct ceph_inode_info
*ci
);
58 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info
*ci
)
61 char *p
= (char *)&ci
->i_layout
;
63 for (s
= 0; s
< sizeof(ci
->i_layout
); s
++, p
++)
69 static size_t ceph_vxattrcb_layout(struct ceph_inode_info
*ci
, char *val
,
73 struct ceph_fs_client
*fsc
= ceph_sb_to_client(ci
->vfs_inode
.i_sb
);
74 struct ceph_osd_client
*osdc
= &fsc
->client
->osdc
;
75 s64 pool
= ceph_file_layout_pg_pool(ci
->i_layout
);
76 const char *pool_name
;
79 dout("ceph_vxattrcb_layout %p\n", &ci
->vfs_inode
);
80 down_read(&osdc
->map_sem
);
81 pool_name
= ceph_pg_pool_name_by_id(osdc
->osdmap
, pool
);
83 size_t len
= strlen(pool_name
);
84 ret
= snprintf(buf
, sizeof(buf
),
85 "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=",
86 (unsigned long long)ceph_file_layout_su(ci
->i_layout
),
87 (unsigned long long)ceph_file_layout_stripe_count(ci
->i_layout
),
88 (unsigned long long)ceph_file_layout_object_size(ci
->i_layout
));
91 } else if (ret
+ len
> size
) {
94 memcpy(val
, buf
, ret
);
95 memcpy(val
+ ret
, pool_name
, len
);
99 ret
= snprintf(buf
, sizeof(buf
),
100 "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
101 (unsigned long long)ceph_file_layout_su(ci
->i_layout
),
102 (unsigned long long)ceph_file_layout_stripe_count(ci
->i_layout
),
103 (unsigned long long)ceph_file_layout_object_size(ci
->i_layout
),
104 (unsigned long long)pool
);
107 memcpy(val
, buf
, ret
);
112 up_read(&osdc
->map_sem
);
116 static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info
*ci
,
117 char *val
, size_t size
)
119 return snprintf(val
, size
, "%lld",
120 (unsigned long long)ceph_file_layout_su(ci
->i_layout
));
123 static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info
*ci
,
124 char *val
, size_t size
)
126 return snprintf(val
, size
, "%lld",
127 (unsigned long long)ceph_file_layout_stripe_count(ci
->i_layout
));
130 static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info
*ci
,
131 char *val
, size_t size
)
133 return snprintf(val
, size
, "%lld",
134 (unsigned long long)ceph_file_layout_object_size(ci
->i_layout
));
137 static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info
*ci
,
138 char *val
, size_t size
)
141 struct ceph_fs_client
*fsc
= ceph_sb_to_client(ci
->vfs_inode
.i_sb
);
142 struct ceph_osd_client
*osdc
= &fsc
->client
->osdc
;
143 s64 pool
= ceph_file_layout_pg_pool(ci
->i_layout
);
144 const char *pool_name
;
146 down_read(&osdc
->map_sem
);
147 pool_name
= ceph_pg_pool_name_by_id(osdc
->osdmap
, pool
);
149 ret
= snprintf(val
, size
, "%s", pool_name
);
151 ret
= snprintf(val
, size
, "%lld", (unsigned long long)pool
);
152 up_read(&osdc
->map_sem
);
158 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info
*ci
, char *val
,
161 return snprintf(val
, size
, "%lld", ci
->i_files
+ ci
->i_subdirs
);
164 static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info
*ci
, char *val
,
167 return snprintf(val
, size
, "%lld", ci
->i_files
);
170 static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info
*ci
, char *val
,
173 return snprintf(val
, size
, "%lld", ci
->i_subdirs
);
176 static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info
*ci
, char *val
,
179 return snprintf(val
, size
, "%lld", ci
->i_rfiles
+ ci
->i_rsubdirs
);
182 static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info
*ci
, char *val
,
185 return snprintf(val
, size
, "%lld", ci
->i_rfiles
);
188 static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info
*ci
, char *val
,
191 return snprintf(val
, size
, "%lld", ci
->i_rsubdirs
);
194 static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info
*ci
, char *val
,
197 return snprintf(val
, size
, "%lld", ci
->i_rbytes
);
200 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info
*ci
, char *val
,
203 return snprintf(val
, size
, "%ld.09%ld", (long)ci
->i_rctime
.tv_sec
,
204 (long)ci
->i_rctime
.tv_nsec
);
/* Helpers for building "ceph.<type>.<name>[.<name2>]" vxattr names. */
#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2)	\
	XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

/* Table entry for a read-only, listable statistics vxattr. */
#define XATTR_NAME_CEPH(_type, _name)					\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.readonly = true,					\
		.hidden = false,					\
		.exists_cb = NULL,					\
	}
/* Table entry for a hidden per-field layout vxattr. */
#define XATTR_LAYOUT_FIELD(_type, _name, _field)			\
	{								\
		.name = CEPH_XATTR_NAME2(_type, _name, _field),		\
		.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
		.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field,	\
		.readonly = false,					\
		.hidden = true,						\
		.exists_cb = ceph_vxattrcb_layout_exists,		\
	}
231 static struct ceph_vxattr ceph_dir_vxattrs
[] = {
233 .name
= "ceph.dir.layout",
234 .name_size
= sizeof("ceph.dir.layout"),
235 .getxattr_cb
= ceph_vxattrcb_layout
,
238 .exists_cb
= ceph_vxattrcb_layout_exists
,
240 XATTR_LAYOUT_FIELD(dir
, layout
, stripe_unit
),
241 XATTR_LAYOUT_FIELD(dir
, layout
, stripe_count
),
242 XATTR_LAYOUT_FIELD(dir
, layout
, object_size
),
243 XATTR_LAYOUT_FIELD(dir
, layout
, pool
),
244 XATTR_NAME_CEPH(dir
, entries
),
245 XATTR_NAME_CEPH(dir
, files
),
246 XATTR_NAME_CEPH(dir
, subdirs
),
247 XATTR_NAME_CEPH(dir
, rentries
),
248 XATTR_NAME_CEPH(dir
, rfiles
),
249 XATTR_NAME_CEPH(dir
, rsubdirs
),
250 XATTR_NAME_CEPH(dir
, rbytes
),
251 XATTR_NAME_CEPH(dir
, rctime
),
252 { .name
= NULL
, 0 } /* Required table terminator */
254 static size_t ceph_dir_vxattrs_name_size
; /* total size of all names */
258 static struct ceph_vxattr ceph_file_vxattrs
[] = {
260 .name
= "ceph.file.layout",
261 .name_size
= sizeof("ceph.file.layout"),
262 .getxattr_cb
= ceph_vxattrcb_layout
,
265 .exists_cb
= ceph_vxattrcb_layout_exists
,
267 XATTR_LAYOUT_FIELD(file
, layout
, stripe_unit
),
268 XATTR_LAYOUT_FIELD(file
, layout
, stripe_count
),
269 XATTR_LAYOUT_FIELD(file
, layout
, object_size
),
270 XATTR_LAYOUT_FIELD(file
, layout
, pool
),
271 { .name
= NULL
, 0 } /* Required table terminator */
273 static size_t ceph_file_vxattrs_name_size
; /* total size of all names */
275 static struct ceph_vxattr
*ceph_inode_vxattrs(struct inode
*inode
)
277 if (S_ISDIR(inode
->i_mode
))
278 return ceph_dir_vxattrs
;
279 else if (S_ISREG(inode
->i_mode
))
280 return ceph_file_vxattrs
;
284 static size_t ceph_vxattrs_name_size(struct ceph_vxattr
*vxattrs
)
286 if (vxattrs
== ceph_dir_vxattrs
)
287 return ceph_dir_vxattrs_name_size
;
288 if (vxattrs
== ceph_file_vxattrs
)
289 return ceph_file_vxattrs_name_size
;
295 * Compute the aggregate size (including terminating '\0') of all
296 * virtual extended attribute names in the given vxattr table.
298 static size_t __init
vxattrs_name_size(struct ceph_vxattr
*vxattrs
)
300 struct ceph_vxattr
*vxattr
;
303 for (vxattr
= vxattrs
; vxattr
->name
; vxattr
++)
305 size
+= vxattr
->name_size
;
310 /* Routines called at initialization and exit time */
312 void __init
ceph_xattr_init(void)
314 ceph_dir_vxattrs_name_size
= vxattrs_name_size(ceph_dir_vxattrs
);
315 ceph_file_vxattrs_name_size
= vxattrs_name_size(ceph_file_vxattrs
);
318 void ceph_xattr_exit(void)
320 ceph_dir_vxattrs_name_size
= 0;
321 ceph_file_vxattrs_name_size
= 0;
324 static struct ceph_vxattr
*ceph_match_vxattr(struct inode
*inode
,
327 struct ceph_vxattr
*vxattr
= ceph_inode_vxattrs(inode
);
330 while (vxattr
->name
) {
331 if (!strcmp(vxattr
->name
, name
))
340 static int __set_xattr(struct ceph_inode_info
*ci
,
341 const char *name
, int name_len
,
342 const char *val
, int val_len
,
343 int flags
, int update_xattr
,
344 struct ceph_inode_xattr
**newxattr
)
347 struct rb_node
*parent
= NULL
;
348 struct ceph_inode_xattr
*xattr
= NULL
;
352 p
= &ci
->i_xattrs
.index
.rb_node
;
355 xattr
= rb_entry(parent
, struct ceph_inode_xattr
, node
);
356 c
= strncmp(name
, xattr
->name
, min(name_len
, xattr
->name_len
));
362 if (name_len
== xattr
->name_len
)
364 else if (name_len
< xattr
->name_len
)
374 if (xattr
&& (flags
& XATTR_CREATE
))
376 else if (!xattr
&& (flags
& XATTR_REPLACE
))
383 if (update_xattr
< 0) {
385 __remove_xattr(ci
, xattr
);
395 xattr
->name_len
= name_len
;
396 xattr
->should_free_name
= update_xattr
;
398 ci
->i_xattrs
.count
++;
399 dout("__set_xattr count=%d\n", ci
->i_xattrs
.count
);
403 if (xattr
->should_free_val
)
404 kfree((void *)xattr
->val
);
410 ci
->i_xattrs
.names_size
-= xattr
->name_len
;
411 ci
->i_xattrs
.vals_size
-= xattr
->val_len
;
413 ci
->i_xattrs
.names_size
+= name_len
;
414 ci
->i_xattrs
.vals_size
+= val_len
;
420 xattr
->val_len
= val_len
;
421 xattr
->dirty
= update_xattr
;
422 xattr
->should_free_val
= (val
&& update_xattr
);
425 rb_link_node(&xattr
->node
, parent
, p
);
426 rb_insert_color(&xattr
->node
, &ci
->i_xattrs
.index
);
427 dout("__set_xattr_val p=%p\n", p
);
430 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
431 ceph_vinop(&ci
->vfs_inode
), xattr
, name
, val_len
, val
);
436 static struct ceph_inode_xattr
*__get_xattr(struct ceph_inode_info
*ci
,
440 struct rb_node
*parent
= NULL
;
441 struct ceph_inode_xattr
*xattr
= NULL
;
442 int name_len
= strlen(name
);
445 p
= &ci
->i_xattrs
.index
.rb_node
;
448 xattr
= rb_entry(parent
, struct ceph_inode_xattr
, node
);
449 c
= strncmp(name
, xattr
->name
, xattr
->name_len
);
450 if (c
== 0 && name_len
> xattr
->name_len
)
457 dout("__get_xattr %s: found %.*s\n", name
,
458 xattr
->val_len
, xattr
->val
);
463 dout("__get_xattr %s: not found\n", name
);
468 static void __free_xattr(struct ceph_inode_xattr
*xattr
)
472 if (xattr
->should_free_name
)
473 kfree((void *)xattr
->name
);
474 if (xattr
->should_free_val
)
475 kfree((void *)xattr
->val
);
480 static int __remove_xattr(struct ceph_inode_info
*ci
,
481 struct ceph_inode_xattr
*xattr
)
486 rb_erase(&xattr
->node
, &ci
->i_xattrs
.index
);
488 if (xattr
->should_free_name
)
489 kfree((void *)xattr
->name
);
490 if (xattr
->should_free_val
)
491 kfree((void *)xattr
->val
);
493 ci
->i_xattrs
.names_size
-= xattr
->name_len
;
494 ci
->i_xattrs
.vals_size
-= xattr
->val_len
;
495 ci
->i_xattrs
.count
--;
501 static char *__copy_xattr_names(struct ceph_inode_info
*ci
,
505 struct ceph_inode_xattr
*xattr
= NULL
;
507 p
= rb_first(&ci
->i_xattrs
.index
);
508 dout("__copy_xattr_names count=%d\n", ci
->i_xattrs
.count
);
511 xattr
= rb_entry(p
, struct ceph_inode_xattr
, node
);
512 memcpy(dest
, xattr
->name
, xattr
->name_len
);
513 dest
[xattr
->name_len
] = '\0';
515 dout("dest=%s %p (%s) (%d/%d)\n", dest
, xattr
, xattr
->name
,
516 xattr
->name_len
, ci
->i_xattrs
.names_size
);
518 dest
+= xattr
->name_len
+ 1;
525 void __ceph_destroy_xattrs(struct ceph_inode_info
*ci
)
527 struct rb_node
*p
, *tmp
;
528 struct ceph_inode_xattr
*xattr
= NULL
;
530 p
= rb_first(&ci
->i_xattrs
.index
);
532 dout("__ceph_destroy_xattrs p=%p\n", p
);
535 xattr
= rb_entry(p
, struct ceph_inode_xattr
, node
);
538 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p
,
539 xattr
->name_len
, xattr
->name
);
540 rb_erase(tmp
, &ci
->i_xattrs
.index
);
545 ci
->i_xattrs
.names_size
= 0;
546 ci
->i_xattrs
.vals_size
= 0;
547 ci
->i_xattrs
.index_version
= 0;
548 ci
->i_xattrs
.count
= 0;
549 ci
->i_xattrs
.index
= RB_ROOT
;
552 static int __build_xattrs(struct inode
*inode
)
553 __releases(ci
->i_ceph_lock
)
554 __acquires(ci
->i_ceph_lock
)
560 const char *name
, *val
;
561 struct ceph_inode_info
*ci
= ceph_inode(inode
);
563 struct ceph_inode_xattr
**xattrs
= NULL
;
567 dout("__build_xattrs() len=%d\n",
568 ci
->i_xattrs
.blob
? (int)ci
->i_xattrs
.blob
->vec
.iov_len
: 0);
570 if (ci
->i_xattrs
.index_version
>= ci
->i_xattrs
.version
)
571 return 0; /* already built */
573 __ceph_destroy_xattrs(ci
);
576 /* updated internal xattr rb tree */
577 if (ci
->i_xattrs
.blob
&& ci
->i_xattrs
.blob
->vec
.iov_len
> 4) {
578 p
= ci
->i_xattrs
.blob
->vec
.iov_base
;
579 end
= p
+ ci
->i_xattrs
.blob
->vec
.iov_len
;
580 ceph_decode_32_safe(&p
, end
, numattr
, bad
);
581 xattr_version
= ci
->i_xattrs
.version
;
582 spin_unlock(&ci
->i_ceph_lock
);
584 xattrs
= kcalloc(numattr
, sizeof(struct ceph_inode_xattr
*),
590 for (i
= 0; i
< numattr
; i
++) {
591 xattrs
[i
] = kmalloc(sizeof(struct ceph_inode_xattr
),
597 spin_lock(&ci
->i_ceph_lock
);
598 if (ci
->i_xattrs
.version
!= xattr_version
) {
599 /* lost a race, retry */
600 for (i
= 0; i
< numattr
; i
++)
608 ceph_decode_32_safe(&p
, end
, len
, bad
);
612 ceph_decode_32_safe(&p
, end
, len
, bad
);
616 err
= __set_xattr(ci
, name
, namelen
, val
, len
,
617 0, 0, &xattrs
[numattr
]);
624 ci
->i_xattrs
.index_version
= ci
->i_xattrs
.version
;
625 ci
->i_xattrs
.dirty
= false;
629 spin_lock(&ci
->i_ceph_lock
);
632 for (i
= 0; i
< numattr
; i
++)
636 ci
->i_xattrs
.names_size
= 0;
640 static int __get_required_blob_size(struct ceph_inode_info
*ci
, int name_size
,
644 * 4 bytes for the length, and additional 4 bytes per each xattr name,
645 * 4 bytes per each value
647 int size
= 4 + ci
->i_xattrs
.count
*(4 + 4) +
648 ci
->i_xattrs
.names_size
+
649 ci
->i_xattrs
.vals_size
;
650 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
651 ci
->i_xattrs
.count
, ci
->i_xattrs
.names_size
,
652 ci
->i_xattrs
.vals_size
);
655 size
+= 4 + 4 + name_size
+ val_size
;
661 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
662 * and swap into place.
664 void __ceph_build_xattrs_blob(struct ceph_inode_info
*ci
)
667 struct ceph_inode_xattr
*xattr
= NULL
;
670 dout("__build_xattrs_blob %p\n", &ci
->vfs_inode
);
671 if (ci
->i_xattrs
.dirty
) {
672 int need
= __get_required_blob_size(ci
, 0, 0);
674 BUG_ON(need
> ci
->i_xattrs
.prealloc_blob
->alloc_len
);
676 p
= rb_first(&ci
->i_xattrs
.index
);
677 dest
= ci
->i_xattrs
.prealloc_blob
->vec
.iov_base
;
679 ceph_encode_32(&dest
, ci
->i_xattrs
.count
);
681 xattr
= rb_entry(p
, struct ceph_inode_xattr
, node
);
683 ceph_encode_32(&dest
, xattr
->name_len
);
684 memcpy(dest
, xattr
->name
, xattr
->name_len
);
685 dest
+= xattr
->name_len
;
686 ceph_encode_32(&dest
, xattr
->val_len
);
687 memcpy(dest
, xattr
->val
, xattr
->val_len
);
688 dest
+= xattr
->val_len
;
693 /* adjust buffer len; it may be larger than we need */
694 ci
->i_xattrs
.prealloc_blob
->vec
.iov_len
=
695 dest
- ci
->i_xattrs
.prealloc_blob
->vec
.iov_base
;
697 if (ci
->i_xattrs
.blob
)
698 ceph_buffer_put(ci
->i_xattrs
.blob
);
699 ci
->i_xattrs
.blob
= ci
->i_xattrs
.prealloc_blob
;
700 ci
->i_xattrs
.prealloc_blob
= NULL
;
701 ci
->i_xattrs
.dirty
= false;
702 ci
->i_xattrs
.version
++;
706 static inline int __get_request_mask(struct inode
*in
) {
707 struct ceph_mds_request
*req
= current
->journal_info
;
709 if (req
&& req
->r_target_inode
== in
) {
710 if (req
->r_op
== CEPH_MDS_OP_LOOKUP
||
711 req
->r_op
== CEPH_MDS_OP_LOOKUPINO
||
712 req
->r_op
== CEPH_MDS_OP_LOOKUPPARENT
||
713 req
->r_op
== CEPH_MDS_OP_GETATTR
) {
714 mask
= le32_to_cpu(req
->r_args
.getattr
.mask
);
715 } else if (req
->r_op
== CEPH_MDS_OP_OPEN
||
716 req
->r_op
== CEPH_MDS_OP_CREATE
) {
717 mask
= le32_to_cpu(req
->r_args
.open
.mask
);
723 ssize_t
__ceph_getxattr(struct inode
*inode
, const char *name
, void *value
,
726 struct ceph_inode_info
*ci
= ceph_inode(inode
);
727 struct ceph_inode_xattr
*xattr
;
728 struct ceph_vxattr
*vxattr
= NULL
;
732 /* let's see if a virtual xattr was requested */
733 vxattr
= ceph_match_vxattr(inode
, name
);
736 if (!(vxattr
->exists_cb
&& !vxattr
->exists_cb(ci
)))
737 err
= vxattr
->getxattr_cb(ci
, value
, size
);
741 req_mask
= __get_request_mask(inode
);
743 spin_lock(&ci
->i_ceph_lock
);
744 dout("getxattr %p ver=%lld index_ver=%lld\n", inode
,
745 ci
->i_xattrs
.version
, ci
->i_xattrs
.index_version
);
747 if (ci
->i_xattrs
.version
== 0 ||
748 !((req_mask
& CEPH_CAP_XATTR_SHARED
) ||
749 __ceph_caps_issued_mask(ci
, CEPH_CAP_XATTR_SHARED
, 1))) {
750 spin_unlock(&ci
->i_ceph_lock
);
752 /* security module gets xattr while filling trace */
753 if (current
->journal_info
!= NULL
) {
754 pr_warn_ratelimited("sync getxattr %p "
755 "during filling trace\n", inode
);
759 /* get xattrs from mds (if we don't already have them) */
760 err
= ceph_do_getattr(inode
, CEPH_STAT_CAP_XATTR
, true);
763 spin_lock(&ci
->i_ceph_lock
);
766 err
= __build_xattrs(inode
);
770 err
= -ENODATA
; /* == ENOATTR */
771 xattr
= __get_xattr(ci
, name
);
776 if (size
&& size
< xattr
->val_len
)
779 err
= xattr
->val_len
;
783 memcpy(value
, xattr
->val
, xattr
->val_len
);
785 if (current
->journal_info
!= NULL
&&
786 !strncmp(name
, XATTR_SECURITY_PREFIX
, XATTR_SECURITY_PREFIX_LEN
))
787 ci
->i_ceph_flags
|= CEPH_I_SEC_INITED
;
789 spin_unlock(&ci
->i_ceph_lock
);
793 ssize_t
ceph_listxattr(struct dentry
*dentry
, char *names
, size_t size
)
795 struct inode
*inode
= d_inode(dentry
);
796 struct ceph_inode_info
*ci
= ceph_inode(inode
);
797 struct ceph_vxattr
*vxattrs
= ceph_inode_vxattrs(inode
);
804 spin_lock(&ci
->i_ceph_lock
);
805 dout("listxattr %p ver=%lld index_ver=%lld\n", inode
,
806 ci
->i_xattrs
.version
, ci
->i_xattrs
.index_version
);
808 if (ci
->i_xattrs
.version
== 0 ||
809 !__ceph_caps_issued_mask(ci
, CEPH_CAP_XATTR_SHARED
, 1)) {
810 spin_unlock(&ci
->i_ceph_lock
);
811 err
= ceph_do_getattr(inode
, CEPH_STAT_CAP_XATTR
, true);
814 spin_lock(&ci
->i_ceph_lock
);
817 err
= __build_xattrs(inode
);
821 * Start with virtual dir xattr names (if any) (including
822 * terminating '\0' characters for each).
824 vir_namelen
= ceph_vxattrs_name_size(vxattrs
);
826 /* adding 1 byte per each variable due to the null termination */
827 namelen
= ci
->i_xattrs
.names_size
+ ci
->i_xattrs
.count
;
829 if (size
&& vir_namelen
+ namelen
> size
)
832 err
= namelen
+ vir_namelen
;
836 names
= __copy_xattr_names(ci
, names
);
838 /* virtual xattr names, too */
841 for (i
= 0; vxattrs
[i
].name
; i
++) {
842 if (!vxattrs
[i
].hidden
&&
843 !(vxattrs
[i
].exists_cb
&&
844 !vxattrs
[i
].exists_cb(ci
))) {
845 len
= sprintf(names
, "%s", vxattrs
[i
].name
);
853 spin_unlock(&ci
->i_ceph_lock
);
857 static int ceph_sync_setxattr(struct inode
*inode
, const char *name
,
858 const char *value
, size_t size
, int flags
)
860 struct ceph_fs_client
*fsc
= ceph_sb_to_client(inode
->i_sb
);
861 struct ceph_inode_info
*ci
= ceph_inode(inode
);
862 struct ceph_mds_request
*req
;
863 struct ceph_mds_client
*mdsc
= fsc
->mdsc
;
864 struct ceph_pagelist
*pagelist
= NULL
;
868 /* copy value into pagelist */
869 pagelist
= kmalloc(sizeof(*pagelist
), GFP_NOFS
);
873 ceph_pagelist_init(pagelist
);
874 err
= ceph_pagelist_append(pagelist
, value
, size
);
878 flags
|= CEPH_XATTR_REMOVE
;
881 dout("setxattr value=%.*s\n", (int)size
, value
);
884 req
= ceph_mdsc_create_request(mdsc
, CEPH_MDS_OP_SETXATTR
,
891 req
->r_args
.setxattr
.flags
= cpu_to_le32(flags
);
892 req
->r_path2
= kstrdup(name
, GFP_NOFS
);
894 ceph_mdsc_put_request(req
);
899 req
->r_pagelist
= pagelist
;
902 req
->r_inode
= inode
;
905 req
->r_inode_drop
= CEPH_CAP_XATTR_SHARED
;
907 dout("xattr.ver (before): %lld\n", ci
->i_xattrs
.version
);
908 err
= ceph_mdsc_do_request(mdsc
, NULL
, req
);
909 ceph_mdsc_put_request(req
);
910 dout("xattr.ver (after): %lld\n", ci
->i_xattrs
.version
);
914 ceph_pagelist_release(pagelist
);
918 int __ceph_setxattr(struct inode
*inode
, const char *name
,
919 const void *value
, size_t size
, int flags
)
921 struct ceph_vxattr
*vxattr
;
922 struct ceph_inode_info
*ci
= ceph_inode(inode
);
923 struct ceph_mds_client
*mdsc
= ceph_sb_to_client(inode
->i_sb
)->mdsc
;
924 struct ceph_cap_flush
*prealloc_cf
= NULL
;
928 int name_len
= strlen(name
);
930 char *newname
= NULL
;
932 struct ceph_inode_xattr
*xattr
= NULL
;
933 int required_blob_size
;
934 bool lock_snap_rwsem
= false;
936 if (ceph_snap(inode
) != CEPH_NOSNAP
)
939 vxattr
= ceph_match_vxattr(inode
, name
);
940 if (vxattr
&& vxattr
->readonly
)
943 /* pass any unhandled ceph.* xattrs through to the MDS */
944 if (!strncmp(name
, XATTR_CEPH_PREFIX
, XATTR_CEPH_PREFIX_LEN
))
945 goto do_sync_unlocked
;
947 /* preallocate memory for xattr name, value, index node */
949 newname
= kmemdup(name
, name_len
+ 1, GFP_NOFS
);
954 newval
= kmemdup(value
, val_len
, GFP_NOFS
);
959 xattr
= kmalloc(sizeof(struct ceph_inode_xattr
), GFP_NOFS
);
963 prealloc_cf
= ceph_alloc_cap_flush();
967 spin_lock(&ci
->i_ceph_lock
);
969 issued
= __ceph_caps_issued(ci
, NULL
);
970 if (ci
->i_xattrs
.version
== 0 || !(issued
& CEPH_CAP_XATTR_EXCL
))
973 if (!lock_snap_rwsem
&& !ci
->i_head_snapc
) {
974 lock_snap_rwsem
= true;
975 if (!down_read_trylock(&mdsc
->snap_rwsem
)) {
976 spin_unlock(&ci
->i_ceph_lock
);
977 down_read(&mdsc
->snap_rwsem
);
978 spin_lock(&ci
->i_ceph_lock
);
983 dout("setxattr %p issued %s\n", inode
, ceph_cap_string(issued
));
984 __build_xattrs(inode
);
986 required_blob_size
= __get_required_blob_size(ci
, name_len
, val_len
);
988 if (!ci
->i_xattrs
.prealloc_blob
||
989 required_blob_size
> ci
->i_xattrs
.prealloc_blob
->alloc_len
) {
990 struct ceph_buffer
*blob
;
992 spin_unlock(&ci
->i_ceph_lock
);
993 dout(" preaallocating new blob size=%d\n", required_blob_size
);
994 blob
= ceph_buffer_new(required_blob_size
, GFP_NOFS
);
996 goto do_sync_unlocked
;
997 spin_lock(&ci
->i_ceph_lock
);
998 if (ci
->i_xattrs
.prealloc_blob
)
999 ceph_buffer_put(ci
->i_xattrs
.prealloc_blob
);
1000 ci
->i_xattrs
.prealloc_blob
= blob
;
1004 err
= __set_xattr(ci
, newname
, name_len
, newval
, val_len
,
1005 flags
, value
? 1 : -1, &xattr
);
1008 dirty
= __ceph_mark_dirty_caps(ci
, CEPH_CAP_XATTR_EXCL
,
1010 ci
->i_xattrs
.dirty
= true;
1011 inode
->i_ctime
= current_fs_time(inode
->i_sb
);
1014 spin_unlock(&ci
->i_ceph_lock
);
1015 if (lock_snap_rwsem
)
1016 up_read(&mdsc
->snap_rwsem
);
1018 __mark_inode_dirty(inode
, dirty
);
1019 ceph_free_cap_flush(prealloc_cf
);
1023 spin_unlock(&ci
->i_ceph_lock
);
1025 if (lock_snap_rwsem
)
1026 up_read(&mdsc
->snap_rwsem
);
1028 /* security module set xattr while filling trace */
1029 if (current
->journal_info
!= NULL
) {
1030 pr_warn_ratelimited("sync setxattr %p "
1031 "during filling trace\n", inode
);
1034 err
= ceph_sync_setxattr(inode
, name
, value
, size
, flags
);
1037 ceph_free_cap_flush(prealloc_cf
);
1044 static int ceph_get_xattr_handler(const struct xattr_handler
*handler
,
1045 struct dentry
*dentry
, struct inode
*inode
,
1046 const char *name
, void *value
, size_t size
)
1048 if (!ceph_is_valid_xattr(name
))
1050 return __ceph_getxattr(inode
, name
, value
, size
);
1053 static int ceph_set_xattr_handler(const struct xattr_handler
*handler
,
1054 struct dentry
*unused
, struct inode
*inode
,
1055 const char *name
, const void *value
,
1056 size_t size
, int flags
)
1058 if (!ceph_is_valid_xattr(name
))
1060 return __ceph_setxattr(inode
, name
, value
, size
, flags
);
1063 const struct xattr_handler ceph_other_xattr_handler
= {
1064 .prefix
= "", /* match any name => handlers called with full name */
1065 .get
= ceph_get_xattr_handler
,
1066 .set
= ceph_set_xattr_handler
,
1069 #ifdef CONFIG_SECURITY
1070 bool ceph_security_xattr_wanted(struct inode
*in
)
1072 return in
->i_security
!= NULL
;
1075 bool ceph_security_xattr_deadlock(struct inode
*in
)
1077 struct ceph_inode_info
*ci
;
1079 if (in
->i_security
== NULL
)
1081 ci
= ceph_inode(in
);
1082 spin_lock(&ci
->i_ceph_lock
);
1083 ret
= !(ci
->i_ceph_flags
& CEPH_I_SEC_INITED
) &&
1084 !(ci
->i_xattrs
.version
> 0 &&
1085 __ceph_caps_issued_mask(ci
, CEPH_CAP_XATTR_SHARED
, 0));
1086 spin_unlock(&ci
->i_ceph_lock
);