/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
 */
8 #define DEBUG_SUBSYSTEM S_LLITE
11 #include <linux/sched.h>
13 #include "../include/obd_support.h"
14 #include "../include/lustre_lite.h"
15 #include "../include/lustre_dlm.h"
16 #include "../include/lustre_ver.h"
17 #include "llite_internal.h"
19 /* If we ever have hundreds of extended attributes, we might want to consider
20 * using a hash or a tree structure instead of list for faster lookups.
22 struct ll_xattr_entry
{
23 struct list_head xe_list
; /* protected with
24 * lli_xattrs_list_rwsem */
25 char *xe_name
; /* xattr name, \0-terminated */
26 char *xe_value
; /* xattr value */
27 unsigned xe_namelen
; /* strlen(xe_name) + 1 */
28 unsigned xe_vallen
; /* xattr value length */
31 static struct kmem_cache
*xattr_kmem
;
32 static struct lu_kmem_descr xattr_caches
[] = {
34 .ckd_cache
= &xattr_kmem
,
35 .ckd_name
= "xattr_kmem",
36 .ckd_size
= sizeof(struct ll_xattr_entry
)
43 int ll_xattr_init(void)
45 return lu_kmem_init(xattr_caches
);
48 void ll_xattr_fini(void)
50 lu_kmem_fini(xattr_caches
);
54 * Initializes xattr cache for an inode.
56 * This initializes the xattr list and marks cache presence.
58 static void ll_xattr_cache_init(struct ll_inode_info
*lli
)
64 INIT_LIST_HEAD(&lli
->lli_xattrs
);
65 lli
->lli_flags
|= LLIF_XATTR_CACHE
;
69 * This looks for a specific extended attribute.
71 * Find in @cache and return @xattr_name attribute in @xattr,
72 * for the NULL @xattr_name return the first cached @xattr.
75 * \retval -ENODATA if not found
77 static int ll_xattr_cache_find(struct list_head
*cache
,
78 const char *xattr_name
,
79 struct ll_xattr_entry
**xattr
)
81 struct ll_xattr_entry
*entry
;
85 list_for_each_entry(entry
, cache
, xe_list
) {
86 /* xattr_name == NULL means look for any entry */
87 if (xattr_name
== NULL
||
88 strcmp(xattr_name
, entry
->xe_name
) == 0) {
90 CDEBUG(D_CACHE
, "find: [%s]=%.*s\n",
91 entry
->xe_name
, entry
->xe_vallen
,
101 * This adds an xattr.
103 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
106 * \retval -ENOMEM if no memory could be allocated for the cached attr
107 * \retval -EPROTO if duplicate xattr is being added
109 static int ll_xattr_cache_add(struct list_head
*cache
,
110 const char *xattr_name
,
111 const char *xattr_val
,
112 unsigned xattr_val_len
)
114 struct ll_xattr_entry
*xattr
;
118 if (ll_xattr_cache_find(cache
, xattr_name
, &xattr
) == 0) {
119 CDEBUG(D_CACHE
, "duplicate xattr: [%s]\n", xattr_name
);
123 OBD_SLAB_ALLOC_PTR_GFP(xattr
, xattr_kmem
, GFP_NOFS
);
125 CDEBUG(D_CACHE
, "failed to allocate xattr\n");
129 xattr
->xe_namelen
= strlen(xattr_name
) + 1;
131 OBD_ALLOC(xattr
->xe_name
, xattr
->xe_namelen
);
132 if (!xattr
->xe_name
) {
133 CDEBUG(D_CACHE
, "failed to alloc xattr name %u\n",
137 OBD_ALLOC(xattr
->xe_value
, xattr_val_len
);
138 if (!xattr
->xe_value
) {
139 CDEBUG(D_CACHE
, "failed to alloc xattr value %d\n",
144 memcpy(xattr
->xe_name
, xattr_name
, xattr
->xe_namelen
);
145 memcpy(xattr
->xe_value
, xattr_val
, xattr_val_len
);
146 xattr
->xe_vallen
= xattr_val_len
;
147 list_add(&xattr
->xe_list
, cache
);
149 CDEBUG(D_CACHE
, "set: [%s]=%.*s\n", xattr_name
,
150 xattr_val_len
, xattr_val
);
154 OBD_FREE(xattr
->xe_name
, xattr
->xe_namelen
);
156 OBD_SLAB_FREE_PTR(xattr
, xattr_kmem
);
162 * This removes an extended attribute from cache.
164 * Remove @xattr_name attribute from @cache.
167 * \retval -ENODATA if @xattr_name is not cached
169 static int ll_xattr_cache_del(struct list_head
*cache
,
170 const char *xattr_name
)
172 struct ll_xattr_entry
*xattr
;
176 CDEBUG(D_CACHE
, "del xattr: %s\n", xattr_name
);
178 if (ll_xattr_cache_find(cache
, xattr_name
, &xattr
) == 0) {
179 list_del(&xattr
->xe_list
);
180 OBD_FREE(xattr
->xe_name
, xattr
->xe_namelen
);
181 OBD_FREE(xattr
->xe_value
, xattr
->xe_vallen
);
182 OBD_SLAB_FREE_PTR(xattr
, xattr_kmem
);
191 * This iterates cached extended attributes.
193 * Walk over cached attributes in @cache and
194 * fill in @xld_buffer or only calculate buffer
195 * size if @xld_buffer is NULL.
197 * \retval >= 0 buffer list size
198 * \retval -ENODATA if the list cannot fit @xld_size buffer
200 static int ll_xattr_cache_list(struct list_head
*cache
,
204 struct ll_xattr_entry
*xattr
, *tmp
;
209 list_for_each_entry_safe(xattr
, tmp
, cache
, xe_list
) {
210 CDEBUG(D_CACHE
, "list: buffer=%p[%d] name=%s\n",
211 xld_buffer
, xld_tail
, xattr
->xe_name
);
214 xld_size
-= xattr
->xe_namelen
;
217 memcpy(&xld_buffer
[xld_tail
],
218 xattr
->xe_name
, xattr
->xe_namelen
);
220 xld_tail
+= xattr
->xe_namelen
;
230 * Check if the xattr cache is initialized (filled).
232 * \retval 0 @cache is not initialized
233 * \retval 1 @cache is initialized
235 static int ll_xattr_cache_valid(struct ll_inode_info
*lli
)
237 return !!(lli
->lli_flags
& LLIF_XATTR_CACHE
);
241 * This finalizes the xattr cache.
243 * Free all xattr memory. @lli is the inode info pointer.
245 * \retval 0 no error occurred
247 static int ll_xattr_cache_destroy_locked(struct ll_inode_info
*lli
)
251 if (!ll_xattr_cache_valid(lli
))
254 while (ll_xattr_cache_del(&lli
->lli_xattrs
, NULL
) == 0)
256 lli
->lli_flags
&= ~LLIF_XATTR_CACHE
;
261 int ll_xattr_cache_destroy(struct inode
*inode
)
263 struct ll_inode_info
*lli
= ll_i2info(inode
);
268 down_write(&lli
->lli_xattrs_list_rwsem
);
269 rc
= ll_xattr_cache_destroy_locked(lli
);
270 up_write(&lli
->lli_xattrs_list_rwsem
);
276 * Match or enqueue a PR lock.
278 * Find or request an LDLM lock with xattr data.
279 * Since LDLM does not provide API for atomic match_or_enqueue,
280 * the function handles it with a separate enq lock.
281 * If successful, the function exits with the list lock held.
283 * \retval 0 no error occurred
284 * \retval -ENOMEM not enough memory
286 static int ll_xattr_find_get_lock(struct inode
*inode
,
287 struct lookup_intent
*oit
,
288 struct ptlrpc_request
**req
)
291 struct lustre_handle lockh
= { 0 };
292 struct md_op_data
*op_data
;
293 struct ll_inode_info
*lli
= ll_i2info(inode
);
294 struct ldlm_enqueue_info einfo
= { .ei_type
= LDLM_IBITS
,
295 .ei_mode
= it_to_lock_mode(oit
),
296 .ei_cb_bl
= ll_md_blocking_ast
,
297 .ei_cb_cp
= ldlm_completion_ast
};
298 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
299 struct obd_export
*exp
= sbi
->ll_md_exp
;
304 mutex_lock(&lli
->lli_xattrs_enq_lock
);
305 /* Try matching first. */
306 mode
= ll_take_md_lock(inode
, MDS_INODELOCK_XATTR
, &lockh
, 0, LCK_PR
);
308 /* fake oit in mdc_revalidate_lock() manner */
309 oit
->d
.lustre
.it_lock_handle
= lockh
.cookie
;
310 oit
->d
.lustre
.it_lock_mode
= mode
;
314 /* Enqueue if the lock isn't cached locally. */
315 op_data
= ll_prep_md_op_data(NULL
, inode
, NULL
, NULL
, 0, 0,
316 LUSTRE_OPC_ANY
, NULL
);
317 if (IS_ERR(op_data
)) {
318 mutex_unlock(&lli
->lli_xattrs_enq_lock
);
319 return PTR_ERR(op_data
);
322 op_data
->op_valid
= OBD_MD_FLXATTR
| OBD_MD_FLXATTRLS
;
324 rc
= md_enqueue(exp
, &einfo
, oit
, op_data
, &lockh
, NULL
, 0, NULL
, 0);
325 ll_finish_md_op_data(op_data
);
329 "md_intent_lock failed with %d for fid "DFID
"\n",
330 rc
, PFID(ll_inode2fid(inode
)));
331 mutex_unlock(&lli
->lli_xattrs_enq_lock
);
335 *req
= (struct ptlrpc_request
*)oit
->d
.lustre
.it_data
;
337 down_write(&lli
->lli_xattrs_list_rwsem
);
338 mutex_unlock(&lli
->lli_xattrs_enq_lock
);
344 * Refill the xattr cache.
346 * Fetch and cache the whole of xattrs for @inode, acquiring
347 * a read or a write xattr lock depending on operation in @oit.
348 * Intent is dropped on exit unless the operation is setxattr.
350 * \retval 0 no error occurred
351 * \retval -EPROTO network protocol error
352 * \retval -ENOMEM not enough memory for the cache
354 static int ll_xattr_cache_refill(struct inode
*inode
, struct lookup_intent
*oit
)
356 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
357 struct ptlrpc_request
*req
= NULL
;
358 const char *xdata
, *xval
, *xtail
, *xvtail
;
359 struct ll_inode_info
*lli
= ll_i2info(inode
);
360 struct mdt_body
*body
;
366 rc
= ll_xattr_find_get_lock(inode
, oit
, &req
);
370 /* Do we have the data at this point? */
371 if (ll_xattr_cache_valid(lli
)) {
372 ll_stats_ops_tally(sbi
, LPROC_LL_GETXATTR_HITS
, 1);
377 /* Matched but no cache? Cancelled on error by a parallel refill. */
378 if (unlikely(req
== NULL
)) {
379 CDEBUG(D_CACHE
, "cancelled by a parallel getxattr\n");
384 if (oit
->d
.lustre
.it_status
< 0) {
385 CDEBUG(D_CACHE
, "getxattr intent returned %d for fid "DFID
"\n",
386 oit
->d
.lustre
.it_status
, PFID(ll_inode2fid(inode
)));
387 rc
= oit
->d
.lustre
.it_status
;
388 /* xattr data is so large that we don't want to cache it */
394 body
= req_capsule_server_get(&req
->rq_pill
, &RMF_MDT_BODY
);
396 CERROR("no MDT BODY in the refill xattr reply\n");
400 /* do not need swab xattr data */
401 xdata
= req_capsule_server_sized_get(&req
->rq_pill
, &RMF_EADATA
,
403 xval
= req_capsule_server_sized_get(&req
->rq_pill
, &RMF_EAVALS
,
405 xsizes
= req_capsule_server_sized_get(&req
->rq_pill
, &RMF_EAVALS_LENS
,
406 body
->max_mdsize
* sizeof(__u32
));
407 if (xdata
== NULL
|| xval
== NULL
|| xsizes
== NULL
) {
408 CERROR("wrong setxattr reply\n");
413 xtail
= xdata
+ body
->eadatasize
;
414 xvtail
= xval
+ body
->aclsize
;
416 CDEBUG(D_CACHE
, "caching: xdata=%p xtail=%p\n", xdata
, xtail
);
418 ll_xattr_cache_init(lli
);
420 for (i
= 0; i
< body
->max_mdsize
; i
++) {
421 CDEBUG(D_CACHE
, "caching [%s]=%.*s\n", xdata
, *xsizes
, xval
);
422 /* Perform consistency checks: attr names and vals in pill */
423 if (memchr(xdata
, 0, xtail
- xdata
) == NULL
) {
424 CERROR("xattr protocol violation (names are broken)\n");
426 } else if (xval
+ *xsizes
> xvtail
) {
427 CERROR("xattr protocol violation (vals are broken)\n");
429 } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM
)) {
431 } else if (!strcmp(xdata
, XATTR_NAME_ACL_ACCESS
)) {
432 /* Filter out ACL ACCESS since it's cached separately */
433 CDEBUG(D_CACHE
, "not caching %s\n",
434 XATTR_NAME_ACL_ACCESS
);
437 rc
= ll_xattr_cache_add(&lli
->lli_xattrs
, xdata
, xval
,
441 ll_xattr_cache_destroy_locked(lli
);
444 xdata
+= strlen(xdata
) + 1;
449 if (xdata
!= xtail
|| xval
!= xvtail
)
450 CERROR("a hole in xattr data\n");
452 ll_set_lock_data(sbi
->ll_md_exp
, inode
, oit
, NULL
);
457 ll_intent_drop_lock(oit
);
460 up_write(&lli
->lli_xattrs_list_rwsem
);
462 ptlrpc_req_finished(req
);
467 up_write(&lli
->lli_xattrs_list_rwsem
);
469 ldlm_lock_decref_and_cancel((struct lustre_handle
*)
470 &oit
->d
.lustre
.it_lock_handle
,
471 oit
->d
.lustre
.it_lock_mode
);
477 * Get an xattr value or list xattrs using the write-through cache.
479 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
480 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
481 * The resulting value/list is stored in @buffer if the former
482 * is not larger than @size.
484 * \retval 0 no error occurred
485 * \retval -EPROTO network protocol error
486 * \retval -ENOMEM not enough memory for the cache
487 * \retval -ERANGE the buffer is not large enough
488 * \retval -ENODATA no such attr or the list is empty
490 int ll_xattr_cache_get(struct inode
*inode
,
496 struct lookup_intent oit
= { .it_op
= IT_GETXATTR
};
497 struct ll_inode_info
*lli
= ll_i2info(inode
);
502 LASSERT(!!(valid
& OBD_MD_FLXATTR
) ^ !!(valid
& OBD_MD_FLXATTRLS
));
504 down_read(&lli
->lli_xattrs_list_rwsem
);
505 if (!ll_xattr_cache_valid(lli
)) {
506 up_read(&lli
->lli_xattrs_list_rwsem
);
507 rc
= ll_xattr_cache_refill(inode
, &oit
);
510 downgrade_write(&lli
->lli_xattrs_list_rwsem
);
512 ll_stats_ops_tally(ll_i2sbi(inode
), LPROC_LL_GETXATTR_HITS
, 1);
515 if (valid
& OBD_MD_FLXATTR
) {
516 struct ll_xattr_entry
*xattr
;
518 rc
= ll_xattr_cache_find(&lli
->lli_xattrs
, name
, &xattr
);
520 rc
= xattr
->xe_vallen
;
521 /* zero size means we are only requested size in rc */
523 if (size
>= xattr
->xe_vallen
)
524 memcpy(buffer
, xattr
->xe_value
,
530 } else if (valid
& OBD_MD_FLXATTRLS
) {
531 rc
= ll_xattr_cache_list(&lli
->lli_xattrs
,
532 size
? buffer
: NULL
, size
);
537 up_read(&lli
->lli_xattrs_list_rwsem
);