/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
 */
8 #define DEBUG_SUBSYSTEM S_LLITE
11 #include <linux/sched.h>
13 #include "../include/obd_support.h"
14 #include "../include/lustre_lite.h"
15 #include "../include/lustre_dlm.h"
16 #include "../include/lustre_ver.h"
17 #include "llite_internal.h"
/* If we ever have hundreds of extended attributes, we might want to consider
 * using a hash or a tree structure instead of list for faster lookups.
 */
22 struct ll_xattr_entry {
23 struct list_head xe_list; /* protected with
24 * lli_xattrs_list_rwsem */
25 char *xe_name; /* xattr name, \0-terminated */
26 char *xe_value; /* xattr value */
27 unsigned xe_namelen; /* strlen(xe_name) + 1 */
28 unsigned xe_vallen; /* xattr value length */
31 static struct kmem_cache *xattr_kmem;
32 static struct lu_kmem_descr xattr_caches[] = {
34 .ckd_cache = &xattr_kmem,
35 .ckd_name = "xattr_kmem",
36 .ckd_size = sizeof(struct ll_xattr_entry)
43 int ll_xattr_init(void)
45 return lu_kmem_init(xattr_caches);
48 void ll_xattr_fini(void)
50 lu_kmem_fini(xattr_caches);
/**
 * Initializes xattr cache for an inode.
 *
 * This initializes the xattr list and marks cache presence.
 */
58 static void ll_xattr_cache_init(struct ll_inode_info *lli)
64 INIT_LIST_HEAD(&lli->lli_xattrs);
65 lli->lli_flags |= LLIF_XATTR_CACHE;
/**
 * This looks for a specific extended attribute.
 *
 * Find in @cache and return @xattr_name attribute in @xattr,
 * for the NULL @xattr_name return the first cached @xattr.
 *
 * \retval 0        success
 * \retval -ENODATA if not found
 */
77 static int ll_xattr_cache_find(struct list_head *cache,
78 const char *xattr_name,
79 struct ll_xattr_entry **xattr)
81 struct ll_xattr_entry *entry;
85 list_for_each_entry(entry, cache, xe_list) {
86 /* xattr_name == NULL means look for any entry */
87 if (xattr_name == NULL ||
88 strcmp(xattr_name, entry->xe_name) == 0) {
90 CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
91 entry->xe_name, entry->xe_vallen,
/**
 * This adds an xattr.
 *
 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
 *
 * \retval 0       success
 * \retval -ENOMEM if no memory could be allocated for the cached attr
 * \retval -EPROTO if duplicate xattr is being added
 */
109 static int ll_xattr_cache_add(struct list_head *cache,
110 const char *xattr_name,
111 const char *xattr_val,
112 unsigned xattr_val_len)
114 struct ll_xattr_entry *xattr;
118 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
119 CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
123 OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, GFP_NOFS);
125 CDEBUG(D_CACHE, "failed to allocate xattr\n");
129 xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
130 if (!xattr->xe_name) {
131 CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
135 xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
136 if (!xattr->xe_value)
139 xattr->xe_vallen = xattr_val_len;
140 list_add(&xattr->xe_list, cache);
142 CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
143 xattr_val_len, xattr_val);
147 kfree(xattr->xe_name);
149 OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
/**
 * This removes an extended attribute from cache.
 *
 * Remove @xattr_name attribute from @cache.
 *
 * \retval 0        success
 * \retval -ENODATA if @xattr_name is not cached
 */
162 static int ll_xattr_cache_del(struct list_head *cache,
163 const char *xattr_name)
165 struct ll_xattr_entry *xattr;
169 CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
171 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
172 list_del(&xattr->xe_list);
173 kfree(xattr->xe_name);
174 kfree(xattr->xe_value);
175 OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
/**
 * This iterates cached extended attributes.
 *
 * Walk over cached attributes in @cache and
 * fill in @xld_buffer or only calculate buffer
 * size if @xld_buffer is NULL.
 *
 * \retval >= 0     buffer list size
 * \retval -ENODATA if the list cannot fit @xld_size buffer
 */
193 static int ll_xattr_cache_list(struct list_head *cache,
197 struct ll_xattr_entry *xattr, *tmp;
202 list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
203 CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
204 xld_buffer, xld_tail, xattr->xe_name);
207 xld_size -= xattr->xe_namelen;
210 memcpy(&xld_buffer[xld_tail],
211 xattr->xe_name, xattr->xe_namelen);
213 xld_tail += xattr->xe_namelen;
/**
 * Check if the xattr cache is initialized (filled).
 *
 * \retval 0 @cache is not initialized
 * \retval 1 @cache is initialized
 */
228 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
230 return !!(lli->lli_flags & LLIF_XATTR_CACHE);
/**
 * This finalizes the xattr cache.
 *
 * Free all xattr memory. @lli is the inode info pointer.
 *
 * \retval 0 no error occurred
 */
240 static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
244 if (!ll_xattr_cache_valid(lli))
247 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
249 lli->lli_flags &= ~LLIF_XATTR_CACHE;
254 int ll_xattr_cache_destroy(struct inode *inode)
256 struct ll_inode_info *lli = ll_i2info(inode);
261 down_write(&lli->lli_xattrs_list_rwsem);
262 rc = ll_xattr_cache_destroy_locked(lli);
263 up_write(&lli->lli_xattrs_list_rwsem);
/**
 * Match or enqueue a PR lock.
 *
 * Find or request an LDLM lock with xattr data.
 * Since LDLM does not provide API for atomic match_or_enqueue,
 * the function handles it with a separate enq lock.
 * If successful, the function exits with the list lock held.
 *
 * \retval 0       no error occurred
 * \retval -ENOMEM not enough memory
 */
/* Match an existing xattr LDLM lock or enqueue a new one; on success the
 * list rwsem is taken for write at the bottom before returning. */
static int ll_xattr_find_get_lock(struct inode *inode,
				  struct lookup_intent *oit,
				  struct ptlrpc_request **req)
	/* NOTE(review): the opening brace and local declarations for the
	 * matched lock mode and the return code appear to have been lost
	 * from this copy of the file -- restore from the original. */
	struct lustre_handle lockh = { 0 };
	struct md_op_data *op_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	/* blocking/completion ASTs for the IBITS metadata lock */
	struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
					   .ei_mode = it_to_lock_mode(oit),
					   .ei_cb_bl = ll_md_blocking_ast,
					   .ei_cb_cp = ldlm_completion_ast };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_export *exp = sbi->ll_md_exp;

	/* serialize concurrent refills; LDLM has no atomic match-or-enqueue */
	mutex_lock(&lli->lli_xattrs_enq_lock);
	/* inode may have been shrunk and recreated, so data is gone, match lock
	 * only when data exists. */
	if (ll_xattr_cache_valid(lli)) {
		/* Try matching first. */
		mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
		/* NOTE(review): the trailing argument(s) of the call above
		 * and the test of the returned mode appear truncated here. */
		/* fake oit in mdc_revalidate_lock() manner */
		oit->d.lustre.it_lock_handle = lockh.cookie;
		oit->d.lustre.it_lock_mode = mode;

	/* Enqueue if the lock isn't cached locally. */
	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		/* drop the enq mutex before bailing out with the errno */
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return PTR_ERR(op_data);

	/* ask the MDS for both the xattr values and the xattr name list */
	op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;

	rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
	ll_finish_md_op_data(op_data);

	/* NOTE(review): the error-path conditional and the debug-print call
	 * wrapping the two lines below appear truncated in this copy. */
		"md_intent_lock failed with %d for fid "DFID"\n",
		rc, PFID(ll_inode2fid(inode)));
	mutex_unlock(&lli->lli_xattrs_enq_lock);

	/* hand the enqueue reply carried by the intent to the caller */
	*req = (struct ptlrpc_request *)oit->d.lustre.it_data;

	/* exit with the list lock held, as documented above */
	down_write(&lli->lli_xattrs_list_rwsem);
	mutex_unlock(&lli->lli_xattrs_enq_lock);
/**
 * Refill the xattr cache.
 *
 * Fetch and cache the whole of xattrs for @inode, acquiring
 * a read or a write xattr lock depending on operation in @oit.
 * Intent is dropped on exit unless the operation is setxattr.
 *
 * \retval 0       no error occurred
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 */
/* Fetch all xattrs for the inode from the MDS reply and populate the
 * per-inode cache; runs with the lock obtained by ll_xattr_find_get_lock(). */
static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
	/* NOTE(review): the opening brace and some local declarations
	 * (xsizes, rc, i) appear missing from this copy -- confirm. */
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	const char *xdata, *xval, *xtail, *xvtail;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body;

	/* take (or match) the xattr lock; fills *req from the intent */
	rc = ll_xattr_find_get_lock(inode, oit, &req);

	/* Do we have the data at this point? */
	if (ll_xattr_cache_valid(lli)) {
		/* a parallel refill already filled the cache: count a hit */
		ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);

	/* Matched but no cache? Cancelled on error by a parallel refill. */
	if (unlikely(req == NULL)) {
		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");

	if (oit->d.lustre.it_status < 0) {
		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
		       oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
		rc = oit->d.lustre.it_status;
		/* xattr data is so large that we don't want to cache it */

	/* pull the reply body and the three xattr buffers out of the pill */
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		CERROR("no MDT BODY in the refill xattr reply\n");

	/* do not need swab xattr data */
	xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
	/* NOTE(review): the size arguments of the two _sized_get calls above
	 * and below appear truncated in this copy. */
	xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
					      body->max_mdsize * sizeof(__u32));
	if (xdata == NULL || xval == NULL || xsizes == NULL) {
		CERROR("wrong setxattr reply\n");

	/* end pointers used for the consistency checks in the loop below */
	xtail = xdata + body->eadatasize;
	xvtail = xval + body->aclsize;

	CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);

	/* mark the cache present before inserting entries */
	ll_xattr_cache_init(lli);

	for (i = 0; i < body->max_mdsize; i++) {
		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
		/* Perform consistency checks: attr names and vals in pill */
		if (memchr(xdata, 0, xtail - xdata) == NULL) {
			CERROR("xattr protocol violation (names are broken)\n");
		} else if (xval + *xsizes > xvtail) {
			CERROR("xattr protocol violation (vals are broken)\n");
		} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
			/* fault-injection point for the ENOMEM path */
		} else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
			/* Filter out ACL ACCESS since it's cached separately */
			CDEBUG(D_CACHE, "not caching %s\n",
			       XATTR_NAME_ACL_ACCESS);

		rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
			/* on failure drop everything cached so far */
			ll_xattr_cache_destroy_locked(lli);

		/* advance name cursor past the NUL terminator */
		xdata += strlen(xdata) + 1;

	/* leftover bytes mean the reply layout disagrees with the sizes */
	if (xdata != xtail || xval != xvtail)
		CERROR("a hole in xattr data\n");

	/* associate the obtained lock with the inode */
	ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);

	/* the intent reference on the lock is no longer needed */
	ll_intent_drop_lock(oit);

	up_write(&lli->lli_xattrs_list_rwsem);

	ptlrpc_req_finished(req);

	/* error path: release the list lock and cancel the acquired lock
	 * so a parallel refill sees no stale half-filled state */
	up_write(&lli->lli_xattrs_list_rwsem);

	ldlm_lock_decref_and_cancel((struct lustre_handle *)
				    &oit->d.lustre.it_lock_handle,
				    oit->d.lustre.it_lock_mode);
/**
 * Get an xattr value or list xattrs using the write-through cache.
 *
 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
 * The resulting value/list is stored in @buffer if the former
 * is not larger than @size.
 *
 * \retval 0        no error occurred
 * \retval -EPROTO  network protocol error
 * \retval -ENOMEM  not enough memory for the cache
 * \retval -ERANGE  the buffer is not large enough
 * \retval -ENODATA no such attr or the list is empty
 */
/* Cache-backed getxattr/listxattr entry point; refills the cache on a
 * miss, then serves the request under the list read lock. */
int ll_xattr_cache_get(struct inode *inode,
	/* NOTE(review): the remaining parameters (name, buffer, size, valid)
	 * and the opening brace appear truncated in this copy -- confirm. */
	struct lookup_intent oit = { .it_op = IT_GETXATTR };
	struct ll_inode_info *lli = ll_i2info(inode);

	/* exactly one of GETXATTR / LISTXATTR must be requested */
	LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));

	down_read(&lli->lli_xattrs_list_rwsem);
	if (!ll_xattr_cache_valid(lli)) {
		/* miss: retake as writer via the refill path, which returns
		 * with the rwsem held for write on success */
		up_read(&lli->lli_xattrs_list_rwsem);
		rc = ll_xattr_cache_refill(inode, &oit);
		/* NOTE(review): the refill error check between these two
		 * calls appears truncated in this copy. */
		downgrade_write(&lli->lli_xattrs_list_rwsem);
	/* NOTE(review): an else branch around the hit-statistics call below
	 * appears to have been lost -- confirm against the original. */
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);

	if (valid & OBD_MD_FLXATTR) {
		struct ll_xattr_entry *xattr;

		rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
		/* found: report the value length, copy it out if it fits */
			rc = xattr->xe_vallen;
			/* zero size means we are only requested size in rc */
			if (size >= xattr->xe_vallen)
				memcpy(buffer, xattr->xe_value,
	} else if (valid & OBD_MD_FLXATTRLS) {
		/* size == 0 means a size-only query: pass a NULL buffer */
		rc = ll_xattr_cache_list(&lli->lli_xattrs,
					 size ? buffer : NULL, size);

	up_read(&lli->lli_xattrs_list_rwsem);