#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/* find or create an inode, given the ceph ino number */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}
	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
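
/*
 * Typical usage sketch (the field names follow the MDS reply structs
 * used later in this file): callers build a ceph_vino from reply data
 * and look the inode up, creating it if necessary:
 *
 *	struct ceph_vino vino = {
 *		.ino = le64_to_cpu(info->ino),
 *		.snap = le64_to_cpu(info->snapid),
 *	};
 *	struct inode *in = ceph_get_inode(sb, vino);
 *
 *	if (IS_ERR(in))
 *		return PTR_ERR(in);
 *
 * The same (ino, snap) pair always maps to the same in-core inode.
 */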

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	return inode;
}
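
/*
 * Note: the snapdir is the virtual ".snap" directory.  It reuses the
 * parent's ino with vino.snap set to CEPH_SNAPDIR (the initializer
 * above), so it shares the parent's identity without ever colliding
 * with the parent's live (CEPH_NOSNAP) inode in the icache.
 */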

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = generic_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
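
/*
 * Refresher on the frag encoding (from ceph_frag.h): a frag is a
 * 32-bit word whose top 8 bits give the number of significant bits
 * and whose low 24 bits hold the value, most significant bits first.
 * The root frag ceph_frag_make(0, 0) covers the whole hash space;
 * splitting it by 2 yields four children:
 *
 *	ceph_frag_make_child(0x00000000, 2, 0) == 0x02000000
 *	ceph_frag_make_child(0x00000000, 2, 1) == 0x02400000
 *	ceph_frag_make_child(0x00000000, 2, 2) == 0x02800000
 *	ceph_frag_make_child(0x00000000, 2, 3) == 0x02c00000
 */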

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p = &ci->i_fragtree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * we found it.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);
	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;

	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}
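
/*
 * Worked example (hypothetical split): if the root frag 0x00000000 is
 * split by 1, the walk above descends to whichever of 0x01000000
 * ("0*") or 0x01800000 ("1*") contains the dentry-name hash v, and
 * stops at the first frag that records no further split, returning
 * that leaf.
 */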

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int err = 0;
	int i;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	int i;
	u32 id, nsplits;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(fragtree->splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	ci->i_pool_ns_len = 0;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	ci->i_cap_flush_tree = RB_ROOT;
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * A positive dentry and its corresponding inode always accompany
	 * each other in an MDS reply, so there is no need to keep the
	 * inode in the cache after dropping all its aliases.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up-to-date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
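
/*
 * Example of the rules below: if this client holds Fw and has written
 * (bumping mtime locally), an MDS reply carrying an older mtime with
 * the same time_warp_seq must not regress it, so we keep the max; but
 * once the MDS performs a utimes() it bumps time_warp_seq, and its
 * values then win even if they move mtime backwards.
 */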

int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = (size + (1 << 9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or if we're not the
			 * only client referencing this file and we don't
			 * hold those caps, then we need to check whether
			 * the file is either opened or mmapped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
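	/*
	 * For reference, a sketch of the layout (based on how
	 * fs/ceph/xattr.c parses this blob): a __le32 entry count
	 * followed by (__le32 len, name bytes, __le32 len, value bytes)
	 * pairs, which is why a length of exactly 4 means "no xattrs"
	 * and needs no buffer.
	 */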
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * The provided version will be odd if the inode value is
	 * projected (still in flux at the MDS), and even if it is
	 * stable.  Skip the update if we have newer stable info
	 * (ours >= theirs, e.g. due to racing mds replies), unless we
	 * are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours > theirs).
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
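	/*
	 * Note on i_blkbits above: fls(x) - 1 is ilog2(x), so i_blkbits
	 * is the log2 of the layout's stripe unit; e.g. a 4 MB stripe
	 * unit (0x400000) yields i_blkbits == 22.
	 */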
	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
		ci->i_layout = info->layout;
		ci->i_pool_ns_len = iinfo->pool_ns_len;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}
	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != i_size_read(inode)))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}
	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);

			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}
	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;
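	/*
	 * e.g. with duration_ms == 30000 and HZ == 250, ttl lands
	 * 30000 * 250 / 1000 == 7500 jiffies past from_time, and the
	 * renewal point half_ttl lands 3750 jiffies in.
	 */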
	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock; /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mds_reply_inode *ininfo;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	/*
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invalidation.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, NULL, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode,
					 &req->r_caps_reservation);
		}
	}

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}
	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}
	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}
	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");
		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}
		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;
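	/*
	 * e.g. with 4 KB pages and 8-byte pointers, nsize == 512, so a
	 * readdir cache index of 1300 maps to slot 276 on page 2 of
	 * the directory's page cache.
	 */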
	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			req->r_readdir_offset = 2;
		else
			req->r_readdir_offset = 0;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}
	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}
		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				goto next_item;
			}
			dn = realdn;
		}

		di = ceph_dentry(dn);
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    session,
				    req->r_request_started);
		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;
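	/*
	 * i.e. report when the new size crosses half of max_size and
	 * the previously reported size had not: with max_size 4 MB,
	 * growing the file past 2 MB makes this return nonzero once.
	 */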
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}

/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);
	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = generic_removexattr,
};

int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_fs_time(inode->i_sb);
	}
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}

int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	return __ceph_setattr(d_inode(dentry), attr);
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
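
/*
 * Hypothetical usage sketch: a caller that only needs a trustworthy
 * file size could do
 *
 *	err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
 *
 * which returns without a round trip if the needed caps are already
 * issued, and otherwise falls through to the MDS GETATTR below.
 */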
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}