2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include <linux/log2.h>
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_log_format.h"
25 #include "xfs_trans_resv.h"
27 #include "xfs_mount.h"
28 #include "xfs_inode.h"
29 #include "xfs_da_format.h"
30 #include "xfs_da_btree.h"
32 #include "xfs_attr_sf.h"
34 #include "xfs_trans_space.h"
35 #include "xfs_trans.h"
36 #include "xfs_buf_item.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_ialloc.h"
40 #include "xfs_bmap_util.h"
41 #include "xfs_error.h"
42 #include "xfs_quota.h"
43 #include "xfs_filestream.h"
44 #include "xfs_cksum.h"
45 #include "xfs_trace.h"
46 #include "xfs_icache.h"
47 #include "xfs_symlink.h"
48 #include "xfs_trans_priv.h"
50 #include "xfs_bmap_btree.h"
52 kmem_zone_t *xfs_inode_zone;
55 * Used in xfs_itruncate_extents(). This is the maximum number of extents
56 * freed from a file in a single transaction.
58 #define XFS_ITRUNC_MAX_EXTENTS 2
60 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
62 STATIC int xfs_iunlink_remove(xfs_trans_t *, xfs_inode_t *);
65 * helper function to extract extent size hint from inode
71 if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
72 return ip->i_d.di_extsize;
73 if (XFS_IS_REALTIME_INODE(ip))
74 return ip->i_mount->m_sb.sb_rextsize;
79 * These two are wrapper routines around the xfs_ilock() routine used to
80 * centralize some grungy code. They are used in places that wish to lock the
81 * inode solely for reading the extents. The reason these places can't just
82 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
83 * bringing in of the extents from disk for a file in b-tree format. If the
84 * inode is in b-tree format, then we need to lock the inode exclusively until
85 * the extents are read in. Locking it exclusively all the time would limit
86 * our parallelism unnecessarily, though. What we do instead is check to see
87 * if the extents have been read in yet, and only lock the inode exclusively
90 * The functions return a value which should be given to the corresponding
94 xfs_ilock_data_map_shared(
97 uint lock_mode = XFS_ILOCK_SHARED;
99 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
100 (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
101 lock_mode = XFS_ILOCK_EXCL;
102 xfs_ilock(ip, lock_mode);
107 xfs_ilock_attr_map_shared(
108 struct xfs_inode *ip)
110 uint lock_mode = XFS_ILOCK_SHARED;
112 if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
113 (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
114 lock_mode = XFS_ILOCK_EXCL;
115 xfs_ilock(ip, lock_mode);
120 * The xfs inode contains 2 locks: a multi-reader lock called the
121 * i_iolock and a multi-reader lock called the i_lock. This routine
122 * allows either or both of the locks to be obtained.
124 * The 2 locks should always be ordered so that the IO lock is
125 * obtained first in order to prevent deadlock.
127 * ip -- the inode being locked
128 * lock_flags -- this parameter indicates the inode's locks
129 * to be locked. It can be:
134 * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
135 * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
136 * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
137 * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
144 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
147 * You can't set both SHARED and EXCL for the same lock,
148 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
149 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
151 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
152 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
153 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
154 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
155 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
157 if (lock_flags & XFS_IOLOCK_EXCL)
158 mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
159 else if (lock_flags & XFS_IOLOCK_SHARED)
160 mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
162 if (lock_flags & XFS_ILOCK_EXCL)
163 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
164 else if (lock_flags & XFS_ILOCK_SHARED)
165 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
169 * This is just like xfs_ilock(), except that the caller
170 * is guaranteed not to sleep. It returns 1 if it gets
171 * the requested locks and 0 otherwise. If the IO lock is
172 * obtained but the inode lock cannot be, then the IO lock
173 * is dropped before returning.
175 * ip -- the inode being locked
176 * lock_flags -- this parameter indicates the inode's locks to be
177 * to be locked. See the comment for xfs_ilock() for a list
185 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
188 * You can't set both SHARED and EXCL for the same lock,
189 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
190 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
192 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
193 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
194 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
195 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
196 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
198 if (lock_flags & XFS_IOLOCK_EXCL) {
199 if (!mrtryupdate(&ip->i_iolock))
201 } else if (lock_flags & XFS_IOLOCK_SHARED) {
202 if (!mrtryaccess(&ip->i_iolock))
205 if (lock_flags & XFS_ILOCK_EXCL) {
206 if (!mrtryupdate(&ip->i_lock))
207 goto out_undo_iolock;
208 } else if (lock_flags & XFS_ILOCK_SHARED) {
209 if (!mrtryaccess(&ip->i_lock))
210 goto out_undo_iolock;
215 if (lock_flags & XFS_IOLOCK_EXCL)
216 mrunlock_excl(&ip->i_iolock);
217 else if (lock_flags & XFS_IOLOCK_SHARED)
218 mrunlock_shared(&ip->i_iolock);
224 * xfs_iunlock() is used to drop the inode locks acquired with
225 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
226 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
227 * that we know which locks to drop.
229 * ip -- the inode being unlocked
230 * lock_flags -- this parameter indicates the inode's locks to be
231 * to be unlocked. See the comment for xfs_ilock() for a list
232 * of valid values for this parameter.
241 * You can't set both SHARED and EXCL for the same lock,
242 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
243 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
245 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
246 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
247 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
248 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
249 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
250 ASSERT(lock_flags != 0);
252 if (lock_flags & XFS_IOLOCK_EXCL)
253 mrunlock_excl(&ip->i_iolock);
254 else if (lock_flags & XFS_IOLOCK_SHARED)
255 mrunlock_shared(&ip->i_iolock);
257 if (lock_flags & XFS_ILOCK_EXCL)
258 mrunlock_excl(&ip->i_lock);
259 else if (lock_flags & XFS_ILOCK_SHARED)
260 mrunlock_shared(&ip->i_lock);
262 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
266 * give up write locks. the i/o lock cannot be held nested
267 * if it is being demoted.
274 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
275 ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
277 if (lock_flags & XFS_ILOCK_EXCL)
278 mrdemote(&ip->i_lock);
279 if (lock_flags & XFS_IOLOCK_EXCL)
280 mrdemote(&ip->i_iolock);
282 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
285 #if defined(DEBUG) || defined(XFS_WARN)
291 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
292 if (!(lock_flags & XFS_ILOCK_SHARED))
293 return !!ip->i_lock.mr_writer;
294 return rwsem_is_locked(&ip->i_lock.mr_lock);
297 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
298 if (!(lock_flags & XFS_IOLOCK_SHARED))
299 return !!ip->i_iolock.mr_writer;
300 return rwsem_is_locked(&ip->i_iolock.mr_lock);
310 int xfs_small_retries;
311 int xfs_middle_retries;
312 int xfs_lots_retries;
317 * Bump the subclass so xfs_lock_inodes() acquires each lock with
321 xfs_lock_inumorder(int lock_mode, int subclass)
323 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
324 lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
325 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
326 lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
332 * The following routine will lock n inodes in exclusive mode. We assume the
333 * caller calls us with the inodes in i_ino order.
335 * We need to detect deadlock where an inode that we lock is in the AIL and we
336 * start waiting for another inode that is locked by a thread in a long running
337 * transaction (such as truncate). This can result in deadlock since the long
338 * running trans might need to wait for the inode we just locked in order to
339 * push the tail and free space in the log.
347 int attempts = 0, i, j, try_lock;
350 /* currently supports between 2 and 5 inodes */
351 ASSERT(ips && inodes >= 2 && inodes <= 5);
356 for (; i < inodes; i++) {
359 if (i && (ips[i] == ips[i - 1])) /* Already locked */
363 * If try_lock is not set yet, make sure all locked inodes are
364 * not in the AIL. If any are, set try_lock to be used later.
367 for (j = (i - 1); j >= 0 && !try_lock; j--) {
368 lp = (xfs_log_item_t *)ips[j]->i_itemp;
369 if (lp && (lp->li_flags & XFS_LI_IN_AIL))
375 * If any of the previous locks we have locked is in the AIL,
376 * we must TRY to get the second and subsequent locks. If
377 * we can't get any, we must release all we have
381 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
385 /* try_lock means we have an inode locked that is in the AIL. */
387 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
391 * Unlock all previous guys and try again. xfs_iunlock will try
392 * to push the tail if the inode is in the AIL.
395 for (j = i - 1; j >= 0; j--) {
397 * Check to see if we've already unlocked this one. Not
398 * the first one going back, and the inode ptr is the
401 if (j != (i - 1) && ips[j] == ips[j + 1])
404 xfs_iunlock(ips[j], lock_mode);
407 if ((attempts % 5) == 0) {
408 delay(1); /* Don't just spin the CPU */
420 if (attempts < 5) xfs_small_retries++;
421 else if (attempts < 100) xfs_middle_retries++;
422 else xfs_lots_retries++;
430 * xfs_lock_two_inodes() can only be used to lock one type of lock
431 * at a time - the iolock or the ilock, but not both at once. If
432 * we lock both at once, lockdep will report false positives saying
433 * we have violated locking orders.
445 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
446 ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
447 ASSERT(ip0->i_ino != ip1->i_ino);
449 if (ip0->i_ino > ip1->i_ino) {
456 xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
459 * If the first lock we have locked is in the AIL, we must TRY to get
460 * the second lock. If we can't get it, we must release the first one
463 lp = (xfs_log_item_t *)ip0->i_itemp;
464 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
465 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
466 xfs_iunlock(ip0, lock_mode);
467 if ((++attempts % 5) == 0)
468 delay(1); /* Don't just spin the CPU */
472 xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
479 struct xfs_inode *ip)
481 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
482 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
485 prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
486 if (xfs_isiflocked(ip))
488 } while (!xfs_iflock_nowait(ip));
490 finish_wait(wq, &wait.wait);
499 if (di_flags & XFS_DIFLAG_ANY) {
500 if (di_flags & XFS_DIFLAG_REALTIME)
501 flags |= XFS_XFLAG_REALTIME;
502 if (di_flags & XFS_DIFLAG_PREALLOC)
503 flags |= XFS_XFLAG_PREALLOC;
504 if (di_flags & XFS_DIFLAG_IMMUTABLE)
505 flags |= XFS_XFLAG_IMMUTABLE;
506 if (di_flags & XFS_DIFLAG_APPEND)
507 flags |= XFS_XFLAG_APPEND;
508 if (di_flags & XFS_DIFLAG_SYNC)
509 flags |= XFS_XFLAG_SYNC;
510 if (di_flags & XFS_DIFLAG_NOATIME)
511 flags |= XFS_XFLAG_NOATIME;
512 if (di_flags & XFS_DIFLAG_NODUMP)
513 flags |= XFS_XFLAG_NODUMP;
514 if (di_flags & XFS_DIFLAG_RTINHERIT)
515 flags |= XFS_XFLAG_RTINHERIT;
516 if (di_flags & XFS_DIFLAG_PROJINHERIT)
517 flags |= XFS_XFLAG_PROJINHERIT;
518 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
519 flags |= XFS_XFLAG_NOSYMLINKS;
520 if (di_flags & XFS_DIFLAG_EXTSIZE)
521 flags |= XFS_XFLAG_EXTSIZE;
522 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
523 flags |= XFS_XFLAG_EXTSZINHERIT;
524 if (di_flags & XFS_DIFLAG_NODEFRAG)
525 flags |= XFS_XFLAG_NODEFRAG;
526 if (di_flags & XFS_DIFLAG_FILESTREAM)
527 flags |= XFS_XFLAG_FILESTREAM;
537 xfs_icdinode_t *dic = &ip->i_d;
539 return _xfs_dic2xflags(dic->di_flags) |
540 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
547 return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
548 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
552 * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
553 * is allowed, otherwise it has to be an exact match. If a CI match is found,
554 * ci_name->name will point to a the actual name (caller must free) or
555 * will be set to NULL if an exact match is found.
560 struct xfs_name *name,
562 struct xfs_name *ci_name)
568 trace_xfs_lookup(dp, name);
570 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
573 lock_mode = xfs_ilock_data_map_shared(dp);
574 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
575 xfs_iunlock(dp, lock_mode);
580 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
588 kmem_free(ci_name->name);
595 * Allocate an inode on disk and return a copy of its in-core version.
596 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
597 * appropriately within the inode. The uid and gid for the inode are
598 * set according to the contents of the given cred structure.
600 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
601 * has a free inode available, call xfs_iget() to obtain the in-core
602 * version of the allocated inode. Finally, fill in the inode and
603 * log its initial contents. In this case, ialloc_context would be
606 * If xfs_dialloc() does not have an available inode, it will replenish
607 * its supply by doing an allocation. Since we can only do one
608 * allocation within a transaction without deadlocks, we must commit
609 * the current transaction before returning the inode itself.
610 * In this case, therefore, we will set ialloc_context and return.
611 * The caller should then commit the current transaction, start a new
612 * transaction, and call xfs_ialloc() again to actually get the inode.
614 * To ensure that some other process does not grab the inode that
615 * was allocated during the first call to xfs_ialloc(), this routine
616 * also returns the [locked] bp pointing to the head of the freelist
617 * as ialloc_context. The caller should hold this buffer across
618 * the commit and pass it back into this routine on the second call.
620 * If we are allocating quota inodes, we do not have a parent inode
621 * to attach to or associate with (i.e. pip == NULL) because they
622 * are not linked into the directory structure - they are attached
623 * directly to the superblock - and so have no parent.
634 xfs_buf_t **ialloc_context,
637 struct xfs_mount *mp = tp->t_mountp;
645 * Call the space management code to pick
646 * the on-disk inode to be allocated.
648 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
649 ialloc_context, &ino);
652 if (*ialloc_context || ino == NULLFSINO) {
656 ASSERT(*ialloc_context == NULL);
659 * Get the in-core inode with the lock held exclusively.
660 * This is because we're setting fields here we need
661 * to prevent others from looking at until we're done.
663 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
664 XFS_ILOCK_EXCL, &ip);
670 * We always convert v1 inodes to v2 now - we only support filesystems
671 * with >= v2 inode capability, so there is no reason for ever leaving
672 * an inode in v1 format.
674 if (ip->i_d.di_version == 1)
675 ip->i_d.di_version = 2;
677 ip->i_d.di_mode = mode;
678 ip->i_d.di_onlink = 0;
679 ip->i_d.di_nlink = nlink;
680 ASSERT(ip->i_d.di_nlink == nlink);
681 ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
682 ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
683 xfs_set_projid(ip, prid);
684 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
686 if (pip && XFS_INHERIT_GID(pip)) {
687 ip->i_d.di_gid = pip->i_d.di_gid;
688 if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
689 ip->i_d.di_mode |= S_ISGID;
694 * If the group ID of the new file does not match the effective group
695 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
696 * (and only if the irix_sgid_inherit compatibility variable is set).
698 if ((irix_sgid_inherit) &&
699 (ip->i_d.di_mode & S_ISGID) &&
700 (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
701 ip->i_d.di_mode &= ~S_ISGID;
705 ip->i_d.di_nextents = 0;
706 ASSERT(ip->i_d.di_nblocks == 0);
708 tv = current_fs_time(mp->m_super);
709 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
710 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
711 ip->i_d.di_atime = ip->i_d.di_mtime;
712 ip->i_d.di_ctime = ip->i_d.di_mtime;
715 * di_gen will have been taken care of in xfs_iread.
717 ip->i_d.di_extsize = 0;
718 ip->i_d.di_dmevmask = 0;
719 ip->i_d.di_dmstate = 0;
720 ip->i_d.di_flags = 0;
722 if (ip->i_d.di_version == 3) {
723 ASSERT(ip->i_d.di_ino == ino);
724 ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
726 ip->i_d.di_changecount = 1;
728 ip->i_d.di_flags2 = 0;
729 memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
730 ip->i_d.di_crtime = ip->i_d.di_mtime;
734 flags = XFS_ILOG_CORE;
735 switch (mode & S_IFMT) {
740 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
741 ip->i_df.if_u2.if_rdev = rdev;
742 ip->i_df.if_flags = 0;
743 flags |= XFS_ILOG_DEV;
747 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
751 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
752 di_flags |= XFS_DIFLAG_RTINHERIT;
753 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
754 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
755 ip->i_d.di_extsize = pip->i_d.di_extsize;
757 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
758 di_flags |= XFS_DIFLAG_PROJINHERIT;
759 } else if (S_ISREG(mode)) {
760 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
761 di_flags |= XFS_DIFLAG_REALTIME;
762 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
763 di_flags |= XFS_DIFLAG_EXTSIZE;
764 ip->i_d.di_extsize = pip->i_d.di_extsize;
767 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
769 di_flags |= XFS_DIFLAG_NOATIME;
770 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
772 di_flags |= XFS_DIFLAG_NODUMP;
773 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
775 di_flags |= XFS_DIFLAG_SYNC;
776 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
777 xfs_inherit_nosymlinks)
778 di_flags |= XFS_DIFLAG_NOSYMLINKS;
779 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
780 xfs_inherit_nodefrag)
781 di_flags |= XFS_DIFLAG_NODEFRAG;
782 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
783 di_flags |= XFS_DIFLAG_FILESTREAM;
784 ip->i_d.di_flags |= di_flags;
788 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
789 ip->i_df.if_flags = XFS_IFEXTENTS;
790 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
791 ip->i_df.if_u1.if_extents = NULL;
797 * Attribute fork settings for new inode.
799 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
800 ip->i_d.di_anextents = 0;
803 * Log the new values stuffed into the inode.
805 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
806 xfs_trans_log_inode(tp, ip, flags);
808 /* now that we have an i_mode we can setup the inode structure */
816 * Allocates a new inode from disk and return a pointer to the
817 * incore copy. This routine will internally commit the current
818 * transaction and allocate a new one if the Space Manager needed
819 * to do an allocation to replenish the inode free-list.
821 * This routine is designed to be called from xfs_create and
827 xfs_trans_t **tpp, /* input: current transaction;
828 output: may be a new transaction. */
829 xfs_inode_t *dp, /* directory within whose allocate
834 prid_t prid, /* project id */
835 int okalloc, /* ok to allocate new space */
836 xfs_inode_t **ipp, /* pointer to inode; it will be
844 xfs_buf_t *ialloc_context = NULL;
850 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
853 * xfs_ialloc will return a pointer to an incore inode if
854 * the Space Manager has an available inode on the free
855 * list. Otherwise, it will do an allocation and replenish
856 * the freelist. Since we can only do one allocation per
857 * transaction without deadlocks, we will need to commit the
858 * current transaction and start a new one. We will then
859 * need to call xfs_ialloc again to get the inode.
861 * If xfs_ialloc did an allocation to replenish the freelist,
862 * it returns the bp containing the head of the freelist as
863 * ialloc_context. We will hold a lock on it across the
864 * transaction commit so that no other process can steal
865 * the inode(s) that we've just allocated.
867 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
868 &ialloc_context, &ip);
871 * Return an error if we were unable to allocate a new inode.
872 * This should only happen if we run out of space on disk or
873 * encounter a disk error.
879 if (!ialloc_context && !ip) {
885 * If the AGI buffer is non-NULL, then we were unable to get an
886 * inode in one operation. We need to commit the current
887 * transaction and call xfs_ialloc() again. It is guaranteed
888 * to succeed the second time.
890 if (ialloc_context) {
891 struct xfs_trans_res tres;
894 * Normally, xfs_trans_commit releases all the locks.
895 * We call bhold to hang on to the ialloc_context across
896 * the commit. Holding this buffer prevents any other
897 * processes from doing any allocations in this
900 xfs_trans_bhold(tp, ialloc_context);
902 * Save the log reservation so we can use
903 * them in the next transaction.
905 tres.tr_logres = xfs_trans_get_log_res(tp);
906 tres.tr_logcount = xfs_trans_get_log_count(tp);
909 * We want the quota changes to be associated with the next
910 * transaction, NOT this one. So, detach the dqinfo from this
911 * and attach it to the next transaction.
916 dqinfo = (void *)tp->t_dqinfo;
918 tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
919 tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
922 ntp = xfs_trans_dup(tp);
923 code = xfs_trans_commit(tp, 0);
925 if (committed != NULL) {
929 * If we get an error during the commit processing,
930 * release the buffer that is still held and return
934 xfs_buf_relse(ialloc_context);
936 tp->t_dqinfo = dqinfo;
937 xfs_trans_free_dqinfo(tp);
945 * transaction commit worked ok so we can drop the extra ticket
946 * reference that we gained in xfs_trans_dup()
948 xfs_log_ticket_put(tp->t_ticket);
949 tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
950 code = xfs_trans_reserve(tp, &tres, 0, 0);
953 * Re-attach the quota info that we detached from prev trx.
956 tp->t_dqinfo = dqinfo;
957 tp->t_flags |= tflags;
961 xfs_buf_relse(ialloc_context);
966 xfs_trans_bjoin(tp, ialloc_context);
969 * Call ialloc again. Since we've locked out all
970 * other allocations in this allocation group,
971 * this call should always succeed.
973 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
974 okalloc, &ialloc_context, &ip);
977 * If we get an error at this point, return to the caller
978 * so that the current transaction can be aborted.
985 ASSERT(!ialloc_context && ip);
988 if (committed != NULL)
999 * Decrement the link count on an inode & log the change.
1000 * If this causes the link count to go to zero, initiate the
1001 * logging activity required to truncate a file.
1010 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1012 ASSERT (ip->i_d.di_nlink > 0);
1014 drop_nlink(VFS_I(ip));
1015 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1018 if (ip->i_d.di_nlink == 0) {
1020 * We're dropping the last link to this file.
1021 * Move the on-disk inode to the AGI unlinked list.
1022 * From xfs_inactive() we will pull the inode from
1023 * the list and free it.
1025 error = xfs_iunlink(tp, ip);
1031 * Increment the link count on an inode & log the change.
1038 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1040 ASSERT(ip->i_d.di_version > 1);
1041 ASSERT(ip->i_d.di_nlink > 0 || (VFS_I(ip)->i_state & I_LINKABLE));
1043 inc_nlink(VFS_I(ip));
1044 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1051 struct xfs_name *name,
1056 int is_dir = S_ISDIR(mode);
1057 struct xfs_mount *mp = dp->i_mount;
1058 struct xfs_inode *ip = NULL;
1059 struct xfs_trans *tp = NULL;
1061 xfs_bmap_free_t free_list;
1062 xfs_fsblock_t first_block;
1063 bool unlock_dp_on_error = false;
1067 struct xfs_dquot *udqp = NULL;
1068 struct xfs_dquot *gdqp = NULL;
1069 struct xfs_dquot *pdqp = NULL;
1070 struct xfs_trans_res *tres;
1073 trace_xfs_create(dp, name);
1075 if (XFS_FORCED_SHUTDOWN(mp))
1078 prid = xfs_get_initial_prid(dp);
1081 * Make sure that we have allocated dquot(s) on disk.
1083 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1084 xfs_kgid_to_gid(current_fsgid()), prid,
1085 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1086 &udqp, &gdqp, &pdqp);
1092 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1093 tres = &M_RES(mp)->tr_mkdir;
1094 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
1096 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1097 tres = &M_RES(mp)->tr_create;
1098 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
1101 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1104 * Initially assume that the file does not exist and
1105 * reserve the resources for that case. If that is not
1106 * the case we'll drop the one we have and get a more
1107 * appropriate transaction later.
1109 error = xfs_trans_reserve(tp, tres, resblks, 0);
1110 if (error == -ENOSPC) {
1111 /* flush outstanding delalloc blocks and retry */
1112 xfs_flush_inodes(mp);
1113 error = xfs_trans_reserve(tp, tres, resblks, 0);
1115 if (error == -ENOSPC) {
1116 /* No space at all so try a "no-allocation" reservation */
1118 error = xfs_trans_reserve(tp, tres, 0, 0);
1122 goto out_trans_cancel;
1125 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1126 unlock_dp_on_error = true;
1128 xfs_bmap_init(&free_list, &first_block);
1131 * Reserve disk quota and the inode.
1133 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1134 pdqp, resblks, 1, 0);
1136 goto out_trans_cancel;
1139 error = xfs_dir_canenter(tp, dp, name);
1141 goto out_trans_cancel;
1145 * A newly created regular or special file just has one directory
1146 * entry pointing to them, but a directory also the "." entry
1147 * pointing to itself.
1149 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
1150 prid, resblks > 0, &ip, &committed);
1152 if (error == -ENOSPC)
1153 goto out_trans_cancel;
1154 goto out_trans_abort;
1158 * Now we join the directory inode to the transaction. We do not do it
1159 * earlier because xfs_dir_ialloc might commit the previous transaction
1160 * (and release all the locks). An error from here on will result in
1161 * the transaction cancel unlocking dp so don't do it explicitly in the
1164 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1165 unlock_dp_on_error = false;
1167 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1168 &first_block, &free_list, resblks ?
1169 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1171 ASSERT(error != -ENOSPC);
1172 goto out_trans_abort;
1174 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1175 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1178 error = xfs_dir_init(tp, ip, dp);
1180 goto out_bmap_cancel;
1182 error = xfs_bumplink(tp, dp);
1184 goto out_bmap_cancel;
1188 * If this is a synchronous mount, make sure that the
1189 * create transaction goes to disk before returning to
1192 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1193 xfs_trans_set_sync(tp);
1196 * Attach the dquot(s) to the inodes and modify them incore.
1197 * These ids of the inode couldn't have changed since the new
1198 * inode has been locked ever since it was created.
1200 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1202 error = xfs_bmap_finish(&tp, &free_list, &committed);
1204 goto out_bmap_cancel;
1206 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1208 goto out_release_inode;
1210 xfs_qm_dqrele(udqp);
1211 xfs_qm_dqrele(gdqp);
1212 xfs_qm_dqrele(pdqp);
1218 xfs_bmap_cancel(&free_list);
1220 cancel_flags |= XFS_TRANS_ABORT;
1222 xfs_trans_cancel(tp, cancel_flags);
1225 * Wait until after the current transaction is aborted to finish the
1226 * setup of the inode and release the inode. This prevents recursive
1227 * transactions and deadlocks from xfs_inactive.
1230 xfs_finish_inode_setup(ip);
1234 xfs_qm_dqrele(udqp);
1235 xfs_qm_dqrele(gdqp);
1236 xfs_qm_dqrele(pdqp);
1238 if (unlock_dp_on_error)
1239 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1245 struct xfs_inode *dp,
1246 struct dentry *dentry,
1248 struct xfs_inode **ipp)
1250 struct xfs_mount *mp = dp->i_mount;
1251 struct xfs_inode *ip = NULL;
1252 struct xfs_trans *tp = NULL;
1254 uint cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1256 struct xfs_dquot *udqp = NULL;
1257 struct xfs_dquot *gdqp = NULL;
1258 struct xfs_dquot *pdqp = NULL;
1259 struct xfs_trans_res *tres;
1262 if (XFS_FORCED_SHUTDOWN(mp))
1265 prid = xfs_get_initial_prid(dp);
1268 * Make sure that we have allocated dquot(s) on disk.
1270 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1271 xfs_kgid_to_gid(current_fsgid()), prid,
1272 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1273 &udqp, &gdqp, &pdqp);
1277 resblks = XFS_IALLOC_SPACE_RES(mp);
1278 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE_TMPFILE);
1280 tres = &M_RES(mp)->tr_create_tmpfile;
1281 error = xfs_trans_reserve(tp, tres, resblks, 0);
1282 if (error == -ENOSPC) {
1283 /* No space at all so try a "no-allocation" reservation */
1285 error = xfs_trans_reserve(tp, tres, 0, 0);
1289 goto out_trans_cancel;
1292 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1293 pdqp, resblks, 1, 0);
1295 goto out_trans_cancel;
1297 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
1298 prid, resblks > 0, &ip, NULL);
1300 if (error == -ENOSPC)
1301 goto out_trans_cancel;
1302 goto out_trans_abort;
1305 if (mp->m_flags & XFS_MOUNT_WSYNC)
1306 xfs_trans_set_sync(tp);
1309 * Attach the dquot(s) to the inodes and modify them incore.
1310 * These ids of the inode couldn't have changed since the new
1311 * inode has been locked ever since it was created.
1313 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1316 error = xfs_iunlink(tp, ip);
1318 goto out_trans_abort;
1320 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1322 goto out_release_inode;
1324 xfs_qm_dqrele(udqp);
1325 xfs_qm_dqrele(gdqp);
1326 xfs_qm_dqrele(pdqp);
1332 cancel_flags |= XFS_TRANS_ABORT;
1334 xfs_trans_cancel(tp, cancel_flags);
1337 * Wait until after the current transaction is aborted to finish the
1338 * setup of the inode and release the inode. This prevents recursive
1339 * transactions and deadlocks from xfs_inactive.
1342 xfs_finish_inode_setup(ip);
1346 xfs_qm_dqrele(udqp);
1347 xfs_qm_dqrele(gdqp);
1348 xfs_qm_dqrele(pdqp);
1357 struct xfs_name *target_name)
1359 xfs_mount_t *mp = tdp->i_mount;
1362 xfs_bmap_free_t free_list;
1363 xfs_fsblock_t first_block;
1368 trace_xfs_link(tdp, target_name);
1370 ASSERT(!S_ISDIR(sip->i_d.di_mode));
1372 if (XFS_FORCED_SHUTDOWN(mp))
1375 error = xfs_qm_dqattach(sip, 0);
1379 error = xfs_qm_dqattach(tdp, 0);
1383 tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
1384 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1385 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1386 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
1387 if (error == -ENOSPC) {
1389 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
1396 xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
1398 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1399 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1402 * If we are using project inheritance, we only allow hard link
1403 * creation in our tree when the project IDs are the same; else
1404 * the tree quota mechanism could be circumvented.
1406 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1407 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1413 error = xfs_dir_canenter(tp, tdp, target_name);
1418 xfs_bmap_init(&free_list, &first_block);
1420 if (sip->i_d.di_nlink == 0) {
1421 error = xfs_iunlink_remove(tp, sip);
1426 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1427 &first_block, &free_list, resblks);
1430 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1431 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1433 error = xfs_bumplink(tp, sip);
1438 * If this is a synchronous mount, make sure that the
1439 * link transaction goes to disk before returning to
1442 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
1443 xfs_trans_set_sync(tp);
1446 error = xfs_bmap_finish (&tp, &free_list, &committed);
1448 xfs_bmap_cancel(&free_list);
1452 return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1455 cancel_flags |= XFS_TRANS_ABORT;
1457 xfs_trans_cancel(tp, cancel_flags);
1463 * Free up the underlying blocks past new_size. The new size must be smaller
1464 * than the current size. This routine can be used both for the attribute and
1465 * data fork, and does not modify the inode size, which is left to the caller.
1467 * The transaction passed to this routine must have made a permanent log
1468 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1469 * given transaction and start new ones, so make sure everything involved in
1470 * the transaction is tidy before calling here. Some transaction will be
1471 * returned to the caller to be committed. The incoming transaction must
1472 * already include the inode, and both inode locks must be held exclusively.
1473 * The inode must also be "held" within the transaction. On return the inode
1474 * will be "held" within the returned transaction. This routine does NOT
1475 * require any disk space to be reserved for it within the transaction.
1477 * If we get an error, we must return with the inode locked and linked into the
1478 * current transaction. This keeps things simple for the higher level code,
1479 * because it always knows that the inode is locked and held in the transaction
1480 * that returns to it whether errors occur or not. We don't mark the inode
1481 * dirty on error so that transactions can be easily aborted if possible.
1484 xfs_itruncate_extents(
1485 struct xfs_trans **tpp,
1486 struct xfs_inode *ip,
1488 xfs_fsize_t new_size)
1490 struct xfs_mount *mp = ip->i_mount;
1491 struct xfs_trans *tp = *tpp;
1492 struct xfs_trans *ntp;
1493 xfs_bmap_free_t free_list;
1494 xfs_fsblock_t first_block;
1495 xfs_fileoff_t first_unmap_block;
1496 xfs_fileoff_t last_block;
1497 xfs_filblks_t unmap_len;
1502 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1503 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1504 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1505 ASSERT(new_size <= XFS_ISIZE(ip));
1506 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1507 ASSERT(ip->i_itemp != NULL);
1508 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1509 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1511 trace_xfs_itruncate_extents_start(ip, new_size);
1514 * Since it is possible for space to become allocated beyond
1515 * the end of the file (in a crash where the space is allocated
1516 * but the inode size is not yet updated), simply remove any
1517 * blocks which show up between the new EOF and the maximum
1518 * possible file size. If the first block to be removed is
1519 * beyond the maximum file size (ie it is the same as last_block),
1520 * then there is nothing to do.
1522 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1523 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1524 if (first_unmap_block == last_block)
1527 ASSERT(first_unmap_block < last_block);
1528 unmap_len = last_block - first_unmap_block + 1;
1530 xfs_bmap_init(&free_list, &first_block);
1531 error = xfs_bunmapi(tp, ip,
1532 first_unmap_block, unmap_len,
1533 xfs_bmapi_aflag(whichfork),
1534 XFS_ITRUNC_MAX_EXTENTS,
1535 &first_block, &free_list,
1538 goto out_bmap_cancel;
1541 * Duplicate the transaction that has the permanent
1542 * reservation and commit the old transaction.
1544 error = xfs_bmap_finish(&tp, &free_list, &committed);
1546 xfs_trans_ijoin(tp, ip, 0);
1548 goto out_bmap_cancel;
1552 * Mark the inode dirty so it will be logged and
1553 * moved forward in the log as part of every commit.
1555 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1558 ntp = xfs_trans_dup(tp);
1559 error = xfs_trans_commit(tp, 0);
1562 xfs_trans_ijoin(tp, ip, 0);
1568 * Transaction commit worked ok so we can drop the extra ticket
1569 * reference that we gained in xfs_trans_dup()
1571 xfs_log_ticket_put(tp->t_ticket);
1572 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
1578 * Always re-log the inode so that our permanent transaction can keep
1579 * on rolling it forward in the log.
1581 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1583 trace_xfs_itruncate_extents_end(ip, new_size);
1590 * If the bunmapi call encounters an error, return to the caller where
1591 * the transaction can be properly aborted. We just need to make sure
1592 * we're not holding any resources that we were not when we came in.
1594 xfs_bmap_cancel(&free_list);
1602 xfs_mount_t *mp = ip->i_mount;
1605 if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
1608 /* If this is a read-only mount, don't do this (would generate I/O) */
1609 if (mp->m_flags & XFS_MOUNT_RDONLY)
1612 if (!XFS_FORCED_SHUTDOWN(mp)) {
1616 * If we previously truncated this file and removed old data
1617 * in the process, we want to initiate "early" writeout on
1618 * the last close. This is an attempt to combat the notorious
1619 * NULL files problem which is particularly noticeable from a
1620 * truncate down, buffered (re-)write (delalloc), followed by
1621 * a crash. What we are effectively doing here is
1622 * significantly reducing the time window where we'd otherwise
1623 * be exposed to that problem.
1625 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1627 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1628 if (ip->i_delayed_blks > 0) {
1629 error = filemap_flush(VFS_I(ip)->i_mapping);
1636 if (ip->i_d.di_nlink == 0)
1639 if (xfs_can_free_eofblocks(ip, false)) {
1642 * If we can't get the iolock just skip truncating the blocks
1643 * past EOF because we could deadlock with the mmap_sem
1644 * otherwise. We'll get another chance to drop them once the
1645 * last reference to the inode is dropped, so we'll never leak
1646 * blocks permanently.
1648 * Further, check if the inode is being opened, written and
1649 * closed frequently and we have delayed allocation blocks
1650 * outstanding (e.g. streaming writes from the NFS server),
1651 * truncating the blocks past EOF will cause fragmentation to
1654 * In this case don't do the truncation, either, but we have to
1655 * be careful how we detect this case. Blocks beyond EOF show
1656 * up as i_delayed_blks even when the inode is clean, so we
1657 * need to truncate them away first before checking for a dirty
1658 * release. Hence on the first dirty close we will still remove
1659 * the speculative allocation, but after that we will leave it
1662 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1665 error = xfs_free_eofblocks(mp, ip, true);
1666 if (error && error != -EAGAIN)
1669 /* delalloc blocks after truncation means it really is dirty */
1670 if (ip->i_delayed_blks)
1671 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1677 * xfs_inactive_truncate
1679 * Called to perform a truncate when an inode becomes unlinked.
1682 xfs_inactive_truncate(
1683 struct xfs_inode *ip)
1685 struct xfs_mount *mp = ip->i_mount;
1686 struct xfs_trans *tp;
1689 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
1690 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
1692 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1693 xfs_trans_cancel(tp, 0);
1697 xfs_ilock(ip, XFS_ILOCK_EXCL);
1698 xfs_trans_ijoin(tp, ip, 0);
1701 * Log the inode size first to prevent stale data exposure in the event
1702 * of a system crash before the truncate completes. See the related
1703 * comment in xfs_setattr_size() for details.
1705 ip->i_d.di_size = 0;
1706 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1708 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1710 goto error_trans_cancel;
1712 ASSERT(ip->i_d.di_nextents == 0);
1714 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1718 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1722 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1724 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1729 * xfs_inactive_ifree()
1731 * Perform the inode free when an inode is unlinked.
1735 struct xfs_inode *ip)
1737 xfs_bmap_free_t free_list;
1738 xfs_fsblock_t first_block;
1740 struct xfs_mount *mp = ip->i_mount;
1741 struct xfs_trans *tp;
1744 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
1747 * The ifree transaction might need to allocate blocks for record
1748 * insertion to the finobt. We don't want to fail here at ENOSPC, so
1749 * allow ifree to dip into the reserved block pool if necessary.
1751 * Freeing large sets of inodes generally means freeing inode chunks,
1752 * directory and file data blocks, so this should be relatively safe.
1753 * Only under severe circumstances should it be possible to free enough
1754 * inodes to exhaust the reserve block pool via finobt expansion while
1755 * at the same time not creating free space in the filesystem.
1757 * Send a warning if the reservation does happen to fail, as the inode
1758 * now remains allocated and sits on the unlinked list until the fs is
1761 tp->t_flags |= XFS_TRANS_RESERVE;
1762 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree,
1763 XFS_IFREE_SPACE_RES(mp), 0);
1765 if (error == -ENOSPC) {
1766 xfs_warn_ratelimited(mp,
1767 "Failed to remove inode(s) from unlinked list. "
1768 "Please free space, unmount and run xfs_repair.");
1770 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1772 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
1776 xfs_ilock(ip, XFS_ILOCK_EXCL);
1777 xfs_trans_ijoin(tp, ip, 0);
1779 xfs_bmap_init(&free_list, &first_block);
1780 error = xfs_ifree(tp, ip, &free_list);
1783 * If we fail to free the inode, shut down. The cancel
1784 * might do that, we need to make sure. Otherwise the
1785 * inode might be lost for a long time or forever.
1787 if (!XFS_FORCED_SHUTDOWN(mp)) {
1788 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1790 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1792 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
1793 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1798 * Credit the quota account(s). The inode is gone.
1800 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1803 * Just ignore errors at this point. There is nothing we can
1804 * do except to try to keep going. Make sure it's not a silent
1807 error = xfs_bmap_finish(&tp, &free_list, &committed);
1809 xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
1811 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1813 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1816 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1823 * This is called when the vnode reference count for the vnode
1824 * goes to zero. If the file has been unlinked, then it must
1825 * now be truncated. Also, we clear all of the read-ahead state
1826 * kept for the inode here since the file is now closed.
1832 struct xfs_mount *mp;
1837 * If the inode is already free, then there can be nothing
1840 if (ip->i_d.di_mode == 0) {
1841 ASSERT(ip->i_df.if_real_bytes == 0);
1842 ASSERT(ip->i_df.if_broot_bytes == 0);
1848 /* If this is a read-only mount, don't do this (would generate I/O) */
1849 if (mp->m_flags & XFS_MOUNT_RDONLY)
1852 if (ip->i_d.di_nlink != 0) {
1854 * force is true because we are evicting an inode from the
1855 * cache. Post-eof blocks must be freed, lest we end up with
1856 * broken free space accounting.
1858 if (xfs_can_free_eofblocks(ip, true))
1859 xfs_free_eofblocks(mp, ip, false);
1864 if (S_ISREG(ip->i_d.di_mode) &&
1865 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1866 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1869 error = xfs_qm_dqattach(ip, 0);
1873 if (S_ISLNK(ip->i_d.di_mode))
1874 error = xfs_inactive_symlink(ip);
1876 error = xfs_inactive_truncate(ip);
1881 * If there are attributes associated with the file then blow them away
1882 * now. The code calls a routine that recursively deconstructs the
1883 * attribute fork. We need to just commit the current transaction
1884 * because we can't use it for xfs_attr_inactive().
1886 if (ip->i_d.di_anextents > 0) {
1887 ASSERT(ip->i_d.di_forkoff != 0);
1889 error = xfs_attr_inactive(ip);
1895 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
1897 ASSERT(ip->i_d.di_anextents == 0);
1902 error = xfs_inactive_ifree(ip);
1907 * Release the dquots held by inode, if any.
1909 xfs_qm_dqdetach(ip);
1913 * This is called when the inode's link count goes to 0.
1914 * We place the on-disk inode on a list in the AGI. It
1915 * will be pulled from this list when the inode is freed.
1932 ASSERT(ip->i_d.di_nlink == 0);
1933 ASSERT(ip->i_d.di_mode != 0);
1938 * Get the agi buffer first. It ensures lock ordering
1941 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1944 agi = XFS_BUF_TO_AGI(agibp);
1947 * Get the index into the agi hash table for the
1948 * list this inode will go on.
1950 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1952 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1953 ASSERT(agi->agi_unlinked[bucket_index]);
1954 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1956 if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
1958 * There is already another inode in the bucket we need
1959 * to add ourselves to. Add us at the front of the list.
1960 * Here we put the head pointer into our next pointer,
1961 * and then we fall through to point the head at us.
1963 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1968 ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
1969 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1970 offset = ip->i_imap.im_boffset +
1971 offsetof(xfs_dinode_t, di_next_unlinked);
1973 /* need to recalc the inode CRC if appropriate */
1974 xfs_dinode_calc_crc(mp, dip);
1976 xfs_trans_inode_buf(tp, ibp);
1977 xfs_trans_log_buf(tp, ibp, offset,
1978 (offset + sizeof(xfs_agino_t) - 1));
1979 xfs_inobp_check(mp, ibp);
1983 * Point the bucket head pointer at the inode being inserted.
1986 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1987 offset = offsetof(xfs_agi_t, agi_unlinked) +
1988 (sizeof(xfs_agino_t) * bucket_index);
1989 xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
1990 xfs_trans_log_buf(tp, agibp, offset,
1991 (offset + sizeof(xfs_agino_t) - 1));
1996 * Pull the on-disk inode from the AGI unlinked list.
2009 xfs_agnumber_t agno;
2011 xfs_agino_t next_agino;
2012 xfs_buf_t *last_ibp;
2013 xfs_dinode_t *last_dip = NULL;
2015 int offset, last_offset = 0;
2019 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2022 * Get the agi buffer first. It ensures lock ordering
2025 error = xfs_read_agi(mp, tp, agno, &agibp);
2029 agi = XFS_BUF_TO_AGI(agibp);
2032 * Get the index into the agi hash table for the
2033 * list this inode will go on.
2035 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2037 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2038 ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
2039 ASSERT(agi->agi_unlinked[bucket_index]);
2041 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2043 * We're at the head of the list. Get the inode's on-disk
2044 * buffer to see if there is anyone after us on the list.
2045 * Only modify our next pointer if it is not already NULLAGINO.
2046 * This saves us the overhead of dealing with the buffer when
2047 * there is no need to change it.
2049 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2052 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2056 next_agino = be32_to_cpu(dip->di_next_unlinked);
2057 ASSERT(next_agino != 0);
2058 if (next_agino != NULLAGINO) {
2059 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2060 offset = ip->i_imap.im_boffset +
2061 offsetof(xfs_dinode_t, di_next_unlinked);
2063 /* need to recalc the inode CRC if appropriate */
2064 xfs_dinode_calc_crc(mp, dip);
2066 xfs_trans_inode_buf(tp, ibp);
2067 xfs_trans_log_buf(tp, ibp, offset,
2068 (offset + sizeof(xfs_agino_t) - 1));
2069 xfs_inobp_check(mp, ibp);
2071 xfs_trans_brelse(tp, ibp);
2074 * Point the bucket head pointer at the next inode.
2076 ASSERT(next_agino != 0);
2077 ASSERT(next_agino != agino);
2078 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2079 offset = offsetof(xfs_agi_t, agi_unlinked) +
2080 (sizeof(xfs_agino_t) * bucket_index);
2081 xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
2082 xfs_trans_log_buf(tp, agibp, offset,
2083 (offset + sizeof(xfs_agino_t) - 1));
2086 * We need to search the list for the inode being freed.
2088 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2090 while (next_agino != agino) {
2091 struct xfs_imap imap;
2094 xfs_trans_brelse(tp, last_ibp);
2097 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2099 error = xfs_imap(mp, tp, next_ino, &imap, 0);
2102 "%s: xfs_imap returned error %d.",
2107 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
2111 "%s: xfs_imap_to_bp returned error %d.",
2116 last_offset = imap.im_boffset;
2117 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2118 ASSERT(next_agino != NULLAGINO);
2119 ASSERT(next_agino != 0);
2123 * Now last_ibp points to the buffer previous to us on the
2124 * unlinked list. Pull us from the list.
2126 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2129 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
2133 next_agino = be32_to_cpu(dip->di_next_unlinked);
2134 ASSERT(next_agino != 0);
2135 ASSERT(next_agino != agino);
2136 if (next_agino != NULLAGINO) {
2137 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2138 offset = ip->i_imap.im_boffset +
2139 offsetof(xfs_dinode_t, di_next_unlinked);
2141 /* need to recalc the inode CRC if appropriate */
2142 xfs_dinode_calc_crc(mp, dip);
2144 xfs_trans_inode_buf(tp, ibp);
2145 xfs_trans_log_buf(tp, ibp, offset,
2146 (offset + sizeof(xfs_agino_t) - 1));
2147 xfs_inobp_check(mp, ibp);
2149 xfs_trans_brelse(tp, ibp);
2152 * Point the previous inode on the list to the next inode.
2154 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2155 ASSERT(next_agino != 0);
2156 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2158 /* need to recalc the inode CRC if appropriate */
2159 xfs_dinode_calc_crc(mp, last_dip);
2161 xfs_trans_inode_buf(tp, last_ibp);
2162 xfs_trans_log_buf(tp, last_ibp, offset,
2163 (offset + sizeof(xfs_agino_t) - 1));
2164 xfs_inobp_check(mp, last_ibp);
2170 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2171 * inodes that are in memory - they all must be marked stale and attached to
2172 * the cluster buffer.
2176 xfs_inode_t *free_ip,
2180 xfs_mount_t *mp = free_ip->i_mount;
2181 int blks_per_cluster;
2182 int inodes_per_cluster;
2188 xfs_inode_log_item_t *iip;
2189 xfs_log_item_t *lip;
2190 struct xfs_perag *pag;
2192 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2193 blks_per_cluster = xfs_icluster_size_fsb(mp);
2194 inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
2195 nbufs = mp->m_ialloc_blks / blks_per_cluster;
2197 for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
2198 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2199 XFS_INO_TO_AGBNO(mp, inum));
2202 * We obtain and lock the backing buffer first in the process
2203 * here, as we have to ensure that any dirty inode that we
2204 * can't get the flush lock on is attached to the buffer.
2205 * If we scan the in-memory inodes first, then buffer IO can
2206 * complete before we get a lock on it, and hence we may fail
2207 * to mark all the active inodes on the buffer stale.
2209 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2210 mp->m_bsize * blks_per_cluster,
2217 * This buffer may not have been correctly initialised as we
2218 * didn't read it from disk. That's not important because we are
2219 * only using to mark the buffer as stale in the log, and to
2220 * attach stale cached inodes on it. That means it will never be
2221 * dispatched for IO. If it is, we want to know about it, and we
2222 * want it to fail. We can acheive this by adding a write
2223 * verifier to the buffer.
2225 bp->b_ops = &xfs_inode_buf_ops;
2228 * Walk the inodes already attached to the buffer and mark them
2229 * stale. These will all have the flush locks held, so an
2230 * in-memory inode walk can't lock them. By marking them all
2231 * stale first, we will not attempt to lock them in the loop
2232 * below as the XFS_ISTALE flag will be set.
2236 if (lip->li_type == XFS_LI_INODE) {
2237 iip = (xfs_inode_log_item_t *)lip;
2238 ASSERT(iip->ili_logged == 1);
2239 lip->li_cb = xfs_istale_done;
2240 xfs_trans_ail_copy_lsn(mp->m_ail,
2241 &iip->ili_flush_lsn,
2242 &iip->ili_item.li_lsn);
2243 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2245 lip = lip->li_bio_list;
2250 * For each inode in memory attempt to add it to the inode
2251 * buffer and set it up for being staled on buffer IO
2252 * completion. This is safe as we've locked out tail pushing
2253 * and flushing by locking the buffer.
2255 * We have already marked every inode that was part of a
2256 * transaction stale above, which means there is no point in
2257 * even trying to lock them.
2259 for (i = 0; i < inodes_per_cluster; i++) {
2262 ip = radix_tree_lookup(&pag->pag_ici_root,
2263 XFS_INO_TO_AGINO(mp, (inum + i)));
2265 /* Inode not in memory, nothing to do */
2272 * because this is an RCU protected lookup, we could
2273 * find a recently freed or even reallocated inode
2274 * during the lookup. We need to check under the
2275 * i_flags_lock for a valid inode here. Skip it if it
2276 * is not valid, the wrong inode or stale.
2278 spin_lock(&ip->i_flags_lock);
2279 if (ip->i_ino != inum + i ||
2280 __xfs_iflags_test(ip, XFS_ISTALE)) {
2281 spin_unlock(&ip->i_flags_lock);
2285 spin_unlock(&ip->i_flags_lock);
2288 * Don't try to lock/unlock the current inode, but we
2289 * _cannot_ skip the other inodes that we did not find
2290 * in the list attached to the buffer and are not
2291 * already marked stale. If we can't lock it, back off
2294 if (ip != free_ip &&
2295 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2303 xfs_iflags_set(ip, XFS_ISTALE);
2306 * we don't need to attach clean inodes or those only
2307 * with unlogged changes (which we throw away, anyway).
2310 if (!iip || xfs_inode_clean(ip)) {
2311 ASSERT(ip != free_ip);
2313 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2317 iip->ili_last_fields = iip->ili_fields;
2318 iip->ili_fields = 0;
2319 iip->ili_logged = 1;
2320 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2321 &iip->ili_item.li_lsn);
2323 xfs_buf_attach_iodone(bp, xfs_istale_done,
2327 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2330 xfs_trans_stale_inode_buf(tp, bp);
2331 xfs_trans_binval(tp, bp);
2339 * This is called to return an inode to the inode free list.
2340 * The inode should already be truncated to 0 length and have
2341 * no pages associated with it. This routine also assumes that
2342 * the inode is already a part of the transaction.
2344 * The on-disk copy of the inode will have been added to the list
2345 * of unlinked inodes in the AGI. We need to remove the inode from
2346 * that list atomically with respect to freeing it here.
2352 xfs_bmap_free_t *flist)
2356 xfs_ino_t first_ino;
2358 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2359 ASSERT(ip->i_d.di_nlink == 0);
2360 ASSERT(ip->i_d.di_nextents == 0);
2361 ASSERT(ip->i_d.di_anextents == 0);
2362 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
2363 ASSERT(ip->i_d.di_nblocks == 0);
2366 * Pull the on-disk inode from the AGI unlinked list.
2368 error = xfs_iunlink_remove(tp, ip);
2372 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2376 ip->i_d.di_mode = 0; /* mark incore inode as free */
2377 ip->i_d.di_flags = 0;
2378 ip->i_d.di_dmevmask = 0;
2379 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2380 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2381 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2383 * Bump the generation count so no one will be confused
2384 * by reincarnations of this inode.
2387 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2390 error = xfs_ifree_cluster(ip, tp, first_ino);
2396 * This is called to unpin an inode. The caller must have the inode locked
2397 * in at least shared mode so that the buffer cannot be subsequently pinned
2398 * once someone is waiting for it to be unpinned.
2402 struct xfs_inode *ip)
2404 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2406 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2408 /* Give the log a push to start the unpinning I/O */
2409 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2415 struct xfs_inode *ip)
2417 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2418 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2423 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2424 if (xfs_ipincount(ip))
2426 } while (xfs_ipincount(ip));
2427 finish_wait(wq, &wait.wait);
2432 struct xfs_inode *ip)
2434 if (xfs_ipincount(ip))
2435 __xfs_iunpin_wait(ip);
2439 * Removing an inode from the namespace involves removing the directory entry
2440 * and dropping the link count on the inode. Removing the directory entry can
2441 * result in locking an AGF (directory blocks were freed) and removing a link
2442 * count can result in placing the inode on an unlinked list which results in
2445 * The big problem here is that we have an ordering constraint on AGF and AGI
2446 * locking - inode allocation locks the AGI, then can allocate a new extent for
2447 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2448 * removes the inode from the unlinked list, requiring that we lock the AGI
2449 * first, and then freeing the inode can result in an inode chunk being freed
2450 * and hence freeing disk space requiring that we lock an AGF.
2452 * Hence the ordering that is imposed by other parts of the code is AGI before
2453 * AGF. This means we cannot remove the directory entry before we drop the inode
2454 * reference count and put it on the unlinked list as this results in a lock
2455 * order of AGF then AGI, and this can deadlock against inode allocation and
2456 * freeing. Therefore we must drop the link counts before we remove the
2459 * This is still safe from a transactional point of view - it is not until we
2460 * get to xfs_bmap_finish() that we have the possibility of multiple
2461 * transactions in this operation. Hence as long as we remove the directory
2462 * entry and drop the link count in the first transaction of the remove
2463 * operation, there are no transactional constraints on the ordering here.
2468 struct xfs_name *name,
2471 xfs_mount_t *mp = dp->i_mount;
2472 xfs_trans_t *tp = NULL;
2473 int is_dir = S_ISDIR(ip->i_d.di_mode);
2475 xfs_bmap_free_t free_list;
2476 xfs_fsblock_t first_block;
2481 trace_xfs_remove(dp, name);
2483 if (XFS_FORCED_SHUTDOWN(mp))
2486 error = xfs_qm_dqattach(dp, 0);
2490 error = xfs_qm_dqattach(ip, 0);
2495 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
2497 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
2498 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2501 * We try to get the real space reservation first,
2502 * allowing for directory btree deletion(s) implying
2503 * possible bmap insert(s). If we can't get the space
2504 * reservation then we use 0 instead, and avoid the bmap
2505 * btree insert(s) in the directory code by, if the bmap
2506 * insert tries to happen, instead trimming the LAST
2507 * block from the directory.
2509 resblks = XFS_REMOVE_SPACE_RES(mp);
2510 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
2511 if (error == -ENOSPC) {
2513 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
2516 ASSERT(error != -ENOSPC);
2518 goto out_trans_cancel;
2521 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2523 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2524 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2527 * If we're removing a directory perform some additional validation.
2529 cancel_flags |= XFS_TRANS_ABORT;
2531 ASSERT(ip->i_d.di_nlink >= 2);
2532 if (ip->i_d.di_nlink != 2) {
2534 goto out_trans_cancel;
2536 if (!xfs_dir_isempty(ip)) {
2538 goto out_trans_cancel;
2541 /* Drop the link from ip's "..". */
2542 error = xfs_droplink(tp, dp);
2544 goto out_trans_cancel;
2546 /* Drop the "." link from ip to self. */
2547 error = xfs_droplink(tp, ip);
2549 goto out_trans_cancel;
2552 * When removing a non-directory we need to log the parent
2553 * inode here. For a directory this is done implicitly
2554 * by the xfs_droplink call for the ".." entry.
2556 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2558 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2560 /* Drop the link from dp to ip. */
2561 error = xfs_droplink(tp, ip);
2563 goto out_trans_cancel;
2565 xfs_bmap_init(&free_list, &first_block);
2566 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2567 &first_block, &free_list, resblks);
2569 ASSERT(error != -ENOENT);
2570 goto out_bmap_cancel;
2574 * If this is a synchronous mount, make sure that the
2575 * remove transaction goes to disk before returning to
2578 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2579 xfs_trans_set_sync(tp);
2581 error = xfs_bmap_finish(&tp, &free_list, &committed);
2583 goto out_bmap_cancel;
2585 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2589 if (is_dir && xfs_inode_is_filestream(ip))
2590 xfs_filestream_deassociate(ip);
2595 xfs_bmap_cancel(&free_list);
2597 xfs_trans_cancel(tp, cancel_flags);
2603 * Enter all inodes for a rename transaction into a sorted array.
2605 #define __XFS_SORT_INODES 5
2607 xfs_sort_for_rename(
2608 struct xfs_inode *dp1, /* in: old (source) directory inode */
2609 struct xfs_inode *dp2, /* in: new (target) directory inode */
2610 struct xfs_inode *ip1, /* in: inode of old entry */
2611 struct xfs_inode *ip2, /* in: inode of new entry */
2612 struct xfs_inode *wip, /* in: whiteout inode */
2613 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2614 int *num_inodes) /* in/out: inodes in array */
2618 ASSERT(*num_inodes == __XFS_SORT_INODES);
2619 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2622 * i_tab contains a list of pointers to inodes. We initialize
2623 * the table here & we'll sort it. We will then use it to
2624 * order the acquisition of the inode locks.
2626 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2639 * Sort the elements via bubble sort. (Remember, there are at
2640 * most 5 elements to sort, so this is adequate.)
2642 for (i = 0; i < *num_inodes; i++) {
2643 for (j = 1; j < *num_inodes; j++) {
2644 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2645 struct xfs_inode *temp = i_tab[j];
2646 i_tab[j] = i_tab[j-1];
2655 struct xfs_trans *tp,
2656 struct xfs_bmap_free *free_list)
2662 * If this is a synchronous mount, make sure that the rename transaction
2663 * goes to disk before returning to the user.
2665 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2666 xfs_trans_set_sync(tp);
2668 error = xfs_bmap_finish(&tp, free_list, &committed);
2670 xfs_bmap_cancel(free_list);
2671 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
2675 return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2679 * xfs_cross_rename()
2681 * responsible for handling RENAME_EXCHANGE flag in renameat2() sytemcall
2685 struct xfs_trans *tp,
2686 struct xfs_inode *dp1,
2687 struct xfs_name *name1,
2688 struct xfs_inode *ip1,
2689 struct xfs_inode *dp2,
2690 struct xfs_name *name2,
2691 struct xfs_inode *ip2,
2692 struct xfs_bmap_free *free_list,
2693 xfs_fsblock_t *first_block,
2701 /* Swap inode number for dirent in first parent */
2702 error = xfs_dir_replace(tp, dp1, name1,
2704 first_block, free_list, spaceres);
2706 goto out_trans_abort;
2708 /* Swap inode number for dirent in second parent */
2709 error = xfs_dir_replace(tp, dp2, name2,
2711 first_block, free_list, spaceres);
2713 goto out_trans_abort;
2716 * If we're renaming one or more directories across different parents,
2717 * update the respective ".." entries (and link counts) to match the new
2721 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2723 if (S_ISDIR(ip2->i_d.di_mode)) {
2724 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2725 dp1->i_ino, first_block,
2726 free_list, spaceres);
2728 goto out_trans_abort;
2730 /* transfer ip2 ".." reference to dp1 */
2731 if (!S_ISDIR(ip1->i_d.di_mode)) {
2732 error = xfs_droplink(tp, dp2);
2734 goto out_trans_abort;
2735 error = xfs_bumplink(tp, dp1);
2737 goto out_trans_abort;
2741 * Although ip1 isn't changed here, userspace needs
2742 * to be warned about the change, so that applications
2743 * relying on it (like backup ones), will properly
2746 ip1_flags |= XFS_ICHGTIME_CHG;
2747 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2750 if (S_ISDIR(ip1->i_d.di_mode)) {
2751 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2752 dp2->i_ino, first_block,
2753 free_list, spaceres);
2755 goto out_trans_abort;
2757 /* transfer ip1 ".." reference to dp2 */
2758 if (!S_ISDIR(ip2->i_d.di_mode)) {
2759 error = xfs_droplink(tp, dp1);
2761 goto out_trans_abort;
2762 error = xfs_bumplink(tp, dp2);
2764 goto out_trans_abort;
2768 * Although ip2 isn't changed here, userspace needs
2769 * to be warned about the change, so that applications
2770 * relying on it (like backup ones), will properly
2773 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2774 ip2_flags |= XFS_ICHGTIME_CHG;
2779 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2780 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2783 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2784 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2787 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2788 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2790 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2791 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2792 return xfs_finish_rename(tp, free_list);
2795 xfs_bmap_cancel(free_list);
2796 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
2805 xfs_inode_t *src_dp,
2806 struct xfs_name *src_name,
2807 xfs_inode_t *src_ip,
2808 xfs_inode_t *target_dp,
2809 struct xfs_name *target_name,
2810 xfs_inode_t *target_ip,
2813 xfs_trans_t *tp = NULL;
2814 xfs_mount_t *mp = src_dp->i_mount;
2815 int new_parent; /* moving to a new dir */
2816 int src_is_directory; /* src_name is a directory */
2818 xfs_bmap_free_t free_list;
2819 xfs_fsblock_t first_block;
2820 int cancel_flags = 0;
2821 xfs_inode_t *inodes[__XFS_SORT_INODES];
2822 int num_inodes = __XFS_SORT_INODES;
2825 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2827 if ((flags & RENAME_EXCHANGE) && !target_ip)
2830 new_parent = (src_dp != target_dp);
2831 src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
2833 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, NULL,
2834 inodes, &num_inodes);
2836 tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
2837 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2838 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
2839 if (error == -ENOSPC) {
2841 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
2844 goto out_trans_cancel;
2845 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2848 * Attach the dquots to the inodes
2850 error = xfs_qm_vop_rename_dqattach(inodes);
2852 goto out_trans_cancel;
2855 * Lock all the participating inodes. Depending upon whether
2856 * the target_name exists in the target directory, and
2857 * whether the target directory is the same as the source
2858 * directory, we can lock from 2 to 4 inodes.
2860 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2863 * Join all the inodes to the transaction. From this point on,
2864 * we can rely on either trans_commit or trans_cancel to unlock
2867 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2869 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2870 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2872 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2875 * If we are using project inheritance, we only allow renames
2876 * into our tree when the project IDs are the same; else the
2877 * tree quota mechanism would be circumvented.
2879 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
2880 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
2882 goto out_trans_cancel;
2885 xfs_bmap_init(&free_list, &first_block);
2887 /* RENAME_EXCHANGE is unique from here on. */
2888 if (flags & RENAME_EXCHANGE)
2889 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2890 target_dp, target_name, target_ip,
2891 &free_list, &first_block, spaceres);
2894 * Set up the target.
2896 if (target_ip == NULL) {
2898 * If there's no space reservation, check the entry will
2899 * fit before actually inserting it.
2902 error = xfs_dir_canenter(tp, target_dp, target_name);
2904 goto out_trans_cancel;
2907 * If target does not exist and the rename crosses
2908 * directories, adjust the target directory link count
2909 * to account for the ".." reference from the new entry.
2911 error = xfs_dir_createname(tp, target_dp, target_name,
2912 src_ip->i_ino, &first_block,
2913 &free_list, spaceres);
2914 if (error == -ENOSPC)
2915 goto out_bmap_cancel;
2917 goto out_trans_abort;
2919 xfs_trans_ichgtime(tp, target_dp,
2920 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2922 if (new_parent && src_is_directory) {
2923 error = xfs_bumplink(tp, target_dp);
2925 goto out_trans_abort;
2927 } else { /* target_ip != NULL */
2929 * If target exists and it's a directory, check that both
2930 * target and source are directories and that target can be
2931 * destroyed, or that neither is a directory.
2933 if (S_ISDIR(target_ip->i_d.di_mode)) {
2935 * Make sure target dir is empty.
2937 if (!(xfs_dir_isempty(target_ip)) ||
2938 (target_ip->i_d.di_nlink > 2)) {
2940 goto out_trans_cancel;
2945 * Link the source inode under the target name.
2946 * If the source inode is a directory and we are moving
2947 * it across directories, its ".." entry will be
2948 * inconsistent until we replace that down below.
2950 * In case there is already an entry with the same
2951 * name at the destination directory, remove it first.
2953 error = xfs_dir_replace(tp, target_dp, target_name,
2955 &first_block, &free_list, spaceres);
2957 goto out_trans_abort;
2959 xfs_trans_ichgtime(tp, target_dp,
2960 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2963 * Decrement the link count on the target since the target
2964 * dir no longer points to it.
2966 error = xfs_droplink(tp, target_ip);
2968 goto out_trans_abort;
2970 if (src_is_directory) {
2972 * Drop the link from the old "." entry.
2974 error = xfs_droplink(tp, target_ip);
2976 goto out_trans_abort;
2978 } /* target_ip != NULL */
2981 * Remove the source.
2983 if (new_parent && src_is_directory) {
2985 * Rewrite the ".." entry to point to the new
2988 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
2990 &first_block, &free_list, spaceres);
2991 ASSERT(error != -EEXIST);
2993 goto out_trans_abort;
2997 * We always want to hit the ctime on the source inode.
2999 * This isn't strictly required by the standards since the source
3000 * inode isn't really being changed, but old unix file systems did
3001 * it and some incremental backup programs won't work without it.
3003 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3004 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3007 * Adjust the link count on src_dp. This is necessary when
3008 * renaming a directory, either within one parent when
3009 * the target existed, or across two parent directories.
3011 if (src_is_directory && (new_parent || target_ip != NULL)) {
3014 * Decrement link count on src_directory since the
3015 * entry that's moved no longer points to it.
3017 error = xfs_droplink(tp, src_dp);
3019 goto out_trans_abort;
3022 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3023 &first_block, &free_list, spaceres);
3025 goto out_trans_abort;
3027 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3028 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3030 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3032 return xfs_finish_rename(tp, &free_list);
3035 cancel_flags |= XFS_TRANS_ABORT;
3037 xfs_bmap_cancel(&free_list);
3039 xfs_trans_cancel(tp, cancel_flags);
3048 xfs_mount_t *mp = ip->i_mount;
3049 struct xfs_perag *pag;
3050 unsigned long first_index, mask;
3051 unsigned long inodes_per_cluster;
3053 xfs_inode_t **ilist;
3060 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3062 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
3063 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3064 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
3068 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
3069 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3071 /* really need a gang lookup range call here */
3072 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
3073 first_index, inodes_per_cluster);
3077 for (i = 0; i < nr_found; i++) {
3083 * because this is an RCU protected lookup, we could find a
3084 * recently freed or even reallocated inode during the lookup.
3085 * We need to check under the i_flags_lock for a valid inode
3086 * here. Skip it if it is not valid or the wrong inode.
3088 spin_lock(&ip->i_flags_lock);
3090 (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
3091 spin_unlock(&ip->i_flags_lock);
3094 spin_unlock(&ip->i_flags_lock);
3097 * Do an un-protected check to see if the inode is dirty and
3098 * is a candidate for flushing. These checks will be repeated
3099 * later after the appropriate locks are acquired.
3101 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
3105 * Try to get locks. If any are unavailable or it is pinned,
3106 * then this inode cannot be flushed and is skipped.
3109 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
3111 if (!xfs_iflock_nowait(iq)) {
3112 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3115 if (xfs_ipincount(iq)) {
3117 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3122 * arriving here means that this inode can be flushed. First
3123 * re-check that it's dirty before flushing.
3125 if (!xfs_inode_clean(iq)) {
3127 error = xfs_iflush_int(iq, bp);
3129 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3130 goto cluster_corrupt_out;
3136 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3140 XFS_STATS_INC(xs_icluster_flushcnt);
3141 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3152 cluster_corrupt_out:
3154 * Corruption detected in the clustering loop. Invalidate the
3155 * inode buffer and shut down the filesystem.
3159 * Clean up the buffer. If it was delwri, just release it --
3160 * brelse can handle it with no problems. If not, shut down the
3161 * filesystem before releasing the buffer.
3163 bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
3167 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3169 if (!bufwasdelwri) {
3171 * Just like incore_relse: if we have b_iodone functions,
3172 * mark the buffer as an error and call them. Otherwise
3173 * mark it as stale and brelse.
3178 xfs_buf_ioerror(bp, -EIO);
3187 * Unlocks the flush lock
3189 xfs_iflush_abort(iq, false);
3192 return -EFSCORRUPTED;
3196 * Flush dirty inode metadata into the backing buffer.
3198 * The caller must have the inode lock and the inode flush lock held. The
3199 * inode lock will still be held upon return to the caller, and the inode
3200 * flush lock will be released after the inode has reached the disk.
3202 * The caller must write out the buffer returned in *bpp and release it.
3206 struct xfs_inode *ip,
3207 struct xfs_buf **bpp)
3209 struct xfs_mount *mp = ip->i_mount;
3211 struct xfs_dinode *dip;
3214 XFS_STATS_INC(xs_iflush_count);
3216 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3217 ASSERT(xfs_isiflocked(ip));
3218 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3219 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3223 xfs_iunpin_wait(ip);
3226 * For stale inodes we cannot rely on the backing buffer remaining
3227 * stale in cache for the remaining life of the stale inode and so
3228 * xfs_imap_to_bp() below may give us a buffer that no longer contains
3229 * inodes below. We have to check this after ensuring the inode is
3230 * unpinned so that it is safe to reclaim the stale inode after the
3233 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3239 * This may have been unpinned because the filesystem is shutting
3240 * down forcibly. If that's the case we must not write this inode
3241 * to disk, because the log record didn't make it to disk.
3243 * We also have to remove the log item from the AIL in this case,
3244 * as we wait for an empty AIL as part of the unmount process.
3246 if (XFS_FORCED_SHUTDOWN(mp)) {
3252 * Get the buffer containing the on-disk inode.
3254 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3262 * First flush out the inode that xfs_iflush was called with.
3264 error = xfs_iflush_int(ip, bp);
3269 * If the buffer is pinned then push on the log now so we won't
3270 * get stuck waiting in the write for too long.
3272 if (xfs_buf_ispinned(bp))
3273 xfs_log_force(mp, 0);
3277 * see if other inodes can be gathered into this write
3279 error = xfs_iflush_cluster(ip, bp);
3281 goto cluster_corrupt_out;
3288 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3289 cluster_corrupt_out:
3290 error = -EFSCORRUPTED;
3293 * Unlocks the flush lock
3295 xfs_iflush_abort(ip, false);
3301 struct xfs_inode *ip,
3304 struct xfs_inode_log_item *iip = ip->i_itemp;
3305 struct xfs_dinode *dip;
3306 struct xfs_mount *mp = ip->i_mount;
3308 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3309 ASSERT(xfs_isiflocked(ip));
3310 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3311 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3312 ASSERT(iip != NULL && iip->ili_fields != 0);
3313 ASSERT(ip->i_d.di_version > 1);
3315 /* set *dip = inode's place in the buffer */
3316 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
3318 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3319 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3320 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3321 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3322 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3325 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3326 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3327 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3328 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3329 __func__, ip->i_ino, ip, ip->i_d.di_magic);
3332 if (S_ISREG(ip->i_d.di_mode)) {
3334 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3335 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3336 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3337 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3338 "%s: Bad regular inode %Lu, ptr 0x%p",
3339 __func__, ip->i_ino, ip);
3342 } else if (S_ISDIR(ip->i_d.di_mode)) {
3344 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3345 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3346 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3347 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3348 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3349 "%s: Bad directory inode %Lu, ptr 0x%p",
3350 __func__, ip->i_ino, ip);
3354 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3355 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3356 XFS_RANDOM_IFLUSH_5)) {
3357 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3358 "%s: detected corrupt incore inode %Lu, "
3359 "total extents = %d, nblocks = %Ld, ptr 0x%p",
3360 __func__, ip->i_ino,
3361 ip->i_d.di_nextents + ip->i_d.di_anextents,
3362 ip->i_d.di_nblocks, ip);
3365 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3366 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3367 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3368 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3369 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3374 * Inode item log recovery for v2 inodes are dependent on the
3375 * di_flushiter count for correct sequencing. We bump the flush
3376 * iteration count so we can detect flushes which postdate a log record
3377 * during recovery. This is redundant as we now log every change and
3378 * hence this can't happen but we need to still do it to ensure
3379 * backwards compatibility with old kernels that predate logging all
3382 if (ip->i_d.di_version < 3)
3383 ip->i_d.di_flushiter++;
3386 * Copy the dirty parts of the inode into the on-disk
3387 * inode. We always copy out the core of the inode,
3388 * because if the inode is dirty at all the core must
3391 xfs_dinode_to_disk(dip, &ip->i_d);
3393 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3394 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3395 ip->i_d.di_flushiter = 0;
3397 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3398 if (XFS_IFORK_Q(ip))
3399 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3400 xfs_inobp_check(mp, bp);
3403 * We've recorded everything logged in the inode, so we'd like to clear
3404 * the ili_fields bits so we don't log and flush things unnecessarily.
3405 * However, we can't stop logging all this information until the data
3406 * we've copied into the disk buffer is written to disk. If we did we
3407 * might overwrite the copy of the inode in the log with all the data
3408 * after re-logging only part of it, and in the face of a crash we
3409 * wouldn't have all the data we need to recover.
3411 * What we do is move the bits to the ili_last_fields field. When
3412 * logging the inode, these bits are moved back to the ili_fields field.
3413 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3414 * know that the information those bits represent is permanently on
3415 * disk. As long as the flush completes before the inode is logged
3416 * again, then both ili_fields and ili_last_fields will be cleared.
3418 * We can play with the ili_fields bits here, because the inode lock
3419 * must be held exclusively in order to set bits there and the flush
3420 * lock protects the ili_last_fields bits. Set ili_logged so the flush
3421 * done routine can tell whether or not to look in the AIL. Also, store
3422 * the current LSN of the inode so that we can tell whether the item has
3423 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
3424 * need the AIL lock, because it is a 64 bit value that cannot be read
3427 iip->ili_last_fields = iip->ili_fields;
3428 iip->ili_fields = 0;
3429 iip->ili_logged = 1;
3431 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3432 &iip->ili_item.li_lsn);
3435 * Attach the function xfs_iflush_done to the inode's
3436 * buffer. This will remove the inode from the AIL
3437 * and unlock the inode's flush lock when the inode is
3438 * completely written to disk.
3440 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3442 /* update the lsn in the on disk inode if required */
3443 if (ip->i_d.di_version == 3)
3444 dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
3446 /* generate the checksum. */
3447 xfs_dinode_calc_crc(mp, dip);
3449 ASSERT(bp->b_fspriv != NULL);
3450 ASSERT(bp->b_iodone != NULL);
3454 return -EFSCORRUPTED;