/*
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual Pages.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/lglock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock via
 * the file_lock_lglock. Note that alterations to the list also require that
 * the relevant flc_lock is held.
 */
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the flc_lock and the blocked_lock_lock (acquired in that order).
 * Deleting an entry from the list however only requires the blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}

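/*
 * Example (a sketch, not kernel code): the lockless pattern above -- acquire
 * load, allocate, publish with cmpxchg(), free on a lost race -- is a generic
 * once-only initialization idiom. A hypothetical userspace analogue with C11
 * atomics (make_ctx()/free_ctx() are assumed helpers) might look like:
 *
 *	struct ctx *get_ctx(_Atomic(struct ctx *) *slot)
 *	{
 *		struct ctx *expected = NULL;
 *		struct ctx *ctx = atomic_load_explicit(slot, memory_order_acquire);
 *
 *		if (ctx)
 *			return ctx;
 *		ctx = make_ctx();
 *		if (!atomic_compare_exchange_strong(slot, &expected, ctx)) {
 *			free_ctx(ctx);	// lost the race; use the winner's
 *			ctx = atomic_load_explicit(slot, memory_order_acquire);
 *		}
 *		return ctx;
 *	}
 */
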
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	fl = locks_alloc_lock();
	if (fl == NULL)
		return ERR_PTR(-ENOMEM);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	percpu_rwsem_assert_held(&file_rwsem);

	lg_local_lock(&file_lock_lglock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
	lg_local_unlock(&file_lock_lglock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
	hlist_del_init(&fl->fl_link);
	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}

static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
 * that the flc_lock is also held on insertions we can avoid taking the
 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the fl_block
	 * list does not require the flc_lock, so we must recheck list_empty()
	 * after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
					  struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	fl->fl_nspid = get_pid(task_tgid(current));
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

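/*
 * Quick reference (a sketch) for the checks above: once the POSIX/FLOCK
 * wrappers have ruled out same-owner and non-overlapping pairs, two locks
 * conflict unless both are read locks:
 *
 *	caller		sys		locks_conflict()
 *	F_RDLCK		F_RDLCK		0  (readers share)
 *	F_RDLCK		F_WRLCK		1
 *	F_WRLCK		F_RDLCK		1
 *	F_WRLCK		F_WRLCK		1
 */
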
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = file_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			if (cfl->fl_nspid)
				fl->fl_pid = pid_vnr(cfl->fl_nspid);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turns may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10

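/*
 * Example (a hedged sketch, userspace): the classic ABBA cycle the detector
 * catches. Two processes each lock one byte and then wait for the other's:
 *
 *	struct flock a = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			   .l_start = 0, .l_len = 1 };
 *	struct flock b = a;
 *	b.l_start = 1;
 *
 *	// Process 1: fcntl(fd, F_SETLK, &a); fcntl(fd, F_SETLKW, &b); -- sleeps
 *	// Process 2: fcntl(fd, F_SETLK, &b); fcntl(fd, F_SETLKW, &a);
 *	//
 *	// Process 2's F_SETLKW would close the owner cycle, so instead of
 *	// sleeping forever it fails with errno == EDEADLK.
 */
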
/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	return error;
}

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

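/*
 * Example (a hedged sketch, userspace): the merge/split behaviour described
 * above, as seen by a single lock owner.
 *
 *	struct flock lk = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 100 };
 *	fcntl(fd, F_SETLK, &lk);	// lock bytes [0,99]
 *	lk.l_start = 100;
 *	fcntl(fd, F_SETLK, &lk);	// lock [100,199]; merged into [0,199]
 *
 *	lk.l_type = F_UNLCK;
 *	lk.l_start = 50;
 *	lk.l_len = 10;
 *	fcntl(fd, F_SETLK, &lk);	// splits it into [0,49] and [60,199]
 *					// (the split is where new_fl2 is used)
 */
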
/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = file_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @start: first byte in the file to check
 * @end: last byte in the file to check
 * @type: %F_WRLCK for a write lock, else %F_RDLCK
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
		return false;
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
		return false;
	return locks_conflict(breaker, lease);
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		time_out_leases(file_inode(filp), &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		locks_dispose_list(&dispose);
	}
	return type;
}

/**
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry: dentry to check
 * @arg: type of lease that we're trying to acquire
 * @flags: current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	if (flags & FL_LAYOUT)
		return 0;

	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		return -EAGAIN;

	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
	    (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}

static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}

static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	return error;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 * @priv: private data for lm_setup (may be NULL if lm_setup
 *	doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
			void **priv)
{
	struct inode *inode = file_inode(filp);
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}

		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use when adding a lease
 * @priv: private info for lm_setup when adding a lease (may be
 *	NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
 * if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease, priv);
	else
		return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
	return do_fcntl_add_lease(fd, filp, arg);
}

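/*
 * Example (a hedged sketch, userspace): taking a read lease and arranging
 * for a signal when another open() breaks it. The path is hypothetical.
 *
 *	int fd = open("/tmp/cached", O_RDONLY);
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// deliver SIGRTMIN, not SIGIO
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *
 *	// When another process opens the file for writing, the kernel sends
 *	// the signal and the holder has lease_break_time seconds (45 by
 *	// default, see above; tunable via /proc/sys/fs/lease-break-time) to
 *	// release the lease with fcntl(fd, F_SETLEASE, F_UNLCK).
 */
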
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_inode_wait(inode, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_inode_wait(inode, fl);
			break;
		default:
			BUG();
	}
	return res;
}
EXPORT_SYMBOL(locks_lock_inode_wait);

/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a 'mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}

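/*
 * Example (a hedged sketch, userspace): the common "single instance" idiom.
 * The lock file path is hypothetical.
 *
 *	#include <sys/file.h>
 *
 *	int fd = open("/var/lock/mydaemon", O_RDWR | O_CREAT, 0644);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK) {
 *		// another instance already holds the lock
 *	}
 *	// The lock belongs to the open file description, so it survives
 *	// fork() and is only dropped on the last close() or an explicit
 *	// flock(fd, LOCK_UN).
 */
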
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_GETLK;
		file_lock.fl_flags |= FL_OFDLCK;
		file_lock.fl_owner = filp;
	}

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto rel_priv;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
rel_priv:
	locks_release_private(&file_lock);
out:
	return error;
}

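/*
 * Example (a hedged sketch, userspace): probing for a conflicting lock
 * without taking one.
 *
 *	struct flock lk = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };	// l_len == 0: to EOF
 *	if (fcntl(fd, F_GETLK, &lk) == 0) {
 *		if (lk.l_type == F_UNLCK)
 *			;	// no conflict; an F_SETLK would have succeeded
 *		else
 *			printf("blocked by pid %d\n", (int)lk.l_pid);
 *	}
 *	// Note: l_pid is reported as -1 when the blocker is an OFD lock
 *	// (see posix_lock_to_flock() above).
 */
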
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock the file system should return
 * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
 * with the result. If the request timed out the callback routine will return a
 * nonzero return code and the file system should release the lock. The file
 * system is also responsible for keeping a corresponding posix lock when it
 * grants a lock so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

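/*
 * Example (a hedged sketch, not a real filesystem): the shape of an
 * asynchronous ->lock() method as described above. "myfs" and
 * myfs_queue_lock_request() are hypothetical; a real implementation would
 * hand the request to its lock server rather than granting inline.
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (fl->fl_flags & FL_SLEEP) {
 *			myfs_queue_lock_request(filp, fl);
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		return posix_lock_file(filp, fl, NULL);
 *	}
 *
 *	// Later, when the grant/deny result arrives from the server, the
 *	// filesystem completes the deferred request with:
 *	//	fl->fl_lmops->lm_grant(fl, result);
 */
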
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	inode = file_inode(filp);

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}

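/*
 * Example (a hedged sketch, userspace): open file description (OFD) locks.
 * Unlike classic F_SETLK, they are owned by the open file description rather
 * than the process, so they are only dropped when the last descriptor for
 * that description is closed.
 *
 *	struct flock lk = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	lk.l_pid = 0;			// must be zero for the OFD commands
 *	if (fcntl(fd, F_OFD_SETLK, &lk) == -1)
 *		perror("F_OFD_SETLK");
 */
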
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		file_lock.fl_flags |= FL_OFDLCK;
		file_lock.fl_owner = filp;
	}

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
	locks_release_private(&file_lock);
out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = file_inode(filp);

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(file_inode(filp), &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl = {
		.fl_owner = filp,
		.fl_pid = current->tgid,
		.fl_file = filp,
		.fl_flags = FL_FLOCK,
		.fl_type = F_UNLCK,
		.fl_end = OFFSET_MAX,
	};
	struct inode *inode = file_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);

	locks_dispose_list(&dispose);
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);
}
/**
 * posix_unblock_lock - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
	int status = 0;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
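/*
 * Illustrative only: how a lock manager might pair a deferred request
 * with a later cancel. The surrounding bookkeeping (queueing the waiter,
 * deciding to abandon it) is hypothetical:
 *
 *	if (vfs_lock_file(filp, F_SETLKW, fl, NULL) == FILE_LOCK_DEFERRED) {
 *		...
 *		if (request_was_abandoned)
 *			posix_unblock_lock(fl);	// -ENOENT if already granted
 *	}
 */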
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = file_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_puts(f, "FLOCK  MSNFS     ");
		} else {
			seq_puts(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;

	fl = hlist_entry(v, struct file_lock, fl_link);

	lock_get_status(f, fl, iter->li_pos, "");

	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}
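/*
 * Sample /proc/locks output produced by the above (field spacing
 * approximate); blocked waiters are shown with "->" under the same id:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5398 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:7709 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:7709 0 EOF
 */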
static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "");
	}
}
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
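/*
 * These lines appear under /proc/<pid>/fdinfo/<fd>, e.g. (sample output):
 *
 *	lock:	1: POSIX  ADVISORY  WRITE 1234 08:01:5398 0 EOF
 */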
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
}
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
}
static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations,
					sizeof(struct locks_iterator));
}
static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
fs_initcall(proc_locks_init);
#endif /* CONFIG_PROC_FS */
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i)
		INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));

	return 0;
}
core_initcall(filelock_init);