[GFS2] Fix calculation of demote state
[cascardo/linux.git] / fs / gfs2 / glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <linux/rwsem.h>
25 #include <asm/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
30
31 #include "gfs2.h"
32 #include "incore.h"
33 #include "glock.h"
34 #include "glops.h"
35 #include "inode.h"
36 #include "lm.h"
37 #include "lops.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "super.h"
41 #include "util.h"
42
43 struct gfs2_gl_hash_bucket {
44         struct hlist_head hb_list;
45 };
46
47 struct glock_iter {
48         int hash;                     /* hash bucket index         */
49         struct gfs2_sbd *sdp;         /* incore superblock         */
50         struct gfs2_glock *gl;        /* current glock struct      */
51         struct hlist_head *hb_list;   /* current hash bucket ptr   */
52         struct seq_file *seq;         /* sequence file for debugfs */
53         char string[512];             /* scratch space             */
54 };
55
56 typedef void (*glock_examiner) (struct gfs2_glock * gl);
57
58 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
59 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
60 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
61 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
62 static DECLARE_RWSEM(gfs2_umount_flush_sem);
63 static struct dentry *gfs2_root;
64
65 #define GFS2_GL_HASH_SHIFT      15
66 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
67 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
68
69 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
71
72 /*
73  * Despite what you might think, the numbers below are not arbitrary :-)
74  * They are taken from the ipv4 routing hash code, which is well tested
75  * and thus should be nearly optimal. Later on we might tweak the numbers
76  * but for now this should be fine.
77  *
78  * The reason for putting the locks in a separate array from the list heads
79  * is that we can have fewer locks than list heads and save memory. We use
80  * the same hash function for both, but with a different hash mask.
81  */
82 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
83         defined(CONFIG_PROVE_LOCKING)
84
85 #ifdef CONFIG_LOCKDEP
86 # define GL_HASH_LOCK_SZ        256
87 #else
88 # if NR_CPUS >= 32
89 #  define GL_HASH_LOCK_SZ       4096
90 # elif NR_CPUS >= 16
91 #  define GL_HASH_LOCK_SZ       2048
92 # elif NR_CPUS >= 8
93 #  define GL_HASH_LOCK_SZ       1024
94 # elif NR_CPUS >= 4
95 #  define GL_HASH_LOCK_SZ       512
96 # else
97 #  define GL_HASH_LOCK_SZ       256
98 # endif
99 #endif
100
101 /* We never want more locks than chains */
102 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
103 # undef GL_HASH_LOCK_SZ
104 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
105 #endif
106
107 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
108
109 static inline rwlock_t *gl_lock_addr(unsigned int x)
110 {
111         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
112 }
113 #else /* not SMP, so no spinlocks required */
114 static inline rwlock_t *gl_lock_addr(unsigned int x)
115 {
116         return NULL;
117 }
118 #endif
119
120 /**
121  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
122  * @actual: the current state of the lock
123  * @requested: the lock state that was requested by the caller
124  * @flags: the modifier flags passed in by the caller
125  *
126  * Returns: 1 if the locks are compatible, 0 otherwise
127  */
128
129 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
130                                    int flags)
131 {
132         if (actual == requested)
133                 return 1;
134
135         if (flags & GL_EXACT)
136                 return 0;
137
138         if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
139                 return 1;
140
141         if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
142                 return 1;
143
144         return 0;
145 }
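
/*
 * For example (editorial note): a glock already held in LM_ST_EXCLUSIVE
 * satisfies an LM_ST_SHARED request unless the caller passed GL_EXACT,
 * and any held state satisfies a request made with LM_FLAG_ANY.
 */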
146
147 /**
148  * gl_hash() - Turn glock number into hash bucket number
149  * @sdp: The GFS2 superblock, @name: The lock name
150  *
151  * Returns: The number of the corresponding hash bucket
152  */
153
154 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
155                             const struct lm_lockname *name)
156 {
157         unsigned int h;
158
159         h = jhash(&name->ln_number, sizeof(u64), 0);
160         h = jhash(&name->ln_type, sizeof(unsigned int), h);
161         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
162         h &= GFS2_GL_HASH_MASK;
163
164         return h;
165 }
166
167 /**
168  * glock_free() - Perform a few checks and then release struct gfs2_glock
169  * @gl: The glock to release
170  *
171  * Also calls lock module to release its internal structure for this glock.
172  *
173  */
174
175 static void glock_free(struct gfs2_glock *gl)
176 {
177         struct gfs2_sbd *sdp = gl->gl_sbd;
178         struct inode *aspace = gl->gl_aspace;
179
180         gfs2_lm_put_lock(sdp, gl->gl_lock);
181
182         if (aspace)
183                 gfs2_aspace_put(aspace);
184
185         kmem_cache_free(gfs2_glock_cachep, gl);
186 }
187
188 /**
189  * gfs2_glock_hold() - increment reference count on glock
190  * @gl: The glock to hold
191  *
192  */
193
194 void gfs2_glock_hold(struct gfs2_glock *gl)
195 {
196         atomic_inc(&gl->gl_ref);
197 }
198
199 /**
200  * gfs2_glock_put() - Decrement reference count on glock
201  * @gl: The glock to put
202  *
203  */
204
205 int gfs2_glock_put(struct gfs2_glock *gl)
206 {
207         int rv = 0;
208         struct gfs2_sbd *sdp = gl->gl_sbd;
209
210         write_lock(gl_lock_addr(gl->gl_hash));
211         if (atomic_dec_and_test(&gl->gl_ref)) {
212                 hlist_del(&gl->gl_list);
213                 write_unlock(gl_lock_addr(gl->gl_hash));
214                 BUG_ON(spin_is_locked(&gl->gl_spin));
215                 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
216                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
217                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
218                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
219                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
220                 glock_free(gl);
221                 rv = 1;
222                 goto out;
223         }
224         write_unlock(gl_lock_addr(gl->gl_hash));
225 out:
226         return rv;
227 }
228
229 /**
230  * search_bucket() - Find struct gfs2_glock by lock number
231  * @bucket: the bucket to search
232  * @name: The lock name
233  *
234  * Returns: NULL, or the struct gfs2_glock with the requested number
235  */
236
237 static struct gfs2_glock *search_bucket(unsigned int hash,
238                                         const struct gfs2_sbd *sdp,
239                                         const struct lm_lockname *name)
240 {
241         struct gfs2_glock *gl;
242         struct hlist_node *h;
243
244         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
245                 if (!lm_name_equal(&gl->gl_name, name))
246                         continue;
247                 if (gl->gl_sbd != sdp)
248                         continue;
249
250                 atomic_inc(&gl->gl_ref);
251
252                 return gl;
253         }
254
255         return NULL;
256 }
257
258 /**
259  * gfs2_glock_find() - Find glock by lock number
260  * @sdp: The GFS2 superblock
261  * @name: The lock name
262  *
263  * Returns: NULL, or the struct gfs2_glock with the requested number
264  */
265
266 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
267                                           const struct lm_lockname *name)
268 {
269         unsigned int hash = gl_hash(sdp, name);
270         struct gfs2_glock *gl;
271
272         read_lock(gl_lock_addr(hash));
273         gl = search_bucket(hash, sdp, name);
274         read_unlock(gl_lock_addr(hash));
275
276         return gl;
277 }
278
279 /**
280  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
281  * @sdp: The GFS2 superblock
282  * @number: the lock number
283  * @glops: The glock_operations to use
284  * @create: If 0, don't create the glock if it doesn't exist
285  * @glp: the glock is returned here
286  *
287  * This does not lock a glock, just finds/creates structures for one.
288  *
289  * Returns: errno
290  */
291
292 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
293                    const struct gfs2_glock_operations *glops, int create,
294                    struct gfs2_glock **glp)
295 {
296         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
297         struct gfs2_glock *gl, *tmp;
298         unsigned int hash = gl_hash(sdp, &name);
299         int error;
300
301         read_lock(gl_lock_addr(hash));
302         gl = search_bucket(hash, sdp, &name);
303         read_unlock(gl_lock_addr(hash));
304
305         if (gl || !create) {
306                 *glp = gl;
307                 return 0;
308         }
309
310         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
311         if (!gl)
312                 return -ENOMEM;
313
314         gl->gl_flags = 0;
315         gl->gl_name = name;
316         atomic_set(&gl->gl_ref, 1);
317         gl->gl_state = LM_ST_UNLOCKED;
318         gl->gl_hash = hash;
319         gl->gl_owner_pid = 0;
320         gl->gl_ip = 0;
321         gl->gl_ops = glops;
322         gl->gl_req_gh = NULL;
323         gl->gl_req_bh = NULL;
324         gl->gl_vn = 0;
325         gl->gl_stamp = jiffies;
326         gl->gl_object = NULL;
327         gl->gl_sbd = sdp;
328         gl->gl_aspace = NULL;
329         lops_init_le(&gl->gl_le, &gfs2_glock_lops);
330
331         /* If this glock protects actual on-disk data or metadata blocks,
332            create a VFS inode to manage the pages/buffers holding them. */
333         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
334                 gl->gl_aspace = gfs2_aspace_get(sdp);
335                 if (!gl->gl_aspace) {
336                         error = -ENOMEM;
337                         goto fail;
338                 }
339         }
340
341         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
342         if (error)
343                 goto fail_aspace;
344
345         write_lock(gl_lock_addr(hash));
346         tmp = search_bucket(hash, sdp, &name);
347         if (tmp) {
348                 write_unlock(gl_lock_addr(hash));
349                 glock_free(gl);
350                 gl = tmp;
351         } else {
352                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
353                 write_unlock(gl_lock_addr(hash));
354         }
355
356         *glp = gl;
357
358         return 0;
359
360 fail_aspace:
361         if (gl->gl_aspace)
362                 gfs2_aspace_put(gl->gl_aspace);
363 fail:
364         kmem_cache_free(gfs2_glock_cachep, gl);
365         return error;
366 }
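
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * looks up or creates the glock for a disk address and drops its
 * reference when finished.  The variables sdp, no_addr and gl below are
 * assumed to exist in the caller.
 *
 *      struct gfs2_glock *gl;
 *      int error;
 *
 *      error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *      if (error)
 *              return error;
 *      ...
 *      gfs2_glock_put(gl);
 */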
367
368 /**
369  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
370  * @gl: the glock
371  * @state: the state we're requesting
372  * @flags: the modifier flags
373  * @gh: the holder structure
374  *
375  */
376
377 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
378                       struct gfs2_holder *gh)
379 {
380         INIT_LIST_HEAD(&gh->gh_list);
381         gh->gh_gl = gl;
382         gh->gh_ip = (unsigned long)__builtin_return_address(0);
383         gh->gh_owner_pid = current->pid;
384         gh->gh_state = state;
385         gh->gh_flags = flags;
386         gh->gh_error = 0;
387         gh->gh_iflags = 0;
388         gfs2_glock_hold(gl);
389 }
390
391 /**
392  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
393  * @state: the state we're requesting
394  * @flags: the modifier flags
395  * @gh: the holder structure
396  *
397  * Don't mess with the glock.
398  *
399  */
400
401 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
402 {
403         gh->gh_state = state;
404         gh->gh_flags = flags;
405         gh->gh_iflags = 0;
406         gh->gh_ip = (unsigned long)__builtin_return_address(0);
407 }
408
409 /**
410  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
411  * @gh: the holder structure
412  *
413  */
414
415 void gfs2_holder_uninit(struct gfs2_holder *gh)
416 {
417         gfs2_glock_put(gh->gh_gl);
418         gh->gh_gl = NULL;
419         gh->gh_ip = 0;
420 }
421
422 static void gfs2_holder_wake(struct gfs2_holder *gh)
423 {
424         clear_bit(HIF_WAIT, &gh->gh_iflags);
425         smp_mb__after_clear_bit();
426         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
427 }
428
429 static int just_schedule(void *word)
430 {
431         schedule();
432         return 0;
433 }
434
435 static void wait_on_holder(struct gfs2_holder *gh)
436 {
437         might_sleep();
438         wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
439 }
440
441 static void gfs2_demote_wake(struct gfs2_glock *gl)
442 {
443         clear_bit(GLF_DEMOTE, &gl->gl_flags);
444         smp_mb__after_clear_bit();
445         wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
446 }
447
448 static void wait_on_demote(struct gfs2_glock *gl)
449 {
450         might_sleep();
451         wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
452 }
453
454 /**
455  * rq_mutex - process a mutex request in the queue
456  * @gh: the glock holder
457  *
458  * Returns: 1 if the queue is blocked
459  */
460
461 static int rq_mutex(struct gfs2_holder *gh)
462 {
463         struct gfs2_glock *gl = gh->gh_gl;
464
465         list_del_init(&gh->gh_list);
466         /*  gh->gh_error never examined.  */
467         set_bit(GLF_LOCK, &gl->gl_flags);
468         clear_bit(HIF_WAIT, &gh->gh_iflags);
469         smp_mb();
470         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
471
472         return 1;
473 }
474
475 /**
476  * rq_promote - process a promote request in the queue
477  * @gh: the glock holder
478  *
479  * Acquire a new inter-node lock, or change a lock state to more restrictive.
480  *
481  * Returns: 1 if the queue is blocked
482  */
483
484 static int rq_promote(struct gfs2_holder *gh)
485 {
486         struct gfs2_glock *gl = gh->gh_gl;
487         struct gfs2_sbd *sdp = gl->gl_sbd;
488
489         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
490                 if (list_empty(&gl->gl_holders)) {
491                         gl->gl_req_gh = gh;
492                         set_bit(GLF_LOCK, &gl->gl_flags);
493                         spin_unlock(&gl->gl_spin);
494
495                         if (atomic_read(&sdp->sd_reclaim_count) >
496                             gfs2_tune_get(sdp, gt_reclaim_limit) &&
497                             !(gh->gh_flags & LM_FLAG_PRIORITY)) {
498                                 gfs2_reclaim_glock(sdp);
499                                 gfs2_reclaim_glock(sdp);
500                         }
501
502                         gfs2_glock_xmote_th(gh->gh_gl, gh);
503                         spin_lock(&gl->gl_spin);
504                 }
505                 return 1;
506         }
507
508         if (list_empty(&gl->gl_holders)) {
509                 set_bit(HIF_FIRST, &gh->gh_iflags);
510                 set_bit(GLF_LOCK, &gl->gl_flags);
511         } else {
512                 struct gfs2_holder *next_gh;
513                 if (gh->gh_state == LM_ST_EXCLUSIVE)
514                         return 1;
515                 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
516                                      gh_list);
517                 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
518                         return 1;
519         }
520
521         list_move_tail(&gh->gh_list, &gl->gl_holders);
522         gh->gh_error = 0;
523         set_bit(HIF_HOLDER, &gh->gh_iflags);
524
525         gfs2_holder_wake(gh);
526
527         return 0;
528 }
529
530 /**
531  * rq_demote - process a demote request in the queue
532  * @gl: the glock
533  *
534  * Returns: 1 if the queue is blocked
535  */
536
537 static int rq_demote(struct gfs2_glock *gl)
538 {
539         if (!list_empty(&gl->gl_holders))
540                 return 1;
541
542         if (gl->gl_state == gl->gl_demote_state ||
543             gl->gl_state == LM_ST_UNLOCKED) {
544                 gfs2_demote_wake(gl);
545                 return 0;
546         }
547         set_bit(GLF_LOCK, &gl->gl_flags);
548         if (gl->gl_demote_state == LM_ST_UNLOCKED ||
549             gl->gl_state != LM_ST_EXCLUSIVE) {
550                 spin_unlock(&gl->gl_spin);
551                 gfs2_glock_drop_th(gl);
552         } else {
553                 spin_unlock(&gl->gl_spin);
554                 gfs2_glock_xmote_th(gl, NULL);
555         }
556         spin_lock(&gl->gl_spin);
557
558         return 0;
559 }
560
561 /**
562  * run_queue - process holder structures on a glock
563  * @gl: the glock
564  *
565  */
566 static void run_queue(struct gfs2_glock *gl)
567 {
568         struct gfs2_holder *gh;
569         int blocked = 1;
570
571         for (;;) {
572                 if (test_bit(GLF_LOCK, &gl->gl_flags))
573                         break;
574
575                 if (!list_empty(&gl->gl_waiters1)) {
576                         gh = list_entry(gl->gl_waiters1.next,
577                                         struct gfs2_holder, gh_list);
578
579                         if (test_bit(HIF_MUTEX, &gh->gh_iflags))
580                                 blocked = rq_mutex(gh);
581                         else
582                                 gfs2_assert_warn(gl->gl_sbd, 0);
583
584                 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
585                         blocked = rq_demote(gl);
586                 } else if (!list_empty(&gl->gl_waiters3)) {
587                         gh = list_entry(gl->gl_waiters3.next,
588                                         struct gfs2_holder, gh_list);
589
590                         if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
591                                 blocked = rq_promote(gh);
592                         else
593                                 gfs2_assert_warn(gl->gl_sbd, 0);
594
595                 } else
596                         break;
597
598                 if (blocked)
599                         break;
600         }
601 }
602
603 /**
604  * gfs2_glmutex_lock - acquire a local lock on a glock
605  * @gl: the glock
606  *
607  * Gives caller exclusive access to manipulate a glock structure.
608  */
609
610 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
611 {
612         struct gfs2_holder gh;
613
614         gfs2_holder_init(gl, 0, 0, &gh);
615         set_bit(HIF_MUTEX, &gh.gh_iflags);
616         if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
617                 BUG();
618
619         spin_lock(&gl->gl_spin);
620         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
621                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
622         } else {
623                 gl->gl_owner_pid = current->pid;
624                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
625                 clear_bit(HIF_WAIT, &gh.gh_iflags);
626                 smp_mb();
627                 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
628         }
629         spin_unlock(&gl->gl_spin);
630
631         wait_on_holder(&gh);
632         gfs2_holder_uninit(&gh);
633 }
634
635 /**
636  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
637  * @gl: the glock
638  *
639  * Returns: 1 if the glock is acquired
640  */
641
642 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
643 {
644         int acquired = 1;
645
646         spin_lock(&gl->gl_spin);
647         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
648                 acquired = 0;
649         } else {
650                 gl->gl_owner_pid = current->pid;
651                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
652         }
653         spin_unlock(&gl->gl_spin);
654
655         return acquired;
656 }
657
658 /**
659  * gfs2_glmutex_unlock - release a local lock on a glock
660  * @gl: the glock
661  *
662  */
663
664 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
665 {
666         spin_lock(&gl->gl_spin);
667         clear_bit(GLF_LOCK, &gl->gl_flags);
668         gl->gl_owner_pid = 0;
669         gl->gl_ip = 0;
670         run_queue(gl);
671         BUG_ON(!spin_is_locked(&gl->gl_spin));
672         spin_unlock(&gl->gl_spin);
673 }
674
675 /**
676  * handle_callback - process a demote request
677  * @gl: the glock
678  * @state: the state the caller wants us to change to
679  *
680  * There are only two requests that we are going to see in actual
681  * practise: LM_ST_SHARED and LM_ST_UNLOCKED
682  */
683
684 static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
685 {
686         spin_lock(&gl->gl_spin);
687         if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
688                 gl->gl_demote_state = state;
689                 gl->gl_demote_time = jiffies;
690                 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
691                     gl->gl_object) {
692                         struct inode *inode = igrab(gl->gl_object);
693                         spin_unlock(&gl->gl_spin);
694                         if (inode) {
695                                 d_prune_aliases(inode);
696                                 iput(inode);
697                         }
698                         return;
699                 }
700         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
701                         gl->gl_demote_state != state) {
702                 gl->gl_demote_state = LM_ST_UNLOCKED;
703         }
704         spin_unlock(&gl->gl_spin);
705 }
706
707 /**
708  * state_change - record that the glock is now in a different state
709  * @gl: the glock
710  * @new_state: the new state
711  *
712  */
713
714 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
715 {
716         int held1, held2;
717
718         held1 = (gl->gl_state != LM_ST_UNLOCKED);
719         held2 = (new_state != LM_ST_UNLOCKED);
720
721         if (held1 != held2) {
722                 if (held2)
723                         gfs2_glock_hold(gl);
724                 else
725                         gfs2_glock_put(gl);
726         }
727
728         gl->gl_state = new_state;
729 }
730
731 /**
732  * xmote_bh - Called after the lock module is done acquiring a lock
733  * @gl: The glock in question
734  * @ret: the int returned from the lock module
735  *
736  */
737
738 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
739 {
740         struct gfs2_sbd *sdp = gl->gl_sbd;
741         const struct gfs2_glock_operations *glops = gl->gl_ops;
742         struct gfs2_holder *gh = gl->gl_req_gh;
743         int prev_state = gl->gl_state;
744         int op_done = 1;
745
746         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
747         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
748         gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
749
750         state_change(gl, ret & LM_OUT_ST_MASK);
751
752         if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
753                 if (glops->go_inval)
754                         glops->go_inval(gl, DIO_METADATA);
755         } else if (gl->gl_state == LM_ST_DEFERRED) {
756                 /* We might not want to do this here.
757                    Look at moving to the inode glops. */
758                 if (glops->go_inval)
759                         glops->go_inval(gl, 0);
760         }
761
762         /*  Deal with each possible exit condition  */
763
764         if (!gh) {
765                 gl->gl_stamp = jiffies;
766                 if (ret & LM_OUT_CANCELED) {
767                         op_done = 0;
768                 } else {
769                         spin_lock(&gl->gl_spin);
770                         if (gl->gl_state != gl->gl_demote_state) {
771                                 gl->gl_req_bh = NULL;
772                                 spin_unlock(&gl->gl_spin);
773                                 gfs2_glock_drop_th(gl);
774                                 gfs2_glock_put(gl);
775                                 return;
776                         }
777                         gfs2_demote_wake(gl);
778                         spin_unlock(&gl->gl_spin);
779                 }
780         } else {
781                 spin_lock(&gl->gl_spin);
782                 list_del_init(&gh->gh_list);
783                 gh->gh_error = -EIO;
784                 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 
785                         goto out;
786                 gh->gh_error = GLR_CANCELED;
787                 if (ret & LM_OUT_CANCELED) 
788                         goto out;
789                 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
790                         list_add_tail(&gh->gh_list, &gl->gl_holders);
791                         gh->gh_error = 0;
792                         set_bit(HIF_HOLDER, &gh->gh_iflags);
793                         set_bit(HIF_FIRST, &gh->gh_iflags);
794                         op_done = 0;
795                         goto out;
796                 }
797                 gh->gh_error = GLR_TRYFAILED;
798                 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
799                         goto out;
800                 gh->gh_error = -EINVAL;
801                 if (gfs2_assert_withdraw(sdp, 0) == -1)
802                         fs_err(sdp, "ret = 0x%.8X\n", ret);
803 out:
804                 spin_unlock(&gl->gl_spin);
805         }
806
807         if (glops->go_xmote_bh)
808                 glops->go_xmote_bh(gl);
809
810         if (op_done) {
811                 spin_lock(&gl->gl_spin);
812                 gl->gl_req_gh = NULL;
813                 gl->gl_req_bh = NULL;
814                 clear_bit(GLF_LOCK, &gl->gl_flags);
815                 run_queue(gl);
816                 spin_unlock(&gl->gl_spin);
817         }
818
819         gfs2_glock_put(gl);
820
821         if (gh)
822                 gfs2_holder_wake(gh);
823 }
824
825 /**
826  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
827  * @gl: The glock in question
828  * @state: the requested state
829  * @flags: modifier flags to the lock call
830  *
831  */
832
833 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
834 {
835         struct gfs2_sbd *sdp = gl->gl_sbd;
836         int flags = gh ? gh->gh_flags : 0;
837         unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
838         const struct gfs2_glock_operations *glops = gl->gl_ops;
839         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
840                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
841                                  LM_FLAG_PRIORITY);
842         unsigned int lck_ret;
843
844         if (glops->go_xmote_th)
845                 glops->go_xmote_th(gl);
846
847         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
848         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
849         gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
850         gfs2_assert_warn(sdp, state != gl->gl_state);
851
852         gfs2_glock_hold(gl);
853         gl->gl_req_bh = xmote_bh;
854
855         lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
856
857         if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
858                 return;
859
860         if (lck_ret & LM_OUT_ASYNC)
861                 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
862         else
863                 xmote_bh(gl, lck_ret);
864 }
865
866 /**
867  * drop_bh - Called after a lock module unlock completes
868  * @gl: the glock
869  * @ret: the return status
870  *
871  * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
872  * Doesn't drop the reference on the glock the top half took out
873  *
874  */
875
876 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
877 {
878         struct gfs2_sbd *sdp = gl->gl_sbd;
879         const struct gfs2_glock_operations *glops = gl->gl_ops;
880         struct gfs2_holder *gh = gl->gl_req_gh;
881
882         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
883         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
884         gfs2_assert_warn(sdp, !ret);
885
886         state_change(gl, LM_ST_UNLOCKED);
887         gfs2_demote_wake(gl);
888
889         if (glops->go_inval)
890                 glops->go_inval(gl, DIO_METADATA);
891
892         if (gh) {
893                 spin_lock(&gl->gl_spin);
894                 list_del_init(&gh->gh_list);
895                 gh->gh_error = 0;
896                 spin_unlock(&gl->gl_spin);
897         }
898
899         spin_lock(&gl->gl_spin);
900         gl->gl_req_gh = NULL;
901         gl->gl_req_bh = NULL;
902         clear_bit(GLF_LOCK, &gl->gl_flags);
903         run_queue(gl);
904         spin_unlock(&gl->gl_spin);
905
906         gfs2_glock_put(gl);
907
908         if (gh)
909                 gfs2_holder_wake(gh);
910 }
911
912 /**
913  * gfs2_glock_drop_th - call into the lock module to unlock a lock
914  * @gl: the glock
915  *
916  */
917
918 static void gfs2_glock_drop_th(struct gfs2_glock *gl)
919 {
920         struct gfs2_sbd *sdp = gl->gl_sbd;
921         const struct gfs2_glock_operations *glops = gl->gl_ops;
922         unsigned int ret;
923
924         if (glops->go_drop_th)
925                 glops->go_drop_th(gl);
926
927         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
928         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
929         gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
930
931         gfs2_glock_hold(gl);
932         gl->gl_req_bh = drop_bh;
933
934         ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
935
936         if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
937                 return;
938
939         if (!ret)
940                 drop_bh(gl, ret);
941         else
942                 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
943 }
944
945 /**
946  * do_cancels - cancel requests for locks stuck waiting on an expire flag
947  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
948  *
949  * Don't cancel GL_NOCANCEL requests.
950  */
951
952 static void do_cancels(struct gfs2_holder *gh)
953 {
954         struct gfs2_glock *gl = gh->gh_gl;
955
956         spin_lock(&gl->gl_spin);
957
958         while (gl->gl_req_gh != gh &&
959                !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
960                !list_empty(&gh->gh_list)) {
961                 if (gl->gl_req_bh && !(gl->gl_req_gh &&
962                                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
963                         spin_unlock(&gl->gl_spin);
964                         gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
965                         msleep(100);
966                         spin_lock(&gl->gl_spin);
967                 } else {
968                         spin_unlock(&gl->gl_spin);
969                         msleep(100);
970                         spin_lock(&gl->gl_spin);
971                 }
972         }
973
974         spin_unlock(&gl->gl_spin);
975 }
976
977 /**
978  * glock_wait_internal - wait on a glock acquisition
979  * @gh: the glock holder
980  *
981  * Returns: 0 on success
982  */
983
984 static int glock_wait_internal(struct gfs2_holder *gh)
985 {
986         struct gfs2_glock *gl = gh->gh_gl;
987         struct gfs2_sbd *sdp = gl->gl_sbd;
988         const struct gfs2_glock_operations *glops = gl->gl_ops;
989
990         if (test_bit(HIF_ABORTED, &gh->gh_iflags))
991                 return -EIO;
992
993         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
994                 spin_lock(&gl->gl_spin);
995                 if (gl->gl_req_gh != gh &&
996                     !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
997                     !list_empty(&gh->gh_list)) {
998                         list_del_init(&gh->gh_list);
999                         gh->gh_error = GLR_TRYFAILED;
1000                         run_queue(gl);
1001                         spin_unlock(&gl->gl_spin);
1002                         return gh->gh_error;
1003                 }
1004                 spin_unlock(&gl->gl_spin);
1005         }
1006
1007         if (gh->gh_flags & LM_FLAG_PRIORITY)
1008                 do_cancels(gh);
1009
1010         wait_on_holder(gh);
1011         if (gh->gh_error)
1012                 return gh->gh_error;
1013
1014         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1015         gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1016                                                    gh->gh_flags));
1017
1018         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1019                 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1020
1021                 if (glops->go_lock) {
1022                         gh->gh_error = glops->go_lock(gh);
1023                         if (gh->gh_error) {
1024                                 spin_lock(&gl->gl_spin);
1025                                 list_del_init(&gh->gh_list);
1026                                 spin_unlock(&gl->gl_spin);
1027                         }
1028                 }
1029
1030                 spin_lock(&gl->gl_spin);
1031                 gl->gl_req_gh = NULL;
1032                 gl->gl_req_bh = NULL;
1033                 clear_bit(GLF_LOCK, &gl->gl_flags);
1034                 run_queue(gl);
1035                 spin_unlock(&gl->gl_spin);
1036         }
1037
1038         return gh->gh_error;
1039 }
1040
1041 static inline struct gfs2_holder *
1042 find_holder_by_owner(struct list_head *head, pid_t pid)
1043 {
1044         struct gfs2_holder *gh;
1045
1046         list_for_each_entry(gh, head, gh_list) {
1047                 if (gh->gh_owner_pid == pid)
1048                         return gh;
1049         }
1050
1051         return NULL;
1052 }
1053
1054 static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1055 {
1056         va_list args;
1057
1058         va_start(args, fmt);
1059         if (gi) {
1060                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
1061                 seq_printf(gi->seq, "%s", gi->string);
1062         }
1063         else
1064                 vprintk(fmt, args);
1065         va_end(args);
1066 }
1067
1068 /**
1069  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1070  * @gh: the holder structure to add
1071  *
1072  */
1073
1074 static void add_to_queue(struct gfs2_holder *gh)
1075 {
1076         struct gfs2_glock *gl = gh->gh_gl;
1077         struct gfs2_holder *existing;
1078
1079         BUG_ON(!gh->gh_owner_pid);
1080         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1081                 BUG();
1082
1083         existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
1084         if (existing) {
1085                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1086                 printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
1087                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1088                                 existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
1089                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1090                 printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
1091                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1092                                 gl->gl_name.ln_type, gl->gl_state);
1093                 BUG();
1094         }
1095
1096         existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
1097         if (existing) {
1098                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1099                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1100                 BUG();
1101         }
1102
1103         if (gh->gh_flags & LM_FLAG_PRIORITY)
1104                 list_add(&gh->gh_list, &gl->gl_waiters3);
1105         else
1106                 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1107 }
1108
1109 /**
1110  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1111  * @gh: the holder structure
1112  *
1113  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1114  *
1115  * Returns: 0, GLR_TRYFAILED, or errno on failure
1116  */
1117
1118 int gfs2_glock_nq(struct gfs2_holder *gh)
1119 {
1120         struct gfs2_glock *gl = gh->gh_gl;
1121         struct gfs2_sbd *sdp = gl->gl_sbd;
1122         int error = 0;
1123
1124 restart:
1125         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1126                 set_bit(HIF_ABORTED, &gh->gh_iflags);
1127                 return -EIO;
1128         }
1129
1130         set_bit(HIF_PROMOTE, &gh->gh_iflags);
1131
1132         spin_lock(&gl->gl_spin);
1133         add_to_queue(gh);
1134         run_queue(gl);
1135         spin_unlock(&gl->gl_spin);
1136
1137         if (!(gh->gh_flags & GL_ASYNC)) {
1138                 error = glock_wait_internal(gh);
1139                 if (error == GLR_CANCELED) {
1140                         msleep(100);
1141                         goto restart;
1142                 }
1143         }
1144
1145         return error;
1146 }
1147
1148 /**
1149  * gfs2_glock_poll - poll to see if an async request has been completed
1150  * @gh: the holder
1151  *
1152  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1153  */
1154
1155 int gfs2_glock_poll(struct gfs2_holder *gh)
1156 {
1157         struct gfs2_glock *gl = gh->gh_gl;
1158         int ready = 0;
1159
1160         spin_lock(&gl->gl_spin);
1161
1162         if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1163                 ready = 1;
1164         else if (list_empty(&gh->gh_list)) {
1165                 if (gh->gh_error == GLR_CANCELED) {
1166                         spin_unlock(&gl->gl_spin);
1167                         msleep(100);
1168                         if (gfs2_glock_nq(gh))
1169                                 return 1;
1170                         return 0;
1171                 } else
1172                         ready = 1;
1173         }
1174
1175         spin_unlock(&gl->gl_spin);
1176
1177         return ready;
1178 }
1179
1180 /**
1181  * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1182  * @gh: the holder structure
1183  *
1184  * Returns: 0, GLR_TRYFAILED, or errno on failure
1185  */
1186
1187 int gfs2_glock_wait(struct gfs2_holder *gh)
1188 {
1189         int error;
1190
1191         error = glock_wait_internal(gh);
1192         if (error == GLR_CANCELED) {
1193                 msleep(100);
1194                 gh->gh_flags &= ~GL_ASYNC;
1195                 error = gfs2_glock_nq(gh);
1196         }
1197
1198         return error;
1199 }
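
/*
 * Illustrative sketch (not part of the original file): an asynchronous
 * request queues the holder with GL_ASYNC and later collects the result
 * with gfs2_glock_wait(); gfs2_glock_poll() may be used in between.
 * The glock pointer gl below is assumed to be held by the caller.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);
 *      ... do other work, optionally checking gfs2_glock_poll(&gh) ...
 *      error = gfs2_glock_wait(&gh);
 *      if (!error) {
 *              ... the glock is now held ...
 *              gfs2_glock_dq(&gh);
 *      }
 *      gfs2_holder_uninit(&gh);
 */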
1200
1201 /**
1202  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1203  * @gh: the glock holder
1204  *
1205  */
1206
1207 void gfs2_glock_dq(struct gfs2_holder *gh)
1208 {
1209         struct gfs2_glock *gl = gh->gh_gl;
1210         const struct gfs2_glock_operations *glops = gl->gl_ops;
1211
1212         if (gh->gh_flags & GL_NOCACHE)
1213                 handle_callback(gl, LM_ST_UNLOCKED, 0);
1214
1215         gfs2_glmutex_lock(gl);
1216
1217         spin_lock(&gl->gl_spin);
1218         list_del_init(&gh->gh_list);
1219
1220         if (list_empty(&gl->gl_holders)) {
1221                 spin_unlock(&gl->gl_spin);
1222
1223                 if (glops->go_unlock)
1224                         glops->go_unlock(gh);
1225
1226                 spin_lock(&gl->gl_spin);
1227                 gl->gl_stamp = jiffies;
1228         }
1229
1230         clear_bit(GLF_LOCK, &gl->gl_flags);
1231         run_queue(gl);
1232         spin_unlock(&gl->gl_spin);
1233 }
1234
1235 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1236 {
1237         struct gfs2_glock *gl = gh->gh_gl;
1238         gfs2_glock_dq(gh);
1239         wait_on_demote(gl);
1240 }
1241
1242 /**
1243  * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1244  * @gh: the holder structure
1245  *
1246  */
1247
1248 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1249 {
1250         gfs2_glock_dq(gh);
1251         gfs2_holder_uninit(gh);
1252 }
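
/*
 * Illustrative sketch (not part of the original file): the common
 * synchronous pattern pairs gfs2_holder_init()/gfs2_glock_nq() with
 * gfs2_glock_dq_uninit().  The glock pointer gl below is assumed to be
 * held by the caller.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (error) {
 *              gfs2_holder_uninit(&gh);
 *              return error;
 *      }
 *      ... the glock is now held in LM_ST_SHARED ...
 *      gfs2_glock_dq_uninit(&gh);
 */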
1253
1254 /**
1255  * gfs2_glock_nq_num - acquire a glock based on lock number
1256  * @sdp: the filesystem
1257  * @number: the lock number
1258  * @glops: the glock operations for the type of glock
1259  * @state: the state to acquire the glock in
1260  * @flags: modifier flags for the acquisition
1261  * @gh: the struct gfs2_holder
1262  *
1263  * Returns: errno
1264  */
1265
1266 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1267                       const struct gfs2_glock_operations *glops,
1268                       unsigned int state, int flags, struct gfs2_holder *gh)
1269 {
1270         struct gfs2_glock *gl;
1271         int error;
1272
1273         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1274         if (!error) {
1275                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1276                 gfs2_glock_put(gl);
1277         }
1278
1279         return error;
1280 }
1281
1282 /**
1283  * glock_compare - Compare two struct gfs2_glock structures for sorting
1284  * @arg_a: the first structure
1285  * @arg_b: the second structure
1286  *
1287  */
1288
1289 static int glock_compare(const void *arg_a, const void *arg_b)
1290 {
1291         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1292         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1293         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1294         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1295
1296         if (a->ln_number > b->ln_number)
1297                 return 1;
1298         if (a->ln_number < b->ln_number)
1299                 return -1;
1300         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1301         return 0;
1302 }
1303
1304 /**
1305  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1306  * @num_gh: the number of structures
1307  * @ghs: an array of struct gfs2_holder structures
1308  *
1309  * Returns: 0 on success (all glocks acquired),
1310  *          errno on failure (no glocks acquired)
1311  */
1312
1313 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1314                      struct gfs2_holder **p)
1315 {
1316         unsigned int x;
1317         int error = 0;
1318
1319         for (x = 0; x < num_gh; x++)
1320                 p[x] = &ghs[x];
1321
1322         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1323
1324         for (x = 0; x < num_gh; x++) {
1325                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1326
1327                 error = gfs2_glock_nq(p[x]);
1328                 if (error) {
1329                         while (x--)
1330                                 gfs2_glock_dq(p[x]);
1331                         break;
1332                 }
1333         }
1334
1335         return error;
1336 }
1337
1338 /**
1339  * gfs2_glock_nq_m - acquire multiple glocks
1340  * @num_gh: the number of structures
1341  * @ghs: an array of struct gfs2_holder structures
1342  *
1343  *
1344  * Returns: 0 on success (all glocks acquired),
1345  *          errno on failure (no glocks acquired)
1346  */
1347
1348 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1349 {
1350         struct gfs2_holder *tmp[4];
1351         struct gfs2_holder **pph = tmp;
1352         int error = 0;
1353
1354         switch(num_gh) {
1355         case 0:
1356                 return 0;
1357         case 1:
1358                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1359                 return gfs2_glock_nq(ghs);
1360         default:
1361                 if (num_gh <= 4)
1362                         break;
1363                 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1364                 if (!pph)
1365                         return -ENOMEM;
1366         }
1367
1368         error = nq_m_sync(num_gh, ghs, pph);
1369
1370         if (pph != tmp)
1371                 kfree(pph);
1372
1373         return error;
1374 }
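
/*
 * Illustrative sketch (not part of the original file): acquiring two
 * glocks together; gfs2_glock_nq_m() sorts the holders into a
 * deadlock-free order before queuing them.  gl1 and gl2 are assumed to
 * be held by the caller.
 *
 *      struct gfs2_holder ghs[2];
 *      int error;
 *
 *      gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      if (!error) {
 *              ... both glocks are held ...
 *              gfs2_glock_dq_m(2, ghs);
 *      }
 *      gfs2_holder_uninit(&ghs[0]);
 *      gfs2_holder_uninit(&ghs[1]);
 */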
1375
1376 /**
1377  * gfs2_glock_dq_m - release multiple glocks
1378  * @num_gh: the number of structures
1379  * @ghs: an array of struct gfs2_holder structures
1380  *
1381  */
1382
1383 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1384 {
1385         unsigned int x;
1386
1387         for (x = 0; x < num_gh; x++)
1388                 gfs2_glock_dq(&ghs[x]);
1389 }
1390
1391 /**
1392  * gfs2_glock_dq_uninit_m - release multiple glocks
1393  * @num_gh: the number of structures
1394  * @ghs: an array of struct gfs2_holder structures
1395  *
1396  */
1397
1398 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1399 {
1400         unsigned int x;
1401
1402         for (x = 0; x < num_gh; x++)
1403                 gfs2_glock_dq_uninit(&ghs[x]);
1404 }
1405
1406 /**
1407  * gfs2_lvb_hold - attach a LVB to a glock
1408  * @gl: The glock in question
1409  *
1410  */
1411
1412 int gfs2_lvb_hold(struct gfs2_glock *gl)
1413 {
1414         int error;
1415
1416         gfs2_glmutex_lock(gl);
1417
1418         if (!atomic_read(&gl->gl_lvb_count)) {
1419                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1420                 if (error) {
1421                         gfs2_glmutex_unlock(gl);
1422                         return error;
1423                 }
1424                 gfs2_glock_hold(gl);
1425         }
1426         atomic_inc(&gl->gl_lvb_count);
1427
1428         gfs2_glmutex_unlock(gl);
1429
1430         return 0;
1431 }
1432
1433 /**
1434  * gfs2_lvb_unhold - detach a LVB from a glock
1435  * @gl: The glock in question
1436  *
1437  */
1438
1439 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1440 {
1441         gfs2_glock_hold(gl);
1442         gfs2_glmutex_lock(gl);
1443
1444         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1445         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1446                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1447                 gl->gl_lvb = NULL;
1448                 gfs2_glock_put(gl);
1449         }
1450
1451         gfs2_glmutex_unlock(gl);
1452         gfs2_glock_put(gl);
1453 }
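
/*
 * Illustrative sketch (not part of the original file): holding the lock
 * value block pins gl->gl_lvb until the matching unhold.  gl is assumed
 * to be held by the caller.
 *
 *      if (gfs2_lvb_hold(gl) == 0) {
 *              ... read or update the data at gl->gl_lvb ...
 *              gfs2_lvb_unhold(gl);
 *      }
 */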
1454
1455 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1456                         unsigned int state)
1457 {
1458         struct gfs2_glock *gl;
1459
1460         gl = gfs2_glock_find(sdp, name);
1461         if (!gl)
1462                 return;
1463
1464         handle_callback(gl, state, 1);
1465
1466         spin_lock(&gl->gl_spin);
1467         run_queue(gl);
1468         spin_unlock(&gl->gl_spin);
1469
1470         gfs2_glock_put(gl);
1471 }
1472
1473 /**
1474  * gfs2_glock_cb - Callback used by locking module
1475  * @cb_data: Pointer to the superblock
1476  * @type: Type of callback
1477  * @data: Type dependent data pointer
1478  *
1479  * Called by the locking module when it wants to tell us something.
1480  * Either we need to drop a lock, one of our ASYNC requests completed, or
1481  * a journal from another client needs to be recovered.
1482  */
1483
1484 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1485 {
1486         struct gfs2_sbd *sdp = cb_data;
1487
1488         switch (type) {
1489         case LM_CB_NEED_E:
1490                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1491                 return;
1492
1493         case LM_CB_NEED_D:
1494                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1495                 return;
1496
1497         case LM_CB_NEED_S:
1498                 blocking_cb(sdp, data, LM_ST_SHARED);
1499                 return;
1500
1501         case LM_CB_ASYNC: {
1502                 struct lm_async_cb *async = data;
1503                 struct gfs2_glock *gl;
1504
1505                 down_read(&gfs2_umount_flush_sem);
1506                 gl = gfs2_glock_find(sdp, &async->lc_name);
1507                 if (gfs2_assert_warn(sdp, gl))
1508                         return;
1509                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1510                         gl->gl_req_bh(gl, async->lc_ret);
1511                 gfs2_glock_put(gl);
1512                 up_read(&gfs2_umount_flush_sem);
1513                 return;
1514         }
1515
1516         case LM_CB_NEED_RECOVERY:
1517                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1518                 if (sdp->sd_recoverd_process)
1519                         wake_up_process(sdp->sd_recoverd_process);
1520                 return;
1521
1522         case LM_CB_DROPLOCKS:
1523                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1524                 gfs2_quota_scan(sdp);
1525                 return;
1526
1527         default:
1528                 gfs2_assert_warn(sdp, 0);
1529                 return;
1530         }
1531 }
1532
1533 /**
1534  * demote_ok - Check to see if it's ok to unlock a glock
1535  * @gl: the glock
1536  *
1537  * Returns: 1 if it's ok
1538  */
1539
1540 static int demote_ok(struct gfs2_glock *gl)
1541 {
1542         const struct gfs2_glock_operations *glops = gl->gl_ops;
1543         int demote = 1;
1544
1545         if (test_bit(GLF_STICKY, &gl->gl_flags))
1546                 demote = 0;
1547         else if (glops->go_demote_ok)
1548                 demote = glops->go_demote_ok(gl);
1549
1550         return demote;
1551 }
1552
1553 /**
1554  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1555  * @gl: the glock
1556  *
1557  */
1558
1559 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1560 {
1561         struct gfs2_sbd *sdp = gl->gl_sbd;
1562
1563         spin_lock(&sdp->sd_reclaim_lock);
1564         if (list_empty(&gl->gl_reclaim)) {
1565                 gfs2_glock_hold(gl);
1566                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1567                 atomic_inc(&sdp->sd_reclaim_count);
1568         }
1569         spin_unlock(&sdp->sd_reclaim_lock);
1570
1571         wake_up(&sdp->sd_reclaim_wq);
1572 }
1573
1574 /**
1575  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1576  * @sdp: the filesystem
1577  *
1578  * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1579  * different glock and we notice that there are a lot of glocks in the
1580  * reclaim list.
1581  *
1582  */
1583
1584 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1585 {
1586         struct gfs2_glock *gl;
1587
1588         spin_lock(&sdp->sd_reclaim_lock);
1589         if (list_empty(&sdp->sd_reclaim_list)) {
1590                 spin_unlock(&sdp->sd_reclaim_lock);
1591                 return;
1592         }
1593         gl = list_entry(sdp->sd_reclaim_list.next,
1594                         struct gfs2_glock, gl_reclaim);
1595         list_del_init(&gl->gl_reclaim);
1596         spin_unlock(&sdp->sd_reclaim_lock);
1597
1598         atomic_dec(&sdp->sd_reclaim_count);
1599         atomic_inc(&sdp->sd_reclaimed);
1600
1601         if (gfs2_glmutex_trylock(gl)) {
1602                 if (list_empty(&gl->gl_holders) &&
1603                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1604                         handle_callback(gl, LM_ST_UNLOCKED, 0);
1605                 gfs2_glmutex_unlock(gl);
1606         }
1607
1608         gfs2_glock_put(gl);
1609 }
1610
1611 /**
1612  * examine_bucket - Call a function for each glock in a hash bucket
1613  * @examiner: the function
1614  * @sdp: the filesystem
1615  * @bucket: the bucket
1616  *
1617  * Returns: 1 if the bucket has entries
1618  */
1619
1620 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1621                           unsigned int hash)
1622 {
1623         struct gfs2_glock *gl, *prev = NULL;
1624         int has_entries = 0;
1625         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1626
1627         read_lock(gl_lock_addr(hash));
1628         /* Can't use hlist_for_each_entry - don't want prefetch here */
1629         if (hlist_empty(head))
1630                 goto out;
1631         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1632         while(1) {
1633                 if (gl->gl_sbd == sdp) {
1634                         gfs2_glock_hold(gl);
1635                         read_unlock(gl_lock_addr(hash));
1636                         if (prev)
1637                                 gfs2_glock_put(prev);
1638                         prev = gl;
1639                         examiner(gl);
1640                         has_entries = 1;
1641                         read_lock(gl_lock_addr(hash));
1642                 }
1643                 if (gl->gl_list.next == NULL)
1644                         break;
1645                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1646         }
1647 out:
1648         read_unlock(gl_lock_addr(hash));
1649         if (prev)
1650                 gfs2_glock_put(prev);
1651         return has_entries;
1652 }
1653
1654 /**
1655  * scan_glock - look at a glock and see if we can reclaim it
1656  * @gl: the glock to look at
1657  *
1658  */
1659
1660 static void scan_glock(struct gfs2_glock *gl)
1661 {
1662         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1663                 return;
1664
1665         if (gfs2_glmutex_trylock(gl)) {
1666                 if (list_empty(&gl->gl_holders) &&
1667                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1668                         goto out_schedule;
1669                 gfs2_glmutex_unlock(gl);
1670         }
1671         return;
1672
1673 out_schedule:
1674         gfs2_glmutex_unlock(gl);
1675         gfs2_glock_schedule_for_reclaim(gl);
1676 }
1677
1678 /**
1679  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1680  * @sdp: the filesystem
1681  *
1682  */
1683
1684 void gfs2_scand_internal(struct gfs2_sbd *sdp)
1685 {
1686         unsigned int x;
1687
1688         for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1689                 examine_bucket(scan_glock, sdp, x);
1690 }
1691
1692 /**
1693  * clear_glock - look at a glock and see if we can free it from glock cache
1694  * @gl: the glock to look at
1695  *
1696  */
1697
1698 static void clear_glock(struct gfs2_glock *gl)
1699 {
1700         struct gfs2_sbd *sdp = gl->gl_sbd;
1701         int released;
1702
1703         spin_lock(&sdp->sd_reclaim_lock);
1704         if (!list_empty(&gl->gl_reclaim)) {
1705                 list_del_init(&gl->gl_reclaim);
1706                 atomic_dec(&sdp->sd_reclaim_count);
1707                 spin_unlock(&sdp->sd_reclaim_lock);
1708                 released = gfs2_glock_put(gl);
1709                 gfs2_assert(sdp, !released);
1710         } else {
1711                 spin_unlock(&sdp->sd_reclaim_lock);
1712         }
1713
1714         if (gfs2_glmutex_trylock(gl)) {
1715                 if (list_empty(&gl->gl_holders) &&
1716                     gl->gl_state != LM_ST_UNLOCKED)
1717                         handle_callback(gl, LM_ST_UNLOCKED, 0);
1718                 gfs2_glmutex_unlock(gl);
1719         }
1720 }
1721
1722 /**
1723  * gfs2_gl_hash_clear - Empty out the glock hash table
1724  * @sdp: the filesystem
1725  * @wait: wait until it's all gone
1726  *
1727  * Called when unmounting the filesystem, or when inter-node lock manager
1728  * requests DROPLOCKS because it is running out of capacity.
1729  */
1730
1731 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1732 {
1733         unsigned long t;
1734         unsigned int x;
1735         int cont;
1736
1737         t = jiffies;
1738
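        /*
         * Sweep every hash bucket repeatedly until no glocks for this
         * filesystem remain (or only once if @wait is not set), warning
         * and dumping the lock state if progress appears to have stalled.
         */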
1739         for (;;) {
1740                 cont = 0;
1741                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1742                         if (examine_bucket(clear_glock, sdp, x))
1743                                 cont = 1;
1744                 }
1745
1746                 if (!wait || !cont)
1747                         break;
1748
1749                 if (time_after_eq(jiffies,
1750                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1751                         fs_warn(sdp, "Unmount seems to be stalled. "
1752                                      "Dumping lock state...\n");
1753                         gfs2_dump_lockstate(sdp);
1754                         t = jiffies;
1755                 }
1756
1757                 down_write(&gfs2_umount_flush_sem);
1758                 invalidate_inodes(sdp->sd_vfs);
1759                 up_write(&gfs2_umount_flush_sem);
1760                 msleep(10);
1761         }
1762 }
1763
1764 /*
1765  *  Diagnostic routines to help debug distributed deadlock
1766  */
1767
1768 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1769                               unsigned long address)
1770 {
1771         char buffer[KSYM_SYMBOL_LEN];
1772
1773         sprint_symbol(buffer, address);
1774         print_dbg(gi, fmt, buffer);
1775 }
1776
1777 /**
1778  * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL when dumping to the console)
1779  * @str: a string naming the type of holder
1780  * @gh: the glock holder
1781  *
1782  * Returns: 0 on success, -ENOBUFS when we run out of space
1783  */
1784
1785 static int dump_holder(struct glock_iter *gi, char *str,
1786                        struct gfs2_holder *gh)
1787 {
1788         unsigned int x;
1789         struct task_struct *gh_owner;
1790
1791         print_dbg(gi, "  %s\n", str);
1792         if (gh->gh_owner_pid) {
1793                 print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
1794                 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1795                 if (gh_owner)
1796                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1797                 else
1798                         print_dbg(gi, "(ended)\n");
1799         } else
1800                 print_dbg(gi, "    owner = -1\n");
1801         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1802         print_dbg(gi, "    gh_flags =");
1803         for (x = 0; x < 32; x++)
1804                 if (gh->gh_flags & (1 << x))
1805                         print_dbg(gi, " %u", x);
1806         print_dbg(gi, " \n");
1807         print_dbg(gi, "    error = %d\n", gh->gh_error);
1808         print_dbg(gi, "    gh_iflags =");
1809         for (x = 0; x < 32; x++)
1810                 if (test_bit(x, &gh->gh_iflags))
1811                         print_dbg(gi, " %u", x);
1812         print_dbg(gi, " \n");
1813         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1814
1815         return 0;
1816 }
1817
1818 /**
1819  * dump_inode - print information about an inode
 * @gi: the glock iterator (NULL when dumping to the console)
1820  * @ip: the inode
1821  *
1822  * Returns: 0 on success, -ENOBUFS when we run out of space
1823  */
1824
1825 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1826 {
1827         unsigned int x;
1828
1829         print_dbg(gi, "  Inode:\n");
1830         print_dbg(gi, "    num = %llu/%llu\n",
1831                   (unsigned long long)ip->i_no_formal_ino,
1832                   (unsigned long long)ip->i_no_addr);
1833         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1834         print_dbg(gi, "    i_flags =");
1835         for (x = 0; x < 32; x++)
1836                 if (test_bit(x, &ip->i_flags))
1837                         print_dbg(gi, " %u", x);
1838         print_dbg(gi, " \n");
1839         return 0;
1840 }
1841
1842 /**
1843  * dump_glock - print information about a glock
1844  * @gi: the glock iterator (NULL when dumping to the console)
1845  * @gl: the glock
1846  *
1847  * Returns: 0 on success, -ENOBUFS when we run out of space
1848  */
1849
1850 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1851 {
1852         struct gfs2_holder *gh;
1853         unsigned int x;
1854         int error = -ENOBUFS;
1855         struct task_struct *gl_owner;
1856
1857         spin_lock(&gl->gl_spin);
1858
1859         print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
1860                    (unsigned long long)gl->gl_name.ln_number);
1861         print_dbg(gi, "  gl_flags =");
1862         for (x = 0; x < 32; x++) {
1863                 if (test_bit(x, &gl->gl_flags))
1864                         print_dbg(gi, " %u", x);
1865         }
1866         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1867                 print_dbg(gi, " (unlocked)");
1868         print_dbg(gi, " \n");
1869         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1870         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1871         if (gl->gl_owner_pid) {
1872                 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1873                 if (gl_owner)
1874                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1875                                   gl->gl_owner_pid, gl_owner->comm);
1876                 else
1877                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1878                                   gl->gl_owner_pid);
1879         } else
1880                 print_dbg(gi, "  gl_owner = -1\n");
1881         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1882         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1883         print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1884         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1885         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1886         print_dbg(gi, "  le = %s\n",
1887                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1888         print_dbg(gi, "  reclaim = %s\n",
1889                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1890         if (gl->gl_aspace)
1891                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1892                            gl->gl_aspace->i_mapping->nrpages);
1893         else
1894                 print_dbg(gi, "  aspace = no\n");
1895         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1896         if (gl->gl_req_gh) {
1897                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1898                 if (error)
1899                         goto out;
1900         }
1901         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1902                 error = dump_holder(gi, "Holder", gh);
1903                 if (error)
1904                         goto out;
1905         }
1906         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1907                 error = dump_holder(gi, "Waiter1", gh);
1908                 if (error)
1909                         goto out;
1910         }
1911         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1912                 error = dump_holder(gi, "Waiter3", gh);
1913                 if (error)
1914                         goto out;
1915         }
1916         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1917                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1918                           gl->gl_demote_state, (unsigned long long)
1919                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1920         }
1921         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1922                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1923                         list_empty(&gl->gl_holders)) {
1924                         error = dump_inode(gi, gl->gl_object);
1925                         if (error)
1926                                 goto out;
1927                 } else {
1928                         error = -ENOBUFS;
1929                         print_dbg(gi, "  Inode: busy\n");
1930                 }
1931         }
1932
1933         error = 0;
1934
1935 out:
1936         spin_unlock(&gl->gl_spin);
1937         return error;
1938 }
1939
1940 /**
1941  * gfs2_dump_lockstate - print out the current lockstate
1942  * @sdp: the filesystem
1943  *
1944  * Dumps the state of every glock belonging to @sdp to the console,
1945  * one hash bucket at a time.
1946  *
1947  */
1948
1949 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1950 {
1951         struct gfs2_glock *gl;
1952         struct hlist_node *h;
1953         unsigned int x;
1954         int error = 0;
1955
1956         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1957
1958                 read_lock(gl_lock_addr(x));
1959
1960                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1961                         if (gl->gl_sbd != sdp)
1962                                 continue;
1963
1964                         error = dump_glock(NULL, gl);
1965                         if (error)
1966                                 break;
1967                 }
1968
1969                 read_unlock(gl_lock_addr(x));
1970
1971                 if (error)
1972                         break;
1973         }
1974
1976         return error;
1977 }
1978
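/*
 * gfs2_glock_init - initialise the glock hash table
 *
 * Called from the module init path; sets up the empty hash buckets and,
 * where bucket locks are configured, initialises their rwlocks.
 */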
1979 int __init gfs2_glock_init(void)
1980 {
1981         unsigned i;
1982         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1983                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1984         }
1985 #ifdef GL_HASH_LOCK_SZ
1986         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1987                 rwlock_init(&gl_hash_locks[i]);
1988         }
1989 #endif
1990         return 0;
1991 }
1992
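/*
 * gfs2_glock_iter_next - advance the debugfs iterator to the next glock
 *
 * Walks the hash table bucket by bucket.  Returns 0 if another glock was
 * found and 1 once the whole table has been traversed.
 */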
1993 static int gfs2_glock_iter_next(struct glock_iter *gi)
1994 {
1995         read_lock(gl_lock_addr(gi->hash));
1996         while (1) {
1997                 if (!gi->hb_list) {  /* If we don't have a hash bucket yet */
1998                         gi->hb_list = &gl_hash_table[gi->hash].hb_list;
1999                         if (hlist_empty(gi->hb_list)) {
2000                                 read_unlock(gl_lock_addr(gi->hash));
2001                                 gi->hash++;
2002                                 read_lock(gl_lock_addr(gi->hash));
2003                                 gi->hb_list = NULL;
2004                                 if (gi->hash >= GFS2_GL_HASH_SIZE) {
2005                                         read_unlock(gl_lock_addr(gi->hash));
2006                                         return 1;
2007                                 }
2008                                 else
2009                                         continue;
2010                         }
2011                         if (!hlist_empty(gi->hb_list)) {
2012                                 gi->gl = list_entry(gi->hb_list->first,
2013                                                     struct gfs2_glock,
2014                                                     gl_list);
2015                         }
2016                 } else {
2017                         if (gi->gl->gl_list.next == NULL) {
2018                                 read_unlock(gl_lock_addr(gi->hash));
2019                                 gi->hash++;
                                     if (gi->hash >= GFS2_GL_HASH_SIZE)
                                             return 1;
2020                                 read_lock(gl_lock_addr(gi->hash));
2021                                 gi->hb_list = NULL;
2022                                 continue;
2023                         }
2024                         gi->gl = list_entry(gi->gl->gl_list.next,
2025                                             struct gfs2_glock, gl_list);
2026                 }
2027                 if (gi->gl)
2028                         break;
2029         }
2030         read_unlock(gl_lock_addr(gi->hash));
2031         return 0;
2032 }
2033
2034 static void gfs2_glock_iter_free(struct glock_iter *gi)
2035 {
2036         kfree(gi);
2037 }
2038
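/*
 * gfs2_glock_iter_init - allocate an iterator positioned on the first glock
 *
 * Returns NULL if the allocation fails or if there are no glocks to visit.
 */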
2039 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2040 {
2041         struct glock_iter *gi;
2042
2043         gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2044         if (!gi)
2045                 return NULL;
2046
2047         gi->sdp = sdp;
2048         gi->hash = 0;
2049         gi->gl = NULL;
2050         gi->hb_list = NULL;
2051         gi->seq = NULL;
2052         memset(gi->string, 0, sizeof(gi->string));
2053
2054         if (gfs2_glock_iter_next(gi)) {
2055                 gfs2_glock_iter_free(gi);
2056                 return NULL;
2057         }
2058
2059         return gi;
2060 }
2061
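/*
 * seq_file ->start() operation: the glock hash table cannot be indexed by
 * position, so a fresh iterator is built and stepped forward *pos times.
 */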
2062 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2063 {
2064         struct glock_iter *gi;
2065         loff_t n = *pos;
2066
2067         gi = gfs2_glock_iter_init(file->private);
2068         if (!gi)
2069                 return NULL;
2070
2071         while (n--) {
2072                 if (gfs2_glock_iter_next(gi)) {
2073                         gfs2_glock_iter_free(gi);
2074                         return NULL;
2075                 }
2076         }
2077
2078         return gi;
2079 }
2080
2081 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2082                                  loff_t *pos)
2083 {
2084         struct glock_iter *gi = iter_ptr;
2085
2086         (*pos)++;
2087
2088         if (gfs2_glock_iter_next(gi)) {
2089                 gfs2_glock_iter_free(gi);
2090                 return NULL;
2091         }
2092
2093         return gi;
2094 }
2095
2096 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2097 {
2098         /* nothing for now */
2099 }
2100
2101 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2102 {
2103         struct glock_iter *gi = iter_ptr;
2104
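        /* Attach the seq_file so that print_dbg() output goes to it. */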
2105         gi->seq = file;
2106         dump_glock(gi, gi->gl);
2107
2108         return 0;
2109 }
2110
2111 static struct seq_operations gfs2_glock_seq_ops = {
2112         .start = gfs2_glock_seq_start,
2113         .next  = gfs2_glock_seq_next,
2114         .stop  = gfs2_glock_seq_stop,
2115         .show  = gfs2_glock_seq_show,
2116 };
2117
2118 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2119 {
2120         struct seq_file *seq;
2121         int ret;
2122
2123         ret = seq_open(file, &gfs2_glock_seq_ops);
2124         if (ret)
2125                 return ret;
2126
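        /*
         * debugfs_create_file() stored the superblock pointer in
         * inode->i_private; pass it on to the seq_file so the iterator
         * knows which filesystem to walk.
         */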
2127         seq = file->private_data;
2128         seq->private = inode->i_private;
2129
2130         return 0;
2131 }
2132
2133 static const struct file_operations gfs2_debug_fops = {
2134         .owner   = THIS_MODULE,
2135         .open    = gfs2_debugfs_open,
2136         .read    = seq_read,
2137         .llseek  = seq_lseek,
2138         .release = seq_release
2139 };
2140
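/*
 * Create the per-filesystem debugfs entries.  With debugfs mounted in its
 * usual location the glock dump appears as
 * /sys/kernel/debug/gfs2/<table name>/glocks.
 */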
2141 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2142 {
2143         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2144         if (!sdp->debugfs_dir)
2145                 return -ENOMEM;
2146         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2147                                                          S_IFREG | S_IRUGO,
2148                                                          sdp->debugfs_dir, sdp,
2149                                                          &gfs2_debug_fops);
2150         if (!sdp->debugfs_dentry_glocks)
2151                 return -ENOMEM;
2152
2153         return 0;
2154 }
2155
2156 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2157 {
2158         if (sdp && sdp->debugfs_dir) {
2159                 if (sdp->debugfs_dentry_glocks) {
2160                         debugfs_remove(sdp->debugfs_dentry_glocks);
2161                         sdp->debugfs_dentry_glocks = NULL;
2162                 }
2163                 debugfs_remove(sdp->debugfs_dir);
2164                 sdp->debugfs_dir = NULL;
2165         }
2166 }
2167
2168 int gfs2_register_debugfs(void)
2169 {
2170         gfs2_root = debugfs_create_dir("gfs2", NULL);
2171         return gfs2_root ? 0 : -ENOMEM;
2172 }
2173
2174 void gfs2_unregister_debugfs(void)
2175 {
2176         debugfs_remove(gfs2_root);
2177         gfs2_root = NULL;
2178 }