[GFS2] Clean up duplicate includes in fs/gfs2/
fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct glock_iter {
	int hash;                     /* hash bucket index         */
	struct gfs2_sbd *sdp;         /* incore superblock         */
	struct gfs2_glock *gl;        /* current glock struct      */
	struct hlist_head *hb_list;   /* current hash bucket ptr   */
	struct seq_file *seq;         /* sequence file for debugfs */
	char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

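/*
 * Illustrative note (a sketch, not part of the original file): lookups
 * take the per-chain rwlock for the bucket they hash to, e.g.
 *
 *	read_lock(gl_lock_addr(hash));
 *	gl = search_bucket(hash, sdp, name);
 *	read_unlock(gl_lock_addr(hash));
 *
 * On uniprocessor builds without spinlock debugging, gl_lock_addr()
 * returns NULL and the rwlock operations compile to no-ops, so the
 * NULL pointer is never dereferenced.
 */
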
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

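/*
 * Example (illustration only): a holder asking for LM_ST_SHARED is
 * satisfied by a glock already held in LM_ST_EXCLUSIVE unless it passed
 * GL_EXACT, and a holder passing LM_FLAG_ANY is satisfied by any state
 * other than LM_ST_UNLOCKED.
 */
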
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket index
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

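/*
 * Illustrative sketch (not from the original source): a typical caller
 * looks up or creates the glock for an on-disk object and drops its
 * reference when finished:
 *
 *	struct gfs2_glock *gl;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (!error) {
 *		...
 *		gfs2_glock_put(gl);
 *	}
 */
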
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = current->pid;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

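/*
 * Example (a sketch, not part of the original file): the usual calling
 * pattern builds a holder on the stack, enqueues it, and tears it down
 * again once the critical section is over:
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	...
 *	gfs2_glock_dq_uninit(&gh);
 */
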
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		gfs2_demote_wake(gl);
		return 0;
	}
	set_bit(GLF_LOCK, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE) {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_drop_th(gl);
	} else {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_xmote_th(gl, NULL);
	}
	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

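/*
 * run_queue() drains the waiters in a fixed priority order: glmutex
 * requests on gl_waiters1 first, then a pending demote, then promote
 * requests on gl_waiters3. Processing stops as soon as one request
 * blocks.
 */
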
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);
	if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
		BUG();

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		clear_bit(HIF_WAIT, &gh.gh_iflags);
		smp_mb();
		wake_up_bit(&gh.gh_iflags, HIF_WAIT);
	}
	spin_unlock(&gl->gl_spin);

	wait_on_holder(&gh);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

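/*
 * Sketch of the glmutex pattern used elsewhere in this file (illustration
 * only): paths that must not sleep probe the mutex and skip the glock if
 * it is busy:
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... examine or demote the glock ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */
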
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: non-zero if the request came from the lock module
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
		    gl->gl_object) {
			struct inode *inode = igrab(gl->gl_object);
			spin_unlock(&gl->gl_spin);
			if (inode) {
				d_prune_aliases(inode);
				iput(inode);
			}
			return;
		}
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	spin_unlock(&gl->gl_spin);
}

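/*
 * Note on handle_callback() above: if a second, conflicting demote
 * request arrives while GLF_DEMOTE is still set, the demote target
 * falls back to LM_ST_UNLOCKED, which satisfies both requests.
 */
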
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/*  Deal with each possible exit condition  */

	if (!gh) {
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED) {
			op_done = 0;
		} else {
			spin_lock(&gl->gl_spin);
			if (gl->gl_state != gl->gl_demote_state) {
				gl->gl_req_bh = NULL;
				spin_unlock(&gl->gl_spin);
				gfs2_glock_drop_th(gl);
				gfs2_glock_put(gl);
				return;
			}
			gfs2_demote_wake(gl);
			spin_unlock(&gl->gl_spin);
		}
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the state change, or NULL for a demote
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);
	gfs2_demote_wake(gl);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_drop_th)
		glops->go_drop_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner_pid == pid)
			return gh;
	}

	return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (gi) {
		/* Bound the formatted output to the scratch buffer and never
		   treat the formatted result as a format string itself. */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_printf(gi->seq, "%s", gi->string);
	} else
		vprintk(fmt, args);
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner_pid);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

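/*
 * Illustrative sketch: with GL_ASYNC the caller submits the request and
 * collects the result later:
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */
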
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that was requested with GL_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		spin_lock(&gl->gl_spin);
		gl->gl_stamp = jiffies;
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

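/*
 * Example (sketch; gl_a and gl_b are hypothetical glocks): callers that
 * need several glocks at once fill an array of holders and let
 * gfs2_glock_nq_m() acquire them in lock-number order, avoiding ABBA
 * deadlocks between nodes:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_uninit_m(2, ghs);
 */
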
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - dequeue and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	handle_callback(gl, state, 1);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl)) {
			/* Don't leak the flush semaphore on this error path */
			up_read(&gfs2_umount_flush_sem);
			return;
		}
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket index
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while (1) {
		if (gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
		return;

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(scan_glock, sdp, x);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED, 0);
		gfs2_glmutex_unlock(gl);
	}
}

1720 /**
1721  * gfs2_gl_hash_clear - Empty out the glock hash table
1722  * @sdp: the filesystem
1723  * @wait: wait until it's all gone
1724  *
1725  * Called when unmounting the filesystem, or when inter-node lock manager
1726  * requests DROPLOCKS because it is running out of capacity.
1727  */
1728
1729 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1730 {
1731         unsigned long t;
1732         unsigned int x;
1733         int cont;
1734
1735         t = jiffies;
1736
1737         for (;;) {
1738                 cont = 0;
1739                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1740                         if (examine_bucket(clear_glock, sdp, x))
1741                                 cont = 1;
1742                 }
1743
1744                 if (!wait || !cont)
1745                         break;
1746
1747                 if (time_after_eq(jiffies,
1748                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1749                         fs_warn(sdp, "Unmount seems to be stalled. "
1750                                      "Dumping lock state...\n");
1751                         gfs2_dump_lockstate(sdp);
1752                         t = jiffies;
1753                 }
1754
1755                 down_write(&gfs2_umount_flush_sem);
1756                 invalidate_inodes(sdp->sd_vfs);
1757                 up_write(&gfs2_umount_flush_sem);
1758                 msleep(10);
1759         }
1760 }

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

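/*
 * gfs2_print_symbol - resolve @address to a symbol name via sprint_symbol()
 * and print it through print_dbg() using the given format string
 */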
static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
                              unsigned long address)
{
        char buffer[KSYM_SYMBOL_LEN];

        sprint_symbol(buffer, address);
        print_dbg(gi, fmt, buffer);
}

/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL means dump to the console)
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
                       struct gfs2_holder *gh)
{
        unsigned int x;
        struct task_struct *gh_owner;

        print_dbg(gi, "  %s\n", str);
        if (gh->gh_owner_pid) {
                print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
                gh_owner = find_task_by_pid(gh->gh_owner_pid);
                if (gh_owner)
                        print_dbg(gi, "(%s)\n", gh_owner->comm);
                else
                        print_dbg(gi, "(ended)\n");
        } else
                print_dbg(gi, "    owner = -1\n");
        print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
        print_dbg(gi, "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        print_dbg(gi, "    error = %d\n", gh->gh_error);
        print_dbg(gi, "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);

        return 0;
}

/**
 * dump_inode - print information about an inode
 * @gi: the glock iterator
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
        unsigned int x;

        print_dbg(gi, "  Inode:\n");
        print_dbg(gi, "    num = %llu/%llu\n",
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr);
        print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        print_dbg(gi, "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        return 0;
}

/**
 * dump_glock - print information about a glock
 * @gi: the glock iterator (NULL means dump to the console)
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;
        struct task_struct *gl_owner;

        spin_lock(&gl->gl_spin);

        print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
                  (unsigned long long)gl->gl_name.ln_number);
        print_dbg(gi, "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        print_dbg(gi, " %u", x);
        }
        if (!test_bit(GLF_LOCK, &gl->gl_flags))
                print_dbg(gi, " (unlocked)");
        print_dbg(gi, " \n");
        print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
        if (gl->gl_owner_pid) {
                gl_owner = find_task_by_pid(gl->gl_owner_pid);
                if (gl_owner)
                        print_dbg(gi, "  gl_owner = pid %d (%s)\n",
                                  gl->gl_owner_pid, gl_owner->comm);
                else
                        print_dbg(gi, "  gl_owner = %d (ended)\n",
                                  gl->gl_owner_pid);
        } else
                print_dbg(gi, "  gl_owner = -1\n");
        print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
        print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        print_dbg(gi, "  le = %s\n",
                  (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        print_dbg(gi, "  reclaim = %s\n",
                  (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                          gl->gl_aspace->i_mapping->nrpages);
        else
                print_dbg(gi, "  aspace = no\n");
        print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder(gi, "Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder(gi, "Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder(gi, "Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder(gi, "Waiter3", gh);
                if (error)
                        goto out;
        }
        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                print_dbg(gi, "  Demotion req to state %u (%llu us ago)\n",
                          gl->gl_demote_state, (unsigned long long)
                          (jiffies - gl->gl_demote_time)*(1000000/HZ));
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gi, gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        print_dbg(gi, "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock belonging to @sdp to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(NULL, gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}

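/*
 * gfs2_glock_init - set up the glock hash table at module init time:
 * every hash chain starts empty, and (on builds where GL_HASH_LOCK_SZ
 * is defined) each of the chain rwlocks is initialised.
 */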
int __init gfs2_glock_init(void)
{
        unsigned int i;

        for (i = 0; i < GFS2_GL_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
#ifdef GL_HASH_LOCK_SZ
        for (i = 0; i < GL_HASH_LOCK_SZ; i++)
                rwlock_init(&gl_hash_locks[i]);
#endif
        return 0;
}

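/*
 * gfs2_glock_iter_next - advance the iterator to the next glock
 *
 * Walks the hash table one chain at a time, holding the chain's read
 * lock while stepping along it.  Returns 0 with gi->gl pointing at the
 * next glock, or 1 once every bucket has been exhausted.
 */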
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
        read_lock(gl_lock_addr(gi->hash));
        while (1) {
                if (!gi->hb_list) {  /* If we don't have a hash bucket yet */
                        /* Check the bucket index before dereferencing the
                           table, so running off the end of the last chain
                           cannot read past gl_hash_table[] */
                        if (gi->hash >= GFS2_GL_HASH_SIZE) {
                                read_unlock(gl_lock_addr(gi->hash));
                                return 1;
                        }
                        gi->hb_list = &gl_hash_table[gi->hash].hb_list;
                        if (hlist_empty(gi->hb_list)) {
                                read_unlock(gl_lock_addr(gi->hash));
                                gi->hash++;
                                read_lock(gl_lock_addr(gi->hash));
                                gi->hb_list = NULL;
                                continue;
                        }
                        gi->gl = list_entry(gi->hb_list->first,
                                            struct gfs2_glock, gl_list);
                } else {
                        if (gi->gl->gl_list.next == NULL) {
                                read_unlock(gl_lock_addr(gi->hash));
                                gi->hash++;
                                read_lock(gl_lock_addr(gi->hash));
                                gi->hb_list = NULL;
                                continue;
                        }
                        gi->gl = list_entry(gi->gl->gl_list.next,
                                            struct gfs2_glock, gl_list);
                }
                if (gi->gl)
                        break;
        }
        read_unlock(gl_lock_addr(gi->hash));
        return 0;
}

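/* gfs2_glock_iter_free - release an iterator from gfs2_glock_iter_init() */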
static void gfs2_glock_iter_free(struct glock_iter *gi)
{
        kfree(gi);
}

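/*
 * gfs2_glock_iter_init - allocate an iterator over @sdp's glocks and
 * position it at the first glock in the hash table.  Returns NULL on
 * allocation failure or if there is nothing to iterate over.
 */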
static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
{
        struct glock_iter *gi;

        gi = kmalloc(sizeof(*gi), GFP_KERNEL);
        if (!gi)
                return NULL;

        gi->sdp = sdp;
        gi->hash = 0;
        gi->gl = NULL;
        gi->hb_list = NULL;
        gi->seq = NULL;
        memset(gi->string, 0, sizeof(gi->string));

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

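/*
 * seq_file ->start(): build a fresh iterator and step it forward *pos
 * times so a restarted read resumes at the right glock.  Note that
 * this makes each restart O(n) in the number of glocks already shown.
 */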
static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
{
        struct glock_iter *gi;
        loff_t n = *pos;

        gi = gfs2_glock_iter_init(file->private);
        if (!gi)
                return NULL;

        while (n--) {
                if (gfs2_glock_iter_next(gi)) {
                        gfs2_glock_iter_free(gi);
                        return NULL;
                }
        }

        return gi;
}

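/*
 * seq_file ->next(): advance to the next glock; the iterator frees
 * itself once the hash table has been exhausted.
 */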
static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
                                 loff_t *pos)
{
        struct glock_iter *gi = iter_ptr;

        (*pos)++;

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

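/*
 * seq_file ->stop(): nothing to do; the iterator is freed by
 * ->start()/->next() when the walk reaches the end of the table.
 */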
static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
        /* nothing for now */
}

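/*
 * seq_file ->show(): dump the current glock, routing print_dbg()
 * output into the seq_file via gi->seq.
 */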
static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;

        gi->seq = file;
        dump_glock(gi, gi->gl);

        return 0;
}

static struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

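/*
 * Open handler for the per-sb "glocks" debugfs file: wire up the
 * seq_file machinery and stash the superblock (from inode->i_private)
 * in seq->private so the iterator knows which filesystem to walk.
 */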
static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        int ret;

        ret = seq_open(file, &gfs2_glock_seq_ops);
        if (ret)
                return ret;

        seq = file->private_data;
        seq->private = inode->i_private;

        return 0;
}

static const struct file_operations gfs2_debug_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_debugfs_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

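/*
 * gfs2_create_debugfs_file - create the read-only "glocks" file for
 * @sdp under <debugfs>/gfs2/<table_name>/.
 */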
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
        if (!sdp->debugfs_dir)
                return -ENOMEM;
        sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
                                                         S_IFREG | S_IRUGO,
                                                         sdp->debugfs_dir, sdp,
                                                         &gfs2_debug_fops);
        if (!sdp->debugfs_dentry_glocks)
                return -ENOMEM;

        return 0;
}

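/*
 * gfs2_delete_debugfs_file - tear down the entries created above;
 * safe to call even if creation failed part-way through.
 */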
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        if (sdp && sdp->debugfs_dir) {
                if (sdp->debugfs_dentry_glocks) {
                        debugfs_remove(sdp->debugfs_dentry_glocks);
                        sdp->debugfs_dentry_glocks = NULL;
                }
                debugfs_remove(sdp->debugfs_dir);
                sdp->debugfs_dir = NULL;
        }
}

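/*
 * gfs2_register_debugfs - create the top-level "gfs2" debugfs
 * directory shared by all mounted gfs2 filesystems.
 */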
int gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
        return gfs2_root ? 0 : -ENOMEM;
}

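/*
 * gfs2_unregister_debugfs - remove the top-level "gfs2" directory at
 * module unload; debugfs_remove() ignores a NULL dentry.
 */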
void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}