[GFS2] Remove unused function from glock.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

struct gfs2_gl_hash_bucket {
	struct list_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

#define GFS2_GL_HASH_SHIFT	13
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ	512
# else
#  define GL_HASH_LOCK_SZ	256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;
	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);
	return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	kref_init(&gl->gl_ref);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
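
/*
 * Illustrative sketch (not code from this file): callers typically pair
 * gfs2_glock_get() with gfs2_glock_put(), e.g.
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 *
 * gfs2_glock_nq_num() further down follows exactly this pattern.
 */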

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}
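
/*
 * Illustrative sketch of the usual holder life cycle (error handling
 * elided; the holder is commonly a stack variable):
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	...
 *	gfs2_glock_dq(&gh);
 *	gfs2_holder_uninit(&gh);
 *
 * gfs2_glock_nq_init() combines the first two steps.
 */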

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags passed to kmalloc()
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 * Mutex requests (waiters1) take priority over demote/greedy requests
 * (waiters2), which in turn take priority over promote requests
 * (waiters3). Processing stops as soon as a request blocks.
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		complete(&gh.gh_wait);
	}
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

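/*
 * Illustrative sketch: gfs2_glmutex_trylock()/gfs2_glmutex_unlock() are
 * used further down (see gfs2_reclaim_glock() and clear_glock()) as
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... examine or demote the glock ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 *
 * giving the caller exclusive access to the glock structure without
 * sleeping.
 */
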
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
		if (!new_gh)
			return;
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

void gfs2_glock_inode_squish(struct inode *inode)
{
	struct gfs2_holder gh;
	struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
	gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);
	spin_lock(&gl->gl_spin);
	gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;
	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);
	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED) {
			gh->gh_error = 0;
		} else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED);

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				(gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
		dump_glock(gl);

	return error;
}
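
/*
 * Illustrative sketch of an asynchronous acquisition built from
 * gfs2_glock_nq(), gfs2_glock_poll() and gfs2_glock_wait() below:
 *
 *	gh->gh_flags |= GL_ASYNC;
 *	gfs2_glock_nq(gh);	(never fails for GL_ASYNC requests)
 *	...
 *	if (gfs2_glock_poll(gh))
 *		error = gfs2_glock_wait(gh);
 */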

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}

/**
 * gfs2_glock_be_greedy - make a glock greedy for a while
 * @gl: the glock
 * @time: how long to be greedy, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
		return 1;
	if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
		return 1;
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
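
/*
 * Illustrative sketch: to hold several glocks at once, fill an array of
 * holders and let gfs2_glock_nq_m()/gfs2_glock_dq_m() deal with the
 * deadlock-free ordering (gl_a and gl_b stand for any two glocks):
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_m(2, ghs);
 */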

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

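/*
 * Illustrative sketch: LVB references are counted, so each successful
 * gfs2_lvb_hold() must be balanced by a gfs2_lvb_unhold():
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (error)
 *		return error;
 *	... read or write the buffer at gl->gl_lvb ...
 *	gfs2_lvb_unhold(gl);
 */
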
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies, gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the number of the hash bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(gl_lock_addr(hash));
	list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
	write_unlock(gl_lock_addr(hash));

	for (;;) {
		write_lock(gl_lock_addr(hash));

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &gl_hash_table[hash].hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&gl_hash_table[hash].hb_list);
				write_unlock(gl_lock_addr(hash));
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;
			if (gl->gl_sbd != sdp)
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(gl_lock_addr(hash));

		examiner(gl);
	}
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops)
		goto out;

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
out:
	gfs2_glock_put(gl);
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(scan_glock, sdp, x);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		invalidate_inodes(sdp->sd_vfs);
		msleep(10);
	}
}

/*
 * Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");

	error = 0;

	return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++) {
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	}
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
	print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif
	return 0;
}