[GFS2] Remove semaphore.h from C files
fs/gfs2/glock.c
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/delay.h>
16#include <linux/sort.h>
17#include <linux/jhash.h>
18#include <linux/kref.h>
19#include <linux/kallsyms.h>
20#include <linux/gfs2_ondisk.h>
21#include <asm/uaccess.h>
22
23#include "gfs2.h"
24#include "lm_interface.h"
25#include "incore.h"
26#include "glock.h"
27#include "glops.h"
28#include "inode.h"
29#include "lm.h"
30#include "lops.h"
31#include "meta_io.h"
32#include "quota.h"
33#include "super.h"
34#include "util.h"
35
36/* Must be kept in sync with the beginning of struct gfs2_glock */
37struct glock_plug {
38 struct list_head gl_list;
39 unsigned long gl_flags;
40};
41
42struct greedy {
43 struct gfs2_holder gr_gh;
44 struct work_struct gr_work;
45};
46
47typedef void (*glock_examiner) (struct gfs2_glock * gl);
48
49static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
50
51/**
52 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
53 * @actual: the current state of the lock
54 * @requested: the lock state that was requested by the caller
55 * @flags: the modifier flags passed in by the caller
56 *
57 * Returns: 1 if the locks are compatible, 0 otherwise
58 */
59
60static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
61 int flags)
62{
63 if (actual == requested)
64 return 1;
65
66 if (flags & GL_EXACT)
67 return 0;
68
69 if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
70 return 1;
71
72 if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
73 return 1;
74
75 return 0;
76}
77
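/*
 * Worked example of the rules above (an illustrative sketch, not from the
 * original file): a holder asking for LM_ST_SHARED is satisfied by a glock
 * already held in LM_ST_EXCLUSIVE unless GL_EXACT is set, and LM_FLAG_ANY
 * accepts any state except LM_ST_UNLOCKED:
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)           -> 1
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)    -> 0
 *	relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY)  -> 1
 *	relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY)  -> 0
 */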
78/**
79 * gl_hash() - Turn glock number into hash bucket number
80 * @lock: The glock number
81 *
82 * Returns: The number of the corresponding hash bucket
83 */
84
85static unsigned int gl_hash(struct lm_lockname *name)
86{
87 unsigned int h;
88
89 h = jhash(&name->ln_number, sizeof(uint64_t), 0);
90 h = jhash(&name->ln_type, sizeof(unsigned int), h);
91 h &= GFS2_GL_HASH_MASK;
92
93 return h;
94}
95
96/**
97 * glock_free() - Perform a few checks and then release struct gfs2_glock
98 * @gl: The glock to release
99 *
100 * Also calls lock module to release its internal structure for this glock.
101 *
102 */
103
104static void glock_free(struct gfs2_glock *gl)
105{
106 struct gfs2_sbd *sdp = gl->gl_sbd;
107 struct inode *aspace = gl->gl_aspace;
108
109 gfs2_lm_put_lock(sdp, gl->gl_lock);
110
111 if (aspace)
112 gfs2_aspace_put(aspace);
113
114 kmem_cache_free(gfs2_glock_cachep, gl);
115}
116
117/**
118 * gfs2_glock_hold() - increment reference count on glock
119 * @gl: The glock to hold
120 *
121 */
122
123void gfs2_glock_hold(struct gfs2_glock *gl)
124{
125 kref_get(&gl->gl_ref);
126}
127
128/* All work is done after the return from kref_put() so we
129 can release the write_lock before the free. */
130
131static void kill_glock(struct kref *kref)
132{
133 struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
134 struct gfs2_sbd *sdp = gl->gl_sbd;
135
136 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
137 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
138 gfs2_assert(sdp, list_empty(&gl->gl_holders));
139 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
140 gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
141 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
142}
143
144/**
145 * gfs2_glock_put() - Decrement reference count on glock
146 * @gl: The glock to put
147 *
148 */
149
150int gfs2_glock_put(struct gfs2_glock *gl)
151{
152 struct gfs2_sbd *sdp = gl->gl_sbd;
153 struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
154 int rv = 0;
155
156 mutex_lock(&sdp->sd_invalidate_inodes_mutex);
157
158 write_lock(&bucket->hb_lock);
159 if (kref_put(&gl->gl_ref, kill_glock)) {
160 list_del_init(&gl->gl_list);
161 write_unlock(&bucket->hb_lock);
162 BUG_ON(spin_is_locked(&gl->gl_spin));
163 glock_free(gl);
164 rv = 1;
165 goto out;
166 }
167 write_unlock(&bucket->hb_lock);
168 out:
169 mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
170 return rv;
171}
172
173/**
174 * queue_empty - check to see if a glock's queue is empty
175 * @gl: the glock
176 * @head: the head of the queue to check
177 *
178 * This function protects the list in the event that a process already
179 * has a holder on the list and is adding a second holder for itself.
180 * The glmutex lock is what generally prevents processes from working
181 * on the same glock at once, but the special case of adding a second
182 * holder for yourself ("recursive" locking) doesn't involve locking
183 * glmutex, making the spin lock necessary.
184 *
185 * Returns: 1 if the queue is empty
186 */
187
188static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
189{
190 int empty;
191 spin_lock(&gl->gl_spin);
192 empty = list_empty(head);
193 spin_unlock(&gl->gl_spin);
194 return empty;
195}
196
197/**
198 * search_bucket() - Find struct gfs2_glock by lock number
199 * @bucket: the bucket to search
200 * @name: The lock name
201 *
202 * Returns: NULL, or the struct gfs2_glock with the requested number
203 */
204
205static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
206 struct lm_lockname *name)
207{
208 struct gfs2_glock *gl;
209
210 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
211 if (test_bit(GLF_PLUG, &gl->gl_flags))
212 continue;
213 if (!lm_name_equal(&gl->gl_name, name))
214 continue;
215
216 kref_get(&gl->gl_ref);
217
218 return gl;
219 }
220
221 return NULL;
222}
223
224/**
225 * gfs2_glock_find() - Find glock by lock number
226 * @sdp: The GFS2 superblock
227 * @name: The lock name
228 *
229 * Returns: NULL, or the struct gfs2_glock with the requested number
230 */
231
232static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
233 struct lm_lockname *name)
234{
235 struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
236 struct gfs2_glock *gl;
237
238 read_lock(&bucket->hb_lock);
239 gl = search_bucket(bucket, name);
240 read_unlock(&bucket->hb_lock);
241
242 return gl;
243}
244
245/**
246 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
247 * @sdp: The GFS2 superblock
248 * @number: the lock number
249 * @glops: The glock_operations to use
250 * @create: If 0, don't create the glock if it doesn't exist
251 * @glp: the glock is returned here
252 *
253 * This does not lock a glock, just finds/creates structures for one.
254 *
255 * Returns: errno
256 */
257
258int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
259 struct gfs2_glock_operations *glops, int create,
260 struct gfs2_glock **glp)
261{
262 struct lm_lockname name;
263 struct gfs2_glock *gl, *tmp;
264 struct gfs2_gl_hash_bucket *bucket;
265 int error;
266
267 name.ln_number = number;
268 name.ln_type = glops->go_type;
269 bucket = &sdp->sd_gl_hash[gl_hash(&name)];
270
271 read_lock(&bucket->hb_lock);
272 gl = search_bucket(bucket, &name);
273 read_unlock(&bucket->hb_lock);
274
275 if (gl || !create) {
276 *glp = gl;
277 return 0;
278 }
279
280 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
281 if (!gl)
282 return -ENOMEM;
283
284 memset(gl, 0, sizeof(struct gfs2_glock));
285
286 INIT_LIST_HEAD(&gl->gl_list);
287 gl->gl_name = name;
288 kref_init(&gl->gl_ref);
289
290 spin_lock_init(&gl->gl_spin);
291
292 gl->gl_state = LM_ST_UNLOCKED;
293 INIT_LIST_HEAD(&gl->gl_holders);
294 INIT_LIST_HEAD(&gl->gl_waiters1);
295 INIT_LIST_HEAD(&gl->gl_waiters2);
296 INIT_LIST_HEAD(&gl->gl_waiters3);
297
298 gl->gl_ops = glops;
299
300 gl->gl_bucket = bucket;
301 INIT_LIST_HEAD(&gl->gl_reclaim);
302
303 gl->gl_sbd = sdp;
304
305 lops_init_le(&gl->gl_le, &gfs2_glock_lops);
306 INIT_LIST_HEAD(&gl->gl_ail_list);
307
308 /* If this glock protects actual on-disk data or metadata blocks,
309 create a VFS inode to manage the pages/buffers holding them. */
310 if (glops == &gfs2_inode_glops ||
311 glops == &gfs2_rgrp_glops ||
312 glops == &gfs2_meta_glops) {
313 gl->gl_aspace = gfs2_aspace_get(sdp);
314 if (!gl->gl_aspace) {
315 error = -ENOMEM;
316 goto fail;
317 }
318 }
319
320 error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
321 if (error)
322 goto fail_aspace;
323
324 write_lock(&bucket->hb_lock);
325 tmp = search_bucket(bucket, &name);
326 if (tmp) {
327 write_unlock(&bucket->hb_lock);
328 glock_free(gl);
329 gl = tmp;
330 } else {
331 list_add_tail(&gl->gl_list, &bucket->hb_list);
332 write_unlock(&bucket->hb_lock);
333 }
334
335 *glp = gl;
336
337 return 0;
338
339 fail_aspace:
340 if (gl->gl_aspace)
341 gfs2_aspace_put(gl->gl_aspace);
342
343 fail:
344 kmem_cache_free(gfs2_glock_cachep, gl);
345
346 return error;
347}
348
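/*
 * A minimal usage sketch of gfs2_glock_get() (illustrative only): look up
 * or create the glock for a given block number.  The inode glops are just
 * an example choice; on success the caller owns a reference that must be
 * dropped with gfs2_glock_put().
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 */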
349/**
350 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
351 * @gl: the glock
352 * @state: the state we're requesting
353 * @flags: the modifier flags
354 * @gh: the holder structure
355 *
356 */
357
358void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
359 struct gfs2_holder *gh)
360{
361 INIT_LIST_HEAD(&gh->gh_list);
362 gh->gh_gl = gl;
363 gh->gh_ip = (unsigned long)__builtin_return_address(0);
364 gh->gh_owner = current;
365 gh->gh_state = state;
366 gh->gh_flags = flags;
367 gh->gh_error = 0;
368 gh->gh_iflags = 0;
369 init_completion(&gh->gh_wait);
370
371 if (gh->gh_state == LM_ST_EXCLUSIVE)
372 gh->gh_flags |= GL_LOCAL_EXCL;
373
374 gfs2_glock_hold(gl);
375}
376
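/*
 * Note (illustrative, not from the original file): gfs2_holder_init()
 * takes a reference on the glock via gfs2_glock_hold(), so every holder
 * initialised this way must later be passed to gfs2_holder_uninit()
 * (or gfs2_glock_dq_uninit()) to drop that reference:
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	...
 *	gfs2_holder_uninit(&gh);
 */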
377/**
378 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
379 * @state: the state we're requesting
380 * @flags: the modifier flags
381 * @gh: the holder structure
382 *
383 * Don't mess with the glock.
384 *
385 */
386
387void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
388{
389 gh->gh_state = state;
390 gh->gh_flags = flags;
391 if (gh->gh_state == LM_ST_EXCLUSIVE)
392 gh->gh_flags |= GL_LOCAL_EXCL;
393
394 gh->gh_iflags &= 1 << HIF_ALLOCED;
395 gh->gh_ip = (unsigned long)__builtin_return_address(0);
396}
397
398/**
399 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
400 * @gh: the holder structure
401 *
402 */
403
404void gfs2_holder_uninit(struct gfs2_holder *gh)
405{
406 gfs2_glock_put(gh->gh_gl);
407 gh->gh_gl = NULL;
408 gh->gh_ip = 0;
409}
410
411/**
412 * gfs2_holder_get - get a struct gfs2_holder structure
413 * @gl: the glock
414 * @state: the state we're requesting
415 * @flags: the modifier flags
416 * @gfp_flags: __GFP_NOFAIL
417 *
418 * Figure out how big an impact this function has. Either:
419 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
420 * 2) Leave it like it is
421 *
422 * Returns: the holder structure, NULL on ENOMEM
423 */
424
425static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
426 unsigned int state,
427 int flags, gfp_t gfp_flags)
428{
429 struct gfs2_holder *gh;
430
431 gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
432 if (!gh)
433 return NULL;
434
435 gfs2_holder_init(gl, state, flags, gh);
436 set_bit(HIF_ALLOCED, &gh->gh_iflags);
437 gh->gh_ip = (unsigned long)__builtin_return_address(0);
438 return gh;
439}
440
441/**
442 * gfs2_holder_put - get rid of a struct gfs2_holder structure
443 * @gh: the holder structure
444 *
445 */
446
447static void gfs2_holder_put(struct gfs2_holder *gh)
448{
449 gfs2_holder_uninit(gh);
450 kfree(gh);
451}
452
453/**
454 * rq_mutex - process a mutex request in the queue
455 * @gh: the glock holder
456 *
457 * Returns: 1 if the queue is blocked
458 */
459
460static int rq_mutex(struct gfs2_holder *gh)
461{
462 struct gfs2_glock *gl = gh->gh_gl;
463
464 list_del_init(&gh->gh_list);
465 /* gh->gh_error never examined. */
466 set_bit(GLF_LOCK, &gl->gl_flags);
467 complete(&gh->gh_wait);
468
469 return 1;
470}
471
472/**
473 * rq_promote - process a promote request in the queue
474 * @gh: the glock holder
475 *
476 * Acquire a new inter-node lock, or change a lock state to more restrictive.
477 *
478 * Returns: 1 if the queue is blocked
479 */
480
481static int rq_promote(struct gfs2_holder *gh)
482{
483 struct gfs2_glock *gl = gh->gh_gl;
484 struct gfs2_sbd *sdp = gl->gl_sbd;
485 struct gfs2_glock_operations *glops = gl->gl_ops;
486
487 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
488 if (list_empty(&gl->gl_holders)) {
489 gl->gl_req_gh = gh;
490 set_bit(GLF_LOCK, &gl->gl_flags);
491 spin_unlock(&gl->gl_spin);
492
493 if (atomic_read(&sdp->sd_reclaim_count) >
494 gfs2_tune_get(sdp, gt_reclaim_limit) &&
495 !(gh->gh_flags & LM_FLAG_PRIORITY)) {
496 gfs2_reclaim_glock(sdp);
497 gfs2_reclaim_glock(sdp);
498 }
499
500 glops->go_xmote_th(gl, gh->gh_state,
501 gh->gh_flags);
502
503 spin_lock(&gl->gl_spin);
504 }
505 return 1;
506 }
507
508 if (list_empty(&gl->gl_holders)) {
509 set_bit(HIF_FIRST, &gh->gh_iflags);
510 set_bit(GLF_LOCK, &gl->gl_flags);
511 } else {
512 struct gfs2_holder *next_gh;
513 if (gh->gh_flags & GL_LOCAL_EXCL)
514 return 1;
515 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
516 gh_list);
517 if (next_gh->gh_flags & GL_LOCAL_EXCL)
518 return 1;
519 }
520
521 list_move_tail(&gh->gh_list, &gl->gl_holders);
522 gh->gh_error = 0;
523 set_bit(HIF_HOLDER, &gh->gh_iflags);
524
525 complete(&gh->gh_wait);
526
527 return 0;
528}
529
530/**
531 * rq_demote - process a demote request in the queue
532 * @gh: the glock holder
533 *
534 * Returns: 1 if the queue is blocked
535 */
536
537static int rq_demote(struct gfs2_holder *gh)
538{
539 struct gfs2_glock *gl = gh->gh_gl;
540 struct gfs2_glock_operations *glops = gl->gl_ops;
541
542 if (!list_empty(&gl->gl_holders))
543 return 1;
544
545 if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
546 list_del_init(&gh->gh_list);
547 gh->gh_error = 0;
548 spin_unlock(&gl->gl_spin);
549 if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
550 gfs2_holder_put(gh);
551 else
552 complete(&gh->gh_wait);
553 spin_lock(&gl->gl_spin);
554 } else {
555 gl->gl_req_gh = gh;
556 set_bit(GLF_LOCK, &gl->gl_flags);
557 spin_unlock(&gl->gl_spin);
558
559 if (gh->gh_state == LM_ST_UNLOCKED ||
560 gl->gl_state != LM_ST_EXCLUSIVE)
561 glops->go_drop_th(gl);
562 else
563 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
564
565 spin_lock(&gl->gl_spin);
566 }
567
568 return 0;
569}
570
571/**
572 * rq_greedy - process a queued request to drop greedy status
573 * @gh: the glock holder
574 *
575 * Returns: 1 if the queue is blocked
576 */
577
578static int rq_greedy(struct gfs2_holder *gh)
579{
580 struct gfs2_glock *gl = gh->gh_gl;
581
582 list_del_init(&gh->gh_list);
583 /* gh->gh_error never examined. */
584 clear_bit(GLF_GREEDY, &gl->gl_flags);
585 spin_unlock(&gl->gl_spin);
586
587 gfs2_holder_uninit(gh);
588 kfree(container_of(gh, struct greedy, gr_gh));
589
590 spin_lock(&gl->gl_spin);
591
592 return 0;
593}
594
595/**
596 * run_queue - process holder structures on a glock
597 * @gl: the glock
598 *
599 */
600static void run_queue(struct gfs2_glock *gl)
601{
602 struct gfs2_holder *gh;
603 int blocked = 1;
604
605 for (;;) {
606 if (test_bit(GLF_LOCK, &gl->gl_flags))
607 break;
608
609 if (!list_empty(&gl->gl_waiters1)) {
610 gh = list_entry(gl->gl_waiters1.next,
611 struct gfs2_holder, gh_list);
612
613 if (test_bit(HIF_MUTEX, &gh->gh_iflags))
614 blocked = rq_mutex(gh);
615 else
616 gfs2_assert_warn(gl->gl_sbd, 0);
617
618 } else if (!list_empty(&gl->gl_waiters2) &&
619 !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
620 gh = list_entry(gl->gl_waiters2.next,
621 struct gfs2_holder, gh_list);
622
623 if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
624 blocked = rq_demote(gh);
625 else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
626 blocked = rq_greedy(gh);
627 else
628 gfs2_assert_warn(gl->gl_sbd, 0);
629
630 } else if (!list_empty(&gl->gl_waiters3)) {
631 gh = list_entry(gl->gl_waiters3.next,
632 struct gfs2_holder, gh_list);
633
634 if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
635 blocked = rq_promote(gh);
636 else
637 gfs2_assert_warn(gl->gl_sbd, 0);
638
639 } else
640 break;
641
642 if (blocked)
643 break;
644 }
645}
646
647/**
648 * gfs2_glmutex_lock - acquire a local lock on a glock
649 * @gl: the glock
650 *
651 * Gives caller exclusive access to manipulate a glock structure.
652 */
653
654void gfs2_glmutex_lock(struct gfs2_glock *gl)
655{
656 struct gfs2_holder gh;
657
658 gfs2_holder_init(gl, 0, 0, &gh);
659 set_bit(HIF_MUTEX, &gh.gh_iflags);
660
661 spin_lock(&gl->gl_spin);
662 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
663 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
664 else
665 complete(&gh.gh_wait);
666 spin_unlock(&gl->gl_spin);
667
668 wait_for_completion(&gh.gh_wait);
669 gfs2_holder_uninit(&gh);
670}
671
672/**
673 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
674 * @gl: the glock
675 *
676 * Returns: 1 if the glock is acquired
677 */
678
679static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
680{
681 int acquired = 1;
682
683 spin_lock(&gl->gl_spin);
684 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
685 acquired = 0;
686 spin_unlock(&gl->gl_spin);
687
688 return acquired;
689}
690
691/**
692 * gfs2_glmutex_unlock - release a local lock on a glock
693 * @gl: the glock
694 *
695 */
696
697void gfs2_glmutex_unlock(struct gfs2_glock *gl)
698{
699 spin_lock(&gl->gl_spin);
700 clear_bit(GLF_LOCK, &gl->gl_flags);
701 run_queue(gl);
702 BUG_ON(!spin_is_locked(&gl->gl_spin));
703 spin_unlock(&gl->gl_spin);
704}
705
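/*
 * Typical pattern (illustrative sketch only): the glmutex calls give the
 * caller exclusive access to manipulate a glock structure.  The
 * non-blocking variant is what the reclaim and scan paths below use:
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... examine or modify the glock ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */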
706/**
707 * handle_callback - add a demote request to a lock's queue
708 * @gl: the glock
709 * @state: the state the caller wants us to change to
710 *
711 */
712
713static void handle_callback(struct gfs2_glock *gl, unsigned int state)
714{
715 struct gfs2_holder *gh, *new_gh = NULL;
716
717 restart:
718 spin_lock(&gl->gl_spin);
719
720 list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
721 if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
722 gl->gl_req_gh != gh) {
723 if (gh->gh_state != state)
724 gh->gh_state = LM_ST_UNLOCKED;
725 goto out;
726 }
727 }
728
729 if (new_gh) {
730 list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
731 new_gh = NULL;
732 } else {
733 spin_unlock(&gl->gl_spin);
734
735 new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
736 GFP_KERNEL | __GFP_NOFAIL);
737 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
738 set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
739
740 goto restart;
741 }
742
743 out:
744 spin_unlock(&gl->gl_spin);
745
746 if (new_gh)
747 gfs2_holder_put(new_gh);
748}
749
750/**
751 * state_change - record that the glock is now in a different state
752 * @gl: the glock
753 * @new_state: the new state
754 *
755 */
756
757static void state_change(struct gfs2_glock *gl, unsigned int new_state)
758{
759 int held1, held2;
760
761 held1 = (gl->gl_state != LM_ST_UNLOCKED);
762 held2 = (new_state != LM_ST_UNLOCKED);
763
764 if (held1 != held2) {
765 if (held2)
766 gfs2_glock_hold(gl);
767 else
768 gfs2_glock_put(gl);
769 }
770
771 gl->gl_state = new_state;
772}
773
774/**
775 * xmote_bh - Called after the lock module is done acquiring a lock
776 * @gl: The glock in question
777 * @ret: the int returned from the lock module
778 *
779 */
780
781static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
782{
783 struct gfs2_sbd *sdp = gl->gl_sbd;
784 struct gfs2_glock_operations *glops = gl->gl_ops;
785 struct gfs2_holder *gh = gl->gl_req_gh;
786 int prev_state = gl->gl_state;
787 int op_done = 1;
788
789 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
790 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
791 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
792
793 state_change(gl, ret & LM_OUT_ST_MASK);
794
795 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
796 if (glops->go_inval)
797 glops->go_inval(gl, DIO_METADATA | DIO_DATA);
798 } else if (gl->gl_state == LM_ST_DEFERRED) {
799 /* We might not want to do this here.
800 Look at moving to the inode glops. */
801 if (glops->go_inval)
802 glops->go_inval(gl, DIO_DATA);
803 }
804
805 /* Deal with each possible exit condition */
806
807 if (!gh)
808 gl->gl_stamp = jiffies;
809
810 else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
811 spin_lock(&gl->gl_spin);
812 list_del_init(&gh->gh_list);
813 gh->gh_error = -EIO;
814 spin_unlock(&gl->gl_spin);
815
816 } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
817 spin_lock(&gl->gl_spin);
818 list_del_init(&gh->gh_list);
819 if (gl->gl_state == gh->gh_state ||
820 gl->gl_state == LM_ST_UNLOCKED)
821 gh->gh_error = 0;
822 else {
823 if (gfs2_assert_warn(sdp, gh->gh_flags &
824 (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
825 fs_warn(sdp, "ret = 0x%.8X\n", ret);
826 gh->gh_error = GLR_TRYFAILED;
827 }
828 spin_unlock(&gl->gl_spin);
829
830 if (ret & LM_OUT_CANCELED)
831 handle_callback(gl, LM_ST_UNLOCKED); /* Lame */
832
833 } else if (ret & LM_OUT_CANCELED) {
834 spin_lock(&gl->gl_spin);
835 list_del_init(&gh->gh_list);
836 gh->gh_error = GLR_CANCELED;
837 spin_unlock(&gl->gl_spin);
838
839 } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
840 spin_lock(&gl->gl_spin);
841 list_move_tail(&gh->gh_list, &gl->gl_holders);
842 gh->gh_error = 0;
843 set_bit(HIF_HOLDER, &gh->gh_iflags);
844 spin_unlock(&gl->gl_spin);
845
846 set_bit(HIF_FIRST, &gh->gh_iflags);
847
848 op_done = 0;
849
850 } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
851 spin_lock(&gl->gl_spin);
852 list_del_init(&gh->gh_list);
853 gh->gh_error = GLR_TRYFAILED;
854 spin_unlock(&gl->gl_spin);
855
856 } else {
857 if (gfs2_assert_withdraw(sdp, 0) == -1)
858 fs_err(sdp, "ret = 0x%.8X\n", ret);
859 }
860
861 if (glops->go_xmote_bh)
862 glops->go_xmote_bh(gl);
863
864 if (op_done) {
865 spin_lock(&gl->gl_spin);
866 gl->gl_req_gh = NULL;
867 gl->gl_req_bh = NULL;
868 clear_bit(GLF_LOCK, &gl->gl_flags);
869 run_queue(gl);
870 spin_unlock(&gl->gl_spin);
871 }
872
873 gfs2_glock_put(gl);
874
875 if (gh) {
876 if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
877 gfs2_holder_put(gh);
878 else
879 complete(&gh->gh_wait);
880 }
881}
882
883/**
884 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
885 * @gl: The glock in question
886 * @state: the requested state
887 * @flags: modifier flags to the lock call
888 *
889 */
890
891void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
892{
893 struct gfs2_sbd *sdp = gl->gl_sbd;
894 struct gfs2_glock_operations *glops = gl->gl_ops;
895 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
896 LM_FLAG_NOEXP | LM_FLAG_ANY |
897 LM_FLAG_PRIORITY);
898 unsigned int lck_ret;
899
900 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
901 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
902 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
903 gfs2_assert_warn(sdp, state != gl->gl_state);
904
905 if (gl->gl_state == LM_ST_EXCLUSIVE) {
906 if (glops->go_sync)
907 glops->go_sync(gl,
908 DIO_METADATA | DIO_DATA | DIO_RELEASE);
909 }
910
911 gfs2_glock_hold(gl);
912 gl->gl_req_bh = xmote_bh;
913
914 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
915 lck_flags);
916
917 if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
918 return;
919
920 if (lck_ret & LM_OUT_ASYNC)
921 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
922 else
923 xmote_bh(gl, lck_ret);
924}
925
926/**
927 * drop_bh - Called after a lock module unlock completes
928 * @gl: the glock
929 * @ret: the return status
930 *
931 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
932 * Doesn't drop the reference on the glock the top half took out
933 *
934 */
935
936static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
937{
938 struct gfs2_sbd *sdp = gl->gl_sbd;
939 struct gfs2_glock_operations *glops = gl->gl_ops;
940 struct gfs2_holder *gh = gl->gl_req_gh;
941
942 clear_bit(GLF_PREFETCH, &gl->gl_flags);
943
944 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
945 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
946 gfs2_assert_warn(sdp, !ret);
947
948 state_change(gl, LM_ST_UNLOCKED);
949
950 if (glops->go_inval)
951 glops->go_inval(gl, DIO_METADATA | DIO_DATA);
952
953 if (gh) {
954 spin_lock(&gl->gl_spin);
955 list_del_init(&gh->gh_list);
956 gh->gh_error = 0;
957 spin_unlock(&gl->gl_spin);
958 }
959
960 if (glops->go_drop_bh)
961 glops->go_drop_bh(gl);
962
963 spin_lock(&gl->gl_spin);
964 gl->gl_req_gh = NULL;
965 gl->gl_req_bh = NULL;
966 clear_bit(GLF_LOCK, &gl->gl_flags);
967 run_queue(gl);
968 spin_unlock(&gl->gl_spin);
969
970 gfs2_glock_put(gl);
971
972 if (gh) {
973 if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
974 gfs2_holder_put(gh);
975 else
976 complete(&gh->gh_wait);
977 }
978}
979
980/**
981 * gfs2_glock_drop_th - call into the lock module to unlock a lock
982 * @gl: the glock
983 *
984 */
985
986void gfs2_glock_drop_th(struct gfs2_glock *gl)
987{
988 struct gfs2_sbd *sdp = gl->gl_sbd;
989 struct gfs2_glock_operations *glops = gl->gl_ops;
990 unsigned int ret;
991
992 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
993 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
994 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
995
996 if (gl->gl_state == LM_ST_EXCLUSIVE) {
997 if (glops->go_sync)
998 glops->go_sync(gl,
999 DIO_METADATA | DIO_DATA | DIO_RELEASE);
1000 }
1001
1002 gfs2_glock_hold(gl);
1003 gl->gl_req_bh = drop_bh;
1004
1005 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
1006
1007 if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
1008 return;
1009
1010 if (!ret)
1011 drop_bh(gl, ret);
1012 else
1013 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
1014}
1015
1016/**
1017 * do_cancels - cancel requests for locks stuck waiting on an expire flag
1018 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
1019 *
1020 * Don't cancel GL_NOCANCEL requests.
1021 */
1022
1023static void do_cancels(struct gfs2_holder *gh)
1024{
1025 struct gfs2_glock *gl = gh->gh_gl;
1026
1027 spin_lock(&gl->gl_spin);
1028
1029 while (gl->gl_req_gh != gh &&
1030 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1031 !list_empty(&gh->gh_list)) {
1032 if (gl->gl_req_bh &&
1033 !(gl->gl_req_gh &&
1034 (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
1035 spin_unlock(&gl->gl_spin);
1036 gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
1037 msleep(100);
1038 spin_lock(&gl->gl_spin);
1039 } else {
1040 spin_unlock(&gl->gl_spin);
1041 msleep(100);
1042 spin_lock(&gl->gl_spin);
1043 }
1044 }
1045
1046 spin_unlock(&gl->gl_spin);
1047}
1048
1049/**
1050 * glock_wait_internal - wait on a glock acquisition
1051 * @gh: the glock holder
1052 *
1053 * Returns: 0 on success
1054 */
1055
1056static int glock_wait_internal(struct gfs2_holder *gh)
1057{
1058 struct gfs2_glock *gl = gh->gh_gl;
1059 struct gfs2_sbd *sdp = gl->gl_sbd;
1060 struct gfs2_glock_operations *glops = gl->gl_ops;
1061
1062 if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1063 return -EIO;
1064
1065 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1066 spin_lock(&gl->gl_spin);
1067 if (gl->gl_req_gh != gh &&
1068 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1069 !list_empty(&gh->gh_list)) {
1070 list_del_init(&gh->gh_list);
1071 gh->gh_error = GLR_TRYFAILED;
1072 run_queue(gl);
1073 spin_unlock(&gl->gl_spin);
1074 return gh->gh_error;
1075 }
1076 spin_unlock(&gl->gl_spin);
1077 }
1078
1079 if (gh->gh_flags & LM_FLAG_PRIORITY)
1080 do_cancels(gh);
1081
1082 wait_for_completion(&gh->gh_wait);
1083
1084 if (gh->gh_error)
1085 return gh->gh_error;
1086
1087 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1088 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
1089 gh->gh_state,
1090 gh->gh_flags));
1091
1092 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1093 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1094
1095 if (glops->go_lock) {
1096 gh->gh_error = glops->go_lock(gh);
1097 if (gh->gh_error) {
1098 spin_lock(&gl->gl_spin);
1099 list_del_init(&gh->gh_list);
1100 spin_unlock(&gl->gl_spin);
1101 }
1102 }
1103
1104 spin_lock(&gl->gl_spin);
1105 gl->gl_req_gh = NULL;
1106 gl->gl_req_bh = NULL;
1107 clear_bit(GLF_LOCK, &gl->gl_flags);
1108 run_queue(gl);
1109 spin_unlock(&gl->gl_spin);
1110 }
1111
1112 return gh->gh_error;
1113}
1114
1115static inline struct gfs2_holder *
1116find_holder_by_owner(struct list_head *head, struct task_struct *owner)
1117{
1118 struct gfs2_holder *gh;
1119
1120 list_for_each_entry(gh, head, gh_list) {
1121 if (gh->gh_owner == owner)
1122 return gh;
1123 }
1124
1125 return NULL;
1126}
1127
1128/**
1129 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1130 * @gh: the holder structure to add
1131 *
1132 */
1133
1134static void add_to_queue(struct gfs2_holder *gh)
1135{
1136 struct gfs2_glock *gl = gh->gh_gl;
1137 struct gfs2_holder *existing;
1138
1139 BUG_ON(!gh->gh_owner);
1140
1141 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1142 if (existing) {
1143 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1144 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1145 BUG();
1146 }
1147
1148 existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
1149 if (existing) {
1150 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1151 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1152 BUG();
1153 }
1154
1155 if (gh->gh_flags & LM_FLAG_PRIORITY)
1156 list_add(&gh->gh_list, &gl->gl_waiters3);
1157 else
1158 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1159}
1160
1161/**
1162 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1163 * @gh: the holder structure
1164 *
1165 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1166 *
1167 * Returns: 0, GLR_TRYFAILED, or errno on failure
1168 */
1169
1170int gfs2_glock_nq(struct gfs2_holder *gh)
1171{
1172 struct gfs2_glock *gl = gh->gh_gl;
1173 struct gfs2_sbd *sdp = gl->gl_sbd;
1174 int error = 0;
1175
1176 restart:
1177 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1178 set_bit(HIF_ABORTED, &gh->gh_iflags);
1179 return -EIO;
1180 }
1181
1182 set_bit(HIF_PROMOTE, &gh->gh_iflags);
1183
1184 spin_lock(&gl->gl_spin);
1185 add_to_queue(gh);
1186 run_queue(gl);
1187 spin_unlock(&gl->gl_spin);
1188
1189 if (!(gh->gh_flags & GL_ASYNC)) {
1190 error = glock_wait_internal(gh);
1191 if (error == GLR_CANCELED) {
1192 msleep(100);
1193 goto restart;
1194 }
1195 }
1196
1197 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1198
1199 return error;
1200}
1201
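/*
 * A minimal blocking acquire/release sketch built from the primitives in
 * this file (illustrative only; the shared state and zero flags are just
 * an example):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	... the glock is held here ...
 *	gfs2_glock_dq_uninit(&gh);
 */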
1202/**
1203 * gfs2_glock_poll - poll to see if an async request has been completed
1204 * @gh: the holder
1205 *
1206 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1207 */
1208
1209int gfs2_glock_poll(struct gfs2_holder *gh)
1210{
1211 struct gfs2_glock *gl = gh->gh_gl;
1212 int ready = 0;
1213
1214 spin_lock(&gl->gl_spin);
1215
1216 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1217 ready = 1;
1218 else if (list_empty(&gh->gh_list)) {
1219 if (gh->gh_error == GLR_CANCELED) {
1220 spin_unlock(&gl->gl_spin);
1221 msleep(100);
1222 if (gfs2_glock_nq(gh))
1223 return 1;
1224 return 0;
1225 } else
1226 ready = 1;
1227 }
1228
1229 spin_unlock(&gl->gl_spin);
1230
1231 return ready;
1232}
1233
1234/**
1235 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1236 * @gh: the holder structure
1237 *
1238 * Returns: 0, GLR_TRYFAILED, or errno on failure
1239 */
1240
1241int gfs2_glock_wait(struct gfs2_holder *gh)
1242{
1243 int error;
1244
1245 error = glock_wait_internal(gh);
1246 if (error == GLR_CANCELED) {
1247 msleep(100);
1248 gh->gh_flags &= ~GL_ASYNC;
1249 error = gfs2_glock_nq(gh);
1250 }
1251
1252 return error;
1253}
1254
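/*
 * Asynchronous acquisition sketch (illustrative only): with GL_ASYNC,
 * gfs2_glock_nq() queues the request and returns without waiting; the
 * caller may check progress with gfs2_glock_poll() and completes the
 * request with gfs2_glock_wait():
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	... do other work, optionally polling gfs2_glock_poll(&gh) ...
 *	error = gfs2_glock_wait(&gh);
 */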
1255/**
1256 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1257 * @gh: the glock holder
1258 *
1259 */
1260
1261void gfs2_glock_dq(struct gfs2_holder *gh)
1262{
1263 struct gfs2_glock *gl = gh->gh_gl;
1264 struct gfs2_glock_operations *glops = gl->gl_ops;
1265
1266 if (gh->gh_flags & GL_SYNC)
1267 set_bit(GLF_SYNC, &gl->gl_flags);
1268
1269 if (gh->gh_flags & GL_NOCACHE)
1270 handle_callback(gl, LM_ST_UNLOCKED);
1271
1272 gfs2_glmutex_lock(gl);
1273
1274 spin_lock(&gl->gl_spin);
1275 list_del_init(&gh->gh_list);
1276
1277 if (list_empty(&gl->gl_holders)) {
1278 spin_unlock(&gl->gl_spin);
1279
1280 if (glops->go_unlock)
1281 glops->go_unlock(gh);
1282
1283 if (test_bit(GLF_SYNC, &gl->gl_flags)) {
1284 if (glops->go_sync)
1285 glops->go_sync(gl, DIO_METADATA | DIO_DATA);
1286 }
1287
1288 gl->gl_stamp = jiffies;
1289
1290 spin_lock(&gl->gl_spin);
1291 }
1292
1293 clear_bit(GLF_LOCK, &gl->gl_flags);
1294 run_queue(gl);
1295 spin_unlock(&gl->gl_spin);
1296}
1297
1298/**
1299 * gfs2_glock_prefetch - Try to prefetch a glock
1300 * @gl: the glock
1301 * @state: the state to prefetch in
1302 * @flags: flags passed to go_xmote_th()
1303 *
1304 */
1305
1306static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1307 int flags)
1308{
1309 struct gfs2_glock_operations *glops = gl->gl_ops;
1310
1311 spin_lock(&gl->gl_spin);
1312
1313 if (test_bit(GLF_LOCK, &gl->gl_flags) ||
1314 !list_empty(&gl->gl_holders) ||
1315 !list_empty(&gl->gl_waiters1) ||
1316 !list_empty(&gl->gl_waiters2) ||
1317 !list_empty(&gl->gl_waiters3) ||
1318 relaxed_state_ok(gl->gl_state, state, flags)) {
1319 spin_unlock(&gl->gl_spin);
1320 return;
1321 }
1322
1323 set_bit(GLF_PREFETCH, &gl->gl_flags);
1324 set_bit(GLF_LOCK, &gl->gl_flags);
1325 spin_unlock(&gl->gl_spin);
1326
1327 glops->go_xmote_th(gl, state, flags);
1328}
1329
1330static void greedy_work(void *data)
1331{
1332 struct greedy *gr = data;
1333 struct gfs2_holder *gh = &gr->gr_gh;
1334 struct gfs2_glock *gl = gh->gh_gl;
1335 struct gfs2_glock_operations *glops = gl->gl_ops;
1336
1337 clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1338
1339 if (glops->go_greedy)
1340 glops->go_greedy(gl);
1341
1342 spin_lock(&gl->gl_spin);
1343
1344 if (list_empty(&gl->gl_waiters2)) {
1345 clear_bit(GLF_GREEDY, &gl->gl_flags);
1346 spin_unlock(&gl->gl_spin);
1347 gfs2_holder_uninit(gh);
1348 kfree(gr);
1349 } else {
1350 gfs2_glock_hold(gl);
1351 list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1352 run_queue(gl);
1353 spin_unlock(&gl->gl_spin);
1354 gfs2_glock_put(gl);
1355 }
1356}
1357
1358/**
1359 * gfs2_glock_be_greedy - hold on to a glock greedily for a while
1360 * @gl: the glock
1361 * @time: the delay, in jiffies, before greedy status is dropped
1362 *
1363 * Returns: 0 if go_greedy will be called, 1 otherwise
1364 */
1365
1366int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1367{
1368 struct greedy *gr;
1369 struct gfs2_holder *gh;
1370
1371 if (!time ||
1372 gl->gl_sbd->sd_args.ar_localcaching ||
1373 test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1374 return 1;
1375
1376 gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1377 if (!gr) {
1378 clear_bit(GLF_GREEDY, &gl->gl_flags);
1379 return 1;
1380 }
1381 gh = &gr->gr_gh;
1382
1383 gfs2_holder_init(gl, 0, 0, gh);
1384 set_bit(HIF_GREEDY, &gh->gh_iflags);
1385 INIT_WORK(&gr->gr_work, greedy_work, gr);
1386
1387 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1388 schedule_delayed_work(&gr->gr_work, time);
1389
1390 return 0;
1391}
1392
1393/**
1394 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1395 * @gh: the holder structure
1396 *
1397 */
1398
1399void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1400{
1401 gfs2_glock_dq(gh);
1402 gfs2_holder_uninit(gh);
1403}
1404
1405/**
1406 * gfs2_glock_nq_num - acquire a glock based on lock number
1407 * @sdp: the filesystem
1408 * @number: the lock number
1409 * @glops: the glock operations for the type of glock
1410 * @state: the state to acquire the glock in
1411 * @flags: modifier flags for the acquisition
1412 * @gh: the struct gfs2_holder
1413 *
1414 * Returns: errno
1415 */
1416
1417int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
1418 struct gfs2_glock_operations *glops, unsigned int state,
1419 int flags, struct gfs2_holder *gh)
1420{
1421 struct gfs2_glock *gl;
1422 int error;
1423
1424 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1425 if (!error) {
1426 error = gfs2_glock_nq_init(gl, state, flags, gh);
1427 gfs2_glock_put(gl);
1428 }
1429
1430 return error;
1431}
1432
1433/**
1434 * glock_compare - Compare two struct gfs2_glock structures for sorting
1435 * @arg_a: the first structure
1436 * @arg_b: the second structure
1437 *
1438 */
1439
1440static int glock_compare(const void *arg_a, const void *arg_b)
1441{
1442 struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
1443 struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
1444 struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1445 struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1446 int ret = 0;
1447
1448 if (a->ln_number > b->ln_number)
1449 ret = 1;
1450 else if (a->ln_number < b->ln_number)
1451 ret = -1;
1452 else {
1453 if (gh_a->gh_state == LM_ST_SHARED &&
1454 gh_b->gh_state == LM_ST_EXCLUSIVE)
1455 ret = 1;
1456 else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
1457 (gh_b->gh_flags & GL_LOCAL_EXCL))
1458 ret = 1;
1459 }
1460
1461 return ret;
1462}
1463
1464/**
1465 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1466 * @num_gh: the number of structures
1467 * @ghs: an array of struct gfs2_holder structures
1468 *
1469 * Returns: 0 on success (all glocks acquired),
1470 * errno on failure (no glocks acquired)
1471 */
1472
1473static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1474 struct gfs2_holder **p)
1475{
1476 unsigned int x;
1477 int error = 0;
1478
1479 for (x = 0; x < num_gh; x++)
1480 p[x] = &ghs[x];
1481
1482 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1483
1484 for (x = 0; x < num_gh; x++) {
1485 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1486
1487 error = gfs2_glock_nq(p[x]);
1488 if (error) {
1489 while (x--)
1490 gfs2_glock_dq(p[x]);
1491 break;
1492 }
1493 }
1494
1495 return error;
1496}
1497
1498/**
1499 * gfs2_glock_nq_m - acquire multiple glocks
1500 * @num_gh: the number of structures
1501 * @ghs: an array of struct gfs2_holder structures
1502 *
1503 * Figure out how big an impact this function has. Either:
1504 * 1) Replace this code with code that calls gfs2_glock_prefetch()
1505 * 2) Forget async stuff and just call nq_m_sync()
1506 * 3) Leave it like it is
1507 *
1508 * Returns: 0 on success (all glocks acquired),
1509 * errno on failure (no glocks acquired)
1510 */
1511
1512int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1513{
1514 int *e;
1515 unsigned int x;
1516 int borked = 0, serious = 0;
1517 int error = 0;
1518
1519 if (!num_gh)
1520 return 0;
1521
1522 if (num_gh == 1) {
1523 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1524 return gfs2_glock_nq(ghs);
1525 }
1526
1527 e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1528 if (!e)
1529 return -ENOMEM;
1530
1531 for (x = 0; x < num_gh; x++) {
1532 ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1533 error = gfs2_glock_nq(&ghs[x]);
1534 if (error) {
1535 borked = 1;
1536 serious = error;
1537 num_gh = x;
1538 break;
1539 }
1540 }
1541
1542 for (x = 0; x < num_gh; x++) {
1543 error = e[x] = glock_wait_internal(&ghs[x]);
1544 if (error) {
1545 borked = 1;
1546 if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1547 serious = error;
1548 }
1549 }
1550
1551 if (!borked) {
1552 kfree(e);
1553 return 0;
1554 }
1555
1556 for (x = 0; x < num_gh; x++)
1557 if (!e[x])
1558 gfs2_glock_dq(&ghs[x]);
1559
1560 if (serious)
1561 error = serious;
1562 else {
1563 for (x = 0; x < num_gh; x++)
1564 gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1565 &ghs[x]);
1566 error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1567 }
1568
1569 kfree(e);
1570
1571 return error;
1572}
1573
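/*
 * Sketch of acquiring several glocks at once (illustrative only; gl_a and
 * gl_b stand in for glocks the caller already holds references on).
 * gfs2_glock_nq_m() acquires all of the locks or none of them:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		... both locks held ...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */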
1574/**
1575 * gfs2_glock_dq_m - release multiple glocks
1576 * @num_gh: the number of structures
1577 * @ghs: an array of struct gfs2_holder structures
1578 *
1579 */
1580
1581void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1582{
1583 unsigned int x;
1584
1585 for (x = 0; x < num_gh; x++)
1586 gfs2_glock_dq(&ghs[x]);
1587}
1588
1589/**
1590 * gfs2_glock_dq_uninit_m - release multiple glocks
1591 * @num_gh: the number of structures
1592 * @ghs: an array of struct gfs2_holder structures
1593 *
1594 */
1595
1596void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1597{
1598 unsigned int x;
1599
1600 for (x = 0; x < num_gh; x++)
1601 gfs2_glock_dq_uninit(&ghs[x]);
1602}
1603
1604/**
1605 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1606 * @sdp: the filesystem
1607 * @number: the lock number
1608 * @glops: the glock operations for the type of glock
1609 * @state: the state to acquire the glock in
1610 * @flags: modifier flags for the acquisition
1611 *
1612 * Returns: errno
1613 */
1614
1615void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
1616 struct gfs2_glock_operations *glops,
1617 unsigned int state, int flags)
1618{
1619 struct gfs2_glock *gl;
1620 int error;
1621
1622 if (atomic_read(&sdp->sd_reclaim_count) <
1623 gfs2_tune_get(sdp, gt_reclaim_limit)) {
1624 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1625 if (!error) {
1626 gfs2_glock_prefetch(gl, state, flags);
1627 gfs2_glock_put(gl);
1628 }
1629 }
1630}
1631
1632/**
1633 * gfs2_lvb_hold - attach an LVB to a glock
1634 * @gl: The glock in question
1635 *
1636 */
1637
1638int gfs2_lvb_hold(struct gfs2_glock *gl)
1639{
1640 int error;
1641
1642 gfs2_glmutex_lock(gl);
1643
1644 if (!atomic_read(&gl->gl_lvb_count)) {
1645 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1646 if (error) {
1647 gfs2_glmutex_unlock(gl);
1648 return error;
1649 }
1650 gfs2_glock_hold(gl);
1651 }
1652 atomic_inc(&gl->gl_lvb_count);
1653
1654 gfs2_glmutex_unlock(gl);
1655
1656 return 0;
1657}
1658
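/*
 * LVB usage sketch (illustrative only): while the hold count is non-zero,
 * gl->gl_lvb points at the lock value block supplied by the lock module.
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (error)
 *		return error;
 *	... read or write gl->gl_lvb ...
 *	gfs2_lvb_unhold(gl);
 */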
1659/**
1660 * gfs2_lvb_unhold - detach an LVB from a glock
1661 * @gl: The glock in question
1662 *
1663 */
1664
1665void gfs2_lvb_unhold(struct gfs2_glock *gl)
1666{
1667 gfs2_glock_hold(gl);
1668 gfs2_glmutex_lock(gl);
1669
1670 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1671 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1672 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1673 gl->gl_lvb = NULL;
1674 gfs2_glock_put(gl);
1675 }
1676
1677 gfs2_glmutex_unlock(gl);
1678 gfs2_glock_put(gl);
1679}
1680
1681#if 0
1682void gfs2_lvb_sync(struct gfs2_glock *gl)
1683{
1684 gfs2_glmutex_lock(gl);
1685
1686 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
1687 if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
1688 gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1689
1690 gfs2_glmutex_unlock(gl);
1691}
1692#endif /* 0 */
1693
1694static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1695 unsigned int state)
1696{
1697 struct gfs2_glock *gl;
1698
1699 gl = gfs2_glock_find(sdp, name);
1700 if (!gl)
1701 return;
1702
1703 if (gl->gl_ops->go_callback)
1704 gl->gl_ops->go_callback(gl, state);
1705 handle_callback(gl, state);
1706
1707 spin_lock(&gl->gl_spin);
1708 run_queue(gl);
1709 spin_unlock(&gl->gl_spin);
1710
1711 gfs2_glock_put(gl);
1712}
1713
1714/**
1715 * gfs2_glock_cb - Callback used by locking module
1716 * @fsdata: Pointer to the superblock
1717 * @type: Type of callback
1718 * @data: Type dependent data pointer
1719 *
1720 * Called by the locking module when it wants to tell us something.
1721 * Either we need to drop a lock, one of our ASYNC requests completed, or
1722 * a journal from another client needs to be recovered.
1723 */
1724
1725void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
1726{
1727 struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;
1728
1729 switch (type) {
1730 case LM_CB_NEED_E:
1731 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1732 return;
1733
1734 case LM_CB_NEED_D:
1735 blocking_cb(sdp, data, LM_ST_DEFERRED);
1736 return;
1737
1738 case LM_CB_NEED_S:
1739 blocking_cb(sdp, data, LM_ST_SHARED);
1740 return;
1741
1742 case LM_CB_ASYNC: {
1743 struct lm_async_cb *async = data;
1744 struct gfs2_glock *gl;
1745
1746 gl = gfs2_glock_find(sdp, &async->lc_name);
1747 if (gfs2_assert_warn(sdp, gl))
1748 return;
1749 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1750 gl->gl_req_bh(gl, async->lc_ret);
1751 gfs2_glock_put(gl);
1752 return;
1753 }
1754
1755 case LM_CB_NEED_RECOVERY:
1756 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1757 if (sdp->sd_recoverd_process)
1758 wake_up_process(sdp->sd_recoverd_process);
1759 return;
1760
1761 case LM_CB_DROPLOCKS:
1762 gfs2_gl_hash_clear(sdp, NO_WAIT);
1763 gfs2_quota_scan(sdp);
1764 return;
1765
1766 default:
1767 gfs2_assert_warn(sdp, 0);
1768 return;
1769 }
1770}
1771
1772/**
1773 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
1774 * @sdp: the filesystem
1775 * @inum: the inode number
1776 *
1777 */
1778
1779void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
1780{
1781 struct gfs2_glock *gl;
1782 struct gfs2_inode *ip;
1783 int error;
1784
1785 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
1786 NO_CREATE, &gl);
1787 if (error || !gl)
1788 return;
1789
1790 if (!gfs2_glmutex_trylock(gl))
1791 goto out;
1792
1793 ip = gl->gl_object;
1794 if (!ip)
1795 goto out_unlock;
1796
1797 if (atomic_read(&ip->i_count))
1798 goto out_unlock;
1799
1800 gfs2_inode_destroy(ip, 1);
1801
1802 out_unlock:
1803 gfs2_glmutex_unlock(gl);
1804
1805 out:
1806 gfs2_glock_put(gl);
1807}
1808
1809/**
1810 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
1811 * iopen glock from memory
1812 * @io_gl: the iopen glock
1813 * @state: the state into which the glock should be put
1814 *
1815 */
1816
1817void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
1818{
1819 struct gfs2_glock *i_gl;
1820
1821 if (state != LM_ST_UNLOCKED)
1822 return;
1823
1824 spin_lock(&io_gl->gl_spin);
1825 i_gl = io_gl->gl_object;
1826 if (i_gl) {
1827 gfs2_glock_hold(i_gl);
1828 spin_unlock(&io_gl->gl_spin);
1829 } else {
1830 spin_unlock(&io_gl->gl_spin);
1831 return;
1832 }
1833
1834 if (gfs2_glmutex_trylock(i_gl)) {
1835 struct gfs2_inode *ip = i_gl->gl_object;
1836 if (ip) {
1837 gfs2_try_toss_vnode(ip);
1838 gfs2_glmutex_unlock(i_gl);
1839 gfs2_glock_schedule_for_reclaim(i_gl);
1840 goto out;
1841 }
1842 gfs2_glmutex_unlock(i_gl);
1843 }
1844
1845 out:
1846 gfs2_glock_put(i_gl);
1847}
1848
1849/**
1850 * demote_ok - Check to see if it's ok to unlock a glock
1851 * @gl: the glock
1852 *
1853 * Returns: 1 if it's ok
1854 */
1855
1856static int demote_ok(struct gfs2_glock *gl)
1857{
1858 struct gfs2_sbd *sdp = gl->gl_sbd;
1859 struct gfs2_glock_operations *glops = gl->gl_ops;
1860 int demote = 1;
1861
1862 if (test_bit(GLF_STICKY, &gl->gl_flags))
1863 demote = 0;
1864 else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
1865 demote = time_after_eq(jiffies,
1866 gl->gl_stamp +
1867 gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
1868 else if (glops->go_demote_ok)
1869 demote = glops->go_demote_ok(gl);
1870
1871 return demote;
1872}
1873
1874/**
1875 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1876 * @gl: the glock
1877 *
1878 */
1879
1880void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1881{
1882 struct gfs2_sbd *sdp = gl->gl_sbd;
1883
1884 spin_lock(&sdp->sd_reclaim_lock);
1885 if (list_empty(&gl->gl_reclaim)) {
1886 gfs2_glock_hold(gl);
1887 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1888 atomic_inc(&sdp->sd_reclaim_count);
1889 }
1890 spin_unlock(&sdp->sd_reclaim_lock);
1891
1892 wake_up(&sdp->sd_reclaim_wq);
1893}
1894
1895/**
1896 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1897 * @sdp: the filesystem
1898 *
1899 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1900 * different glock and we notice that there are a lot of glocks in the
1901 * reclaim list.
1902 *
1903 */
1904
1905void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1906{
1907 struct gfs2_glock *gl;
1908
1909 spin_lock(&sdp->sd_reclaim_lock);
1910 if (list_empty(&sdp->sd_reclaim_list)) {
1911 spin_unlock(&sdp->sd_reclaim_lock);
1912 return;
1913 }
1914 gl = list_entry(sdp->sd_reclaim_list.next,
1915 struct gfs2_glock, gl_reclaim);
1916 list_del_init(&gl->gl_reclaim);
1917 spin_unlock(&sdp->sd_reclaim_lock);
1918
1919 atomic_dec(&sdp->sd_reclaim_count);
1920 atomic_inc(&sdp->sd_reclaimed);
1921
1922 if (gfs2_glmutex_trylock(gl)) {
1923 if (gl->gl_ops == &gfs2_inode_glops) {
1924 struct gfs2_inode *ip = gl->gl_object;
1925 if (ip && !atomic_read(&ip->i_count))
1926 gfs2_inode_destroy(ip, 1);
1927 }
1928 if (queue_empty(gl, &gl->gl_holders) &&
1929 gl->gl_state != LM_ST_UNLOCKED &&
1930 demote_ok(gl))
1931 handle_callback(gl, LM_ST_UNLOCKED);
1932 gfs2_glmutex_unlock(gl);
1933 }
1934
1935 gfs2_glock_put(gl);
1936}
1937
1938/**
1939 * examine_bucket - Call a function for each glock in a hash bucket
1940 * @examiner: the function
1941 * @sdp: the filesystem
1942 * @bucket: the bucket
1943 *
1944 * Returns: 1 if the bucket has entries
1945 */
1946
1947static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1948 struct gfs2_gl_hash_bucket *bucket)
1949{
1950 struct glock_plug plug;
1951 struct list_head *tmp;
1952 struct gfs2_glock *gl;
1953 int entries;
1954
1955 /* Add "plug" to end of bucket list, work back up list from there */
1956 memset(&plug.gl_flags, 0, sizeof(unsigned long));
1957 set_bit(GLF_PLUG, &plug.gl_flags);
1958
1959 write_lock(&bucket->hb_lock);
1960 list_add(&plug.gl_list, &bucket->hb_list);
1961 write_unlock(&bucket->hb_lock);
1962
1963 for (;;) {
1964 write_lock(&bucket->hb_lock);
1965
1966 for (;;) {
1967 tmp = plug.gl_list.next;
1968
1969 if (tmp == &bucket->hb_list) {
1970 list_del(&plug.gl_list);
1971 entries = !list_empty(&bucket->hb_list);
1972 write_unlock(&bucket->hb_lock);
1973 return entries;
1974 }
1975 gl = list_entry(tmp, struct gfs2_glock, gl_list);
1976
1977 /* Move plug up list */
1978 list_move(&plug.gl_list, &gl->gl_list);
1979
1980 if (test_bit(GLF_PLUG, &gl->gl_flags))
1981 continue;
1982
1983 /* examiner() must glock_put() */
1984 gfs2_glock_hold(gl);
1985
1986 break;
1987 }
1988
1989 write_unlock(&bucket->hb_lock);
1990
1991 examiner(gl);
1992 }
1993}
1994
1995/**
1996 * scan_glock - look at a glock and see if we can reclaim it
1997 * @gl: the glock to look at
1998 *
1999 */
2000
2001static void scan_glock(struct gfs2_glock *gl)
2002{
2003 if (gfs2_glmutex_trylock(gl)) {
2004 if (gl->gl_ops == &gfs2_inode_glops) {
2005 struct gfs2_inode *ip = gl->gl_object;
2006 if (ip && !atomic_read(&ip->i_count))
2007 goto out_schedule;
2008 }
2009 if (queue_empty(gl, &gl->gl_holders) &&
2010 gl->gl_state != LM_ST_UNLOCKED &&
2011 demote_ok(gl))
2012 goto out_schedule;
2013
2014 gfs2_glmutex_unlock(gl);
2015 }
2016
2017 gfs2_glock_put(gl);
2018
2019 return;
2020
2021 out_schedule:
2022 gfs2_glmutex_unlock(gl);
2023 gfs2_glock_schedule_for_reclaim(gl);
2024 gfs2_glock_put(gl);
2025}
2026
2027/**
2028 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
2029 * @sdp: the filesystem
2030 *
2031 */
2032
2033void gfs2_scand_internal(struct gfs2_sbd *sdp)
2034{
2035 unsigned int x;
2036
2037 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2038 examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
2039 cond_resched();
2040 }
2041}
2042
2043/**
2044 * clear_glock - look at a glock and see if we can free it from glock cache
2045 * @gl: the glock to look at
2046 *
2047 */
2048
2049static void clear_glock(struct gfs2_glock *gl)
2050{
2051 struct gfs2_sbd *sdp = gl->gl_sbd;
2052 int released;
2053
2054 spin_lock(&sdp->sd_reclaim_lock);
2055 if (!list_empty(&gl->gl_reclaim)) {
2056 list_del_init(&gl->gl_reclaim);
2057 atomic_dec(&sdp->sd_reclaim_count);
2058 spin_unlock(&sdp->sd_reclaim_lock);
2059 released = gfs2_glock_put(gl);
2060 gfs2_assert(sdp, !released);
2061 } else {
2062 spin_unlock(&sdp->sd_reclaim_lock);
2063 }
2064
2065 if (gfs2_glmutex_trylock(gl)) {
2066 if (gl->gl_ops == &gfs2_inode_glops) {
2067 struct gfs2_inode *ip = gl->gl_object;
2068 if (ip && !atomic_read(&ip->i_count))
2069 gfs2_inode_destroy(ip, 1);
2070 }
2071 if (queue_empty(gl, &gl->gl_holders) &&
2072 gl->gl_state != LM_ST_UNLOCKED)
2073 handle_callback(gl, LM_ST_UNLOCKED);
2074
2075 gfs2_glmutex_unlock(gl);
2076 }
2077
2078 gfs2_glock_put(gl);
2079}
2080
2081/**
2082 * gfs2_gl_hash_clear - Empty out the glock hash table
2083 * @sdp: the filesystem
2084 * @wait: wait until it's all gone
2085 *
2086 * Called when unmounting the filesystem, or when inter-node lock manager
2087 * requests DROPLOCKS because it is running out of capacity.
2088 */
2089
2090void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2091{
2092 unsigned long t;
2093 unsigned int x;
2094 int cont;
2095
2096 t = jiffies;
2097
2098 for (;;) {
2099 cont = 0;
2100
2101 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2102 if (examine_bucket(clear_glock, sdp,
2103 &sdp->sd_gl_hash[x]))
2104 cont = 1;
2105
2106 if (!wait || !cont)
2107 break;
2108
2109 if (time_after_eq(jiffies,
2110 t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
2111 fs_warn(sdp, "Unmount seems to be stalled. "
2112 "Dumping lock state...\n");
2113 gfs2_dump_lockstate(sdp);
2114 t = jiffies;
2115 }
2116
2117 /* invalidate_inodes() requires that the sb inodes list
2118 not change, but an async completion callback for an
2119 unlock can occur which does glock_put() which
2120 can call iput() which will change the sb inodes list.
2121 invalidate_inodes_mutex prevents glock_put()'s during
2122 an invalidate_inodes() */
2123
2124 mutex_lock(&sdp->sd_invalidate_inodes_mutex);
2125 invalidate_inodes(sdp->sd_vfs);
2126 mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
2127 msleep(10);
2128 }
2129}
2130
2131/*
2132 * Diagnostic routines to help debug distributed deadlock
2133 */
2134
2135/**
2136 * dump_holder - print information about a glock holder
2137 * @str: a string naming the type of holder
2138 * @gh: the glock holder
2139 *
2140 * Returns: 0 on success, -ENOBUFS when we run out of space
2141 */
2142
2143static int dump_holder(char *str, struct gfs2_holder *gh)
2144{
2145 unsigned int x;
2146 int error = -ENOBUFS;
2147
2148 printk(KERN_INFO " %s\n", str);
2149 printk(KERN_INFO " owner = %ld\n",
2150 (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
2151 printk(KERN_INFO " gh_state = %u\n", gh->gh_state);
2152 printk(KERN_INFO " gh_flags =");
2153 for (x = 0; x < 32; x++)
2154 if (gh->gh_flags & (1 << x))
2155 printk(" %u", x);
2156 printk(" \n");
2157 printk(KERN_INFO " error = %d\n", gh->gh_error);
2158 printk(KERN_INFO " gh_iflags =");
2159 for (x = 0; x < 32; x++)
2160 if (test_bit(x, &gh->gh_iflags))
2161 printk(" %u", x);
2162 printk(" \n");
2163 print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip);
2164
2165 error = 0;
2166
2167 return error;
2168}
2169
2170/**
2171 * dump_inode - print information about an inode
2172 * @ip: the inode
2173 *
2174 * Returns: 0 on success, -ENOBUFS when we run out of space
2175 */
2176
2177static int dump_inode(struct gfs2_inode *ip)
2178{
2179 unsigned int x;
2180 int error = -ENOBUFS;
2181
2182 printk(KERN_INFO " Inode:\n");
2183 printk(KERN_INFO " num = %llu %llu\n",
2184 ip->i_num.no_formal_ino, ip->i_num.no_addr);
2185 printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode));
2186 printk(KERN_INFO " i_count = %d\n", atomic_read(&ip->i_count));
2187 printk(KERN_INFO " i_flags =");
2188 for (x = 0; x < 32; x++)
2189 if (test_bit(x, &ip->i_flags))
2190 printk(" %u", x);
2191 printk(" \n");
2192 printk(KERN_INFO " vnode = %s\n", (ip->i_vnode) ? "yes" : "no");
2193
2194 error = 0;
2195
2196 return error;
2197}
2198
2199/**
2200 * dump_glock - print information about a glock
2201 * @gl: the glock
2202 * @count: where we are in the buffer
2203 *
2204 * Returns: 0 on success, -ENOBUFS when we run out of space
2205 */
2206
2207static int dump_glock(struct gfs2_glock *gl)
2208{
2209 struct gfs2_holder *gh;
2210 unsigned int x;
2211 int error = -ENOBUFS;
2212
2213 spin_lock(&gl->gl_spin);
2214
2215 printk(KERN_INFO "Glock (%u, %llu)\n",
2216 gl->gl_name.ln_type,
2217 gl->gl_name.ln_number);
2218 printk(KERN_INFO " gl_flags =");
2219 for (x = 0; x < 32; x++)
2220 if (test_bit(x, &gl->gl_flags))
2221 printk(" %u", x);
2222 printk(" \n");
2223 printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
2224 printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
2225 printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
2226 printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
2227 printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
2228 printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no");
2229 printk(KERN_INFO " le = %s\n",
2230 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
2231 printk(KERN_INFO " reclaim = %s\n",
2232 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2233 if (gl->gl_aspace)
2234 printk(KERN_INFO " aspace = %lu\n",
2235 gl->gl_aspace->i_mapping->nrpages);
2236 else
2237 printk(KERN_INFO " aspace = no\n");
2238 printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count));
2239 if (gl->gl_req_gh) {
2240 error = dump_holder("Request", gl->gl_req_gh);
2241 if (error)
2242 goto out;
2243 }
2244 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
2245 error = dump_holder("Holder", gh);
2246 if (error)
2247 goto out;
2248 }
2249 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
2250 error = dump_holder("Waiter1", gh);
2251 if (error)
2252 goto out;
2253 }
2254 list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
2255 error = dump_holder("Waiter2", gh);
2256 if (error)
2257 goto out;
2258 }
2259 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
2260 error = dump_holder("Waiter3", gh);
2261 if (error)
2262 goto out;
2263 }
2264 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
2265 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
2266 list_empty(&gl->gl_holders)) {
2267 error = dump_inode(gl->gl_object);
2268 if (error)
2269 goto out;
2270 } else {
2271 error = -ENOBUFS;
2272 printk(KERN_INFO " Inode: busy\n");
2273 }
2274 }
2275
2276 error = 0;
2277
2278 out:
2279 spin_unlock(&gl->gl_spin);
2280
2281 return error;
2282}
2283
2284/**
2285 * gfs2_dump_lockstate - print out the current lockstate
2286 * @sdp: the filesystem
2287 *
2288 * Dumps the current lock state to the console.
2289 *
2291 */
2292
2293static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2294{
2295 struct gfs2_gl_hash_bucket *bucket;
2296 struct gfs2_glock *gl;
2297 unsigned int x;
2298 int error = 0;
2299
2300 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2301 bucket = &sdp->sd_gl_hash[x];
2302
2303 read_lock(&bucket->hb_lock);
2304
2305 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
2306 if (test_bit(GLF_PLUG, &gl->gl_flags))
2307 continue;
2308
2309 error = dump_glock(gl);
2310 if (error)
2311 break;
2312 }
2313
2314 read_unlock(&bucket->hb_lock);
2315
2316 if (error)
2317 break;
2318 }
2319
2320
2321 return error;
2322}
2323