Linux v2.6.18-rc4
fs/ocfs2/dlm/dlmunlock.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmunlock.c
 *
 * underlying calls for unlocking locks
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

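/*
 * The DLM_UNLOCK_* bits below form an action mask: one of
 * dlm_get_cancel_actions()/dlm_get_unlock_actions() chooses the bits
 * that apply, and the tail of dlmunlock_common() carries them out.
 */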
#define DLM_UNLOCK_FREE_LOCK           0x00000001
#define DLM_UNLOCK_CALL_AST            0x00000002
#define DLM_UNLOCK_REMOVE_LOCK         0x00000004
#define DLM_UNLOCK_REGRANT_LOCK        0x00000008
#define DLM_UNLOCK_CLEAR_CONVERT_TYPE  0x00000010


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions);
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions);

static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                                                      struct dlm_lock_resource *res,
                                                      struct dlm_lock *lock,
                                                      struct dlm_lockstatus *lksb,
                                                      int flags,
                                                      u8 owner);


/*
 * according to the spec:
 * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
 *
 * flags & LKM_CANCEL != 0: must be converting or blocked
 * flags & LKM_CANCEL == 0: must be granted
 *
 * So to unlock a converting lock, you must first cancel the
 * convert (passing LKM_CANCEL in flags), then call the unlock
 * again (with no LKM_CANCEL in flags).
 */
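
/*
 * Illustrative sketch (annotation, not in the original file): per the
 * spec above, tearing down a lock whose convert request is still
 * queued takes two dlmunlock() calls.  Both example_* names below are
 * hypothetical; LKM_VALBLK on the second call is an assumption and
 * should be dropped if there is no lvb to flush.
 */
static void example_unlock_ast(void *astdata, enum dlm_status st)
{
        /* fires once per dlmunlock() call; st is DLM_CANCELGRANT when
         * the convert was granted before the cancel could catch it */
}

static inline enum dlm_status example_cancel_then_unlock(struct dlm_ctxt *dlm,
                                                         struct dlm_lockstatus *lksb,
                                                         void *astdata)
{
        enum dlm_status ret;

        /* step 1: cancel the in-flight convert (LKM_CANCEL set) */
        ret = dlmunlock(dlm, lksb, LKM_CANCEL, example_unlock_ast, astdata);
        if (ret != DLM_NORMAL)
                return ret;

        /* step 2: plain unlock of the now-granted lock (no LKM_CANCEL) */
        return dlmunlock(dlm, lksb, LKM_VALBLK, example_unlock_ast, astdata);
}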


/*
 * locking:
 * caller needs: none
 * taken: res->spinlock and lock->spinlock taken and dropped
 * held on exit: none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 * all callers should have taken an extra ref on lock coming in
 */
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                                        struct dlm_lock_resource *res,
                                        struct dlm_lock *lock,
                                        struct dlm_lockstatus *lksb,
                                        int flags, int *call_ast,
                                        int master_node)
{
        enum dlm_status status;
        int actions = 0;
        int in_use;
        u8 owner;

        mlog(0, "master_node = %d, valblk = %d\n", master_node,
             flags & LKM_VALBLK);

        if (master_node)
                BUG_ON(res->owner != dlm->node_num);
        else
                BUG_ON(res->owner == dlm->node_num);

        spin_lock(&dlm->spinlock);
        /* We want to be sure that we're not freeing a lock
         * that still has ASTs pending... */
        in_use = !list_empty(&lock->ast_list);
        spin_unlock(&dlm->spinlock);
        if (in_use) {
                mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
                     "while waiting for an ast!", res->lockname.len,
                     res->lockname.name);
                return DLM_BADPARAM;
        }

        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
                if (master_node) {
                        mlog(ML_ERROR, "lockres in progress!\n");
                        spin_unlock(&res->spinlock);
                        return DLM_FORWARD;
                }
                /* ok for this to sleep if not in a network handler */
                __dlm_wait_on_lockres(res);
                res->state |= DLM_LOCK_RES_IN_PROGRESS;
        }
        spin_lock(&lock->spinlock);

        if (res->state & DLM_LOCK_RES_RECOVERING) {
                status = DLM_RECOVERING;
                goto leave;
        }


        /* see above for what the spec says about
         * LKM_CANCEL and the lock queue state */
        if (flags & LKM_CANCEL)
                status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
        else
                status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);

        if (status != DLM_NORMAL)
                goto leave;

        /* By now this has been masked out of cancel requests. */
        if (flags & LKM_VALBLK) {
                /* make the final update to the lvb */
                if (master_node)
                        memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
                else
                        flags |= LKM_PUT_LVB; /* let the send function
                                               * handle it. */
        }

        if (!master_node) {
                owner = res->owner;
                /* drop locks and send message */
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 1;
                else
                        lock->unlock_pending = 1;
                spin_unlock(&lock->spinlock);
                spin_unlock(&res->spinlock);
                status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
                                                        flags, owner);
                spin_lock(&res->spinlock);
                spin_lock(&lock->spinlock);
                /* if the master told us the lock was already granted,
                 * let the ast handle all of these actions */
                if (status == DLM_NORMAL &&
                    lksb->status == DLM_CANCELGRANT) {
                        actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
                                     DLM_UNLOCK_REGRANT_LOCK|
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
                } else if (status == DLM_RECOVERING ||
                           status == DLM_MIGRATING ||
                           status == DLM_FORWARD) {
                        /* must clear the actions because this unlock
                         * is about to be retried. cannot free or do
                         * any list manipulation. */
                        mlog(0, "%s:%.*s: clearing actions, %s\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name,
                             status == DLM_RECOVERING ? "recovering" :
                             (status == DLM_MIGRATING ? "migrating" :
                              "forward"));
                        actions = 0;
                }
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 0;
                else
                        lock->unlock_pending = 0;

        }

        /* get an extra ref on lock. if we are just switching
         * lists here, we don't want the lock to go away. */
        dlm_lock_get(lock);

        if (actions & DLM_UNLOCK_REMOVE_LOCK) {
                list_del_init(&lock->list);
                dlm_lock_put(lock);
        }
        if (actions & DLM_UNLOCK_REGRANT_LOCK) {
                dlm_lock_get(lock);
                list_add_tail(&lock->list, &res->granted);
        }
        if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
                mlog(0, "clearing convert_type at %smaster node\n",
                     master_node ? "" : "non-");
                lock->ml.convert_type = LKM_IVMODE;
        }

        /* remove the extra ref on lock */
        dlm_lock_put(lock);

leave:
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        if (!dlm_lock_on_list(&res->converting, lock))
                BUG_ON(lock->ml.convert_type != LKM_IVMODE);
        else
                BUG_ON(lock->ml.convert_type == LKM_IVMODE);
        spin_unlock(&lock->spinlock);
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

        /* let the caller's final dlm_lock_put handle the actual kfree */
        if (actions & DLM_UNLOCK_FREE_LOCK) {
                /* this should always be coupled with list removal */
                BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
                mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
                     dlm_get_lock_cookie_node(lock->ml.cookie),
                     dlm_get_lock_cookie_seq(lock->ml.cookie),
                     atomic_read(&lock->lock_refs.refcount)-1);
                dlm_lock_put(lock);
        }
        if (actions & DLM_UNLOCK_CALL_AST)
                *call_ast = 1;

        /* if cancel or unlock succeeded, lvb work is done */
        if (status == DLM_NORMAL)
                lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);

        return status;
}

void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
                               struct dlm_lock *lock)
{
        /* leave DLM_LKSB_PUT_LVB on the lksb so any final
         * update of the lvb will be sent to the new master */
        list_del_init(&lock->list);
}

void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
                               struct dlm_lock *lock)
{
        list_move_tail(&lock->list, &res->granted);
        lock->ml.convert_type = LKM_IVMODE;
}


static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock,
                                               struct dlm_lockstatus *lksb,
                                               int flags,
                                               int *call_ast)
{
        return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
}

static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock,
                                               struct dlm_lockstatus *lksb,
                                               int flags, int *call_ast)
{
        return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
}

/*
 * locking:
 * caller needs: none
 * taken: none
 * held on exit: none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 */
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                                                      struct dlm_lock_resource *res,
                                                      struct dlm_lock *lock,
                                                      struct dlm_lockstatus *lksb,
                                                      int flags,
                                                      u8 owner)
{
        struct dlm_unlock_lock unlock;
        int tmpret;
        enum dlm_status ret;
        int status = 0;
        struct kvec vec[2];
        size_t veclen = 1;

        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
        if (owner == dlm->node_num) {
                /* ended up trying to contact ourselves. this means
                 * that the lockres had been remote but became local
                 * via a migration. just retry it, now as local */
                mlog(0, "%s:%.*s: this node became the master due to a "
                     "migration, re-evaluate now\n", dlm->name,
                     res->lockname.len, res->lockname.name);
                return DLM_FORWARD;
        }

        memset(&unlock, 0, sizeof(unlock));
        unlock.node_idx = dlm->node_num;
        unlock.flags = cpu_to_be32(flags);
        unlock.cookie = lock->ml.cookie;
        unlock.namelen = res->lockname.len;
        memcpy(unlock.name, res->lockname.name, unlock.namelen);

        vec[0].iov_len = sizeof(struct dlm_unlock_lock);
        vec[0].iov_base = &unlock;

        if (flags & LKM_PUT_LVB) {
                /* extra data to send if we are updating lvb */
                vec[1].iov_len = DLM_LVB_LEN;
                vec[1].iov_base = lock->lksb->lvb;
                veclen++;
        }

        tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
                                        vec, veclen, owner, &status);
        if (tmpret >= 0) {
                /* successfully sent and received */
                if (status == DLM_CANCELGRANT)
                        ret = DLM_NORMAL;
                else if (status == DLM_FORWARD) {
                        mlog(0, "master was in-progress. retry\n");
                        ret = DLM_FORWARD;
                } else
                        ret = status;
                lksb->status = status;
        } else {
                mlog_errno(tmpret);
                if (dlm_is_host_down(tmpret)) {
                        /* NOTE: this seems strange, but it is what we want.
                         * when the master goes down during a cancel or
                         * unlock, the recovery code completes the operation
                         * as if the master had not died, then passes the
                         * updated state to the recovery master. this thread
                         * just needs to finish out the operation and call
                         * the unlockast. */
                        ret = DLM_NORMAL;
                } else {
                        /* something bad. this will BUG in ocfs2 */
                        ret = dlm_err_to_dlm_status(tmpret);
                }
                lksb->status = ret;
        }

        return ret;
}

/*
 * locking:
 * caller needs: none
 * taken: takes and drops res->spinlock
 * held on exit: none
 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
 * return value from dlmunlock_master
 */
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        struct list_head *iter;
        struct dlm_lock *lock = NULL;
        enum dlm_status status = DLM_NORMAL;
        int found = 0, i;
        struct dlm_lockstatus *lksb = NULL;
        int ignore;
        u32 flags;
        struct list_head *queue;

        flags = be32_to_cpu(unlock->flags);

        if (flags & LKM_GET_LVB) {
                mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
                return DLM_BADARGS;
        }

        if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
                mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL "
                     "request!\n");
                return DLM_BADARGS;
        }

        if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
                mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
                return DLM_IVBUFLEN;
        }

        if (!dlm_grab(dlm))
                return DLM_REJECTED;

        mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
                        "Domain %s not fully joined!\n", dlm->name);

        mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");

        res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
        if (!res) {
                /* We assume here that a missing lock resource simply
                 * means it was migrated away and destroyed before the
                 * other node could detect it. */
                mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
                status = DLM_FORWARD;
                goto not_found;
        }

        queue = &res->granted;
        found = 0;
        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_RECOVERING) {
                spin_unlock(&res->spinlock);
                mlog(0, "returning DLM_RECOVERING\n");
                status = DLM_RECOVERING;
                goto leave;
        }

        if (res->state & DLM_LOCK_RES_MIGRATING) {
                spin_unlock(&res->spinlock);
                mlog(0, "returning DLM_MIGRATING\n");
                status = DLM_MIGRATING;
                goto leave;
        }

        if (res->owner != dlm->node_num) {
                spin_unlock(&res->spinlock);
                mlog(0, "returning DLM_FORWARD -- not master\n");
                status = DLM_FORWARD;
                goto leave;
        }

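        /* the queue++ below steps through res->granted, res->converting
         * and res->blocked in that order; this relies on the three
         * list_heads sitting next to each other in struct
         * dlm_lock_resource */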
        for (i = 0; i < 3; i++) {
                list_for_each(iter, queue) {
                        lock = list_entry(iter, struct dlm_lock, list);
                        if (lock->ml.cookie == unlock->cookie &&
                            lock->ml.node == unlock->node_idx) {
                                dlm_lock_get(lock);
                                found = 1;
                                break;
                        }
                }
                if (found)
                        break;
                /* scan granted -> converting -> blocked queues */
                queue++;
        }
        spin_unlock(&res->spinlock);
        if (!found) {
                status = DLM_IVLOCKID;
                goto not_found;
        }

        /* lock was found on queue */
        lksb = lock->lksb;
        /* unlockast only called on originating node */
        if (flags & LKM_PUT_LVB) {
                lksb->flags |= DLM_LKSB_PUT_LVB;
                memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
        }

        /* if this is in-progress, propagate the DLM_FORWARD
         * all the way back out */
        status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
        if (status == DLM_FORWARD)
                mlog(0, "lockres is in progress\n");

        if (flags & LKM_PUT_LVB)
                lksb->flags &= ~DLM_LKSB_PUT_LVB;

        dlm_lockres_calc_usage(dlm, res);
        dlm_kick_thread(dlm, res);

not_found:
        if (!found)
                mlog(ML_ERROR, "failed to find lock to unlock! "
507 "cookie=%u:%llu\n",
508 dlm_get_lock_cookie_node(unlock->cookie),
509 dlm_get_lock_cookie_seq(unlock->cookie));
        else {
                /* send the lksb->status back to the other node */
                status = lksb->status;
                dlm_lock_put(lock);
        }

leave:
        if (res)
                dlm_lockres_put(res);

        dlm_put(dlm);

        return status;
}


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions)
{
        enum dlm_status status;

        if (dlm_lock_on_list(&res->blocked, lock)) {
                /* cancel this outright */
                lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK);
        } else if (dlm_lock_on_list(&res->converting, lock)) {
                /* cancel the request, put back on granted */
                lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK |
                            DLM_UNLOCK_REGRANT_LOCK |
                            DLM_UNLOCK_CLEAR_CONVERT_TYPE);
        } else if (dlm_lock_on_list(&res->granted, lock)) {
                /* too late, already granted. DLM_CANCELGRANT */
                lksb->status = DLM_CANCELGRANT;
                status = DLM_NORMAL;
                *actions = DLM_UNLOCK_CALL_AST;
        } else {
                mlog(ML_ERROR, "lock to cancel is not on any list!\n");
                lksb->status = DLM_IVLOCKID;
                status = DLM_IVLOCKID;
                *actions = 0;
        }
        return status;
}

static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions)
{
        enum dlm_status status;

        /* unlock request */
        if (!dlm_lock_on_list(&res->granted, lock)) {
                lksb->status = DLM_DENIED;
                status = DLM_DENIED;
                dlm_error(status);
                *actions = 0;
        } else {
                /* unlock granted lock */
                lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_FREE_LOCK |
                            DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK);
        }
        return status;
}

/* there seems to be no point in doing this async
 * since (even for the remote case) there is really
 * no work to queue up... so just do it and fire the
 * unlockast by hand when done... */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
                          int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
        enum dlm_status status;
        struct dlm_lock_resource *res;
        struct dlm_lock *lock = NULL;
        int call_ast, is_master;

        mlog_entry_void();

        if (!lksb) {
                dlm_error(DLM_BADARGS);
                return DLM_BADARGS;
        }

        if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
                dlm_error(DLM_BADPARAM);
                return DLM_BADPARAM;
        }

        if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
                mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
                flags &= ~LKM_VALBLK;
        }

        if (!lksb->lockid || !lksb->lockid->lockres) {
                dlm_error(DLM_BADPARAM);
                return DLM_BADPARAM;
        }

        lock = lksb->lockid;
        BUG_ON(!lock);
        dlm_lock_get(lock);

        res = lock->lockres;
        BUG_ON(!res);
        dlm_lockres_get(res);
retry:
        call_ast = 0;
        /* need to retry up here because owner may have changed */
        mlog(0, "lock=%p res=%p\n", lock, res);

        spin_lock(&res->spinlock);
        is_master = (res->owner == dlm->node_num);
        spin_unlock(&res->spinlock);

        if (is_master) {
                status = dlmunlock_master(dlm, res, lock, lksb, flags,
                                          &call_ast);
                mlog(0, "done calling dlmunlock_master: returned %d, "
                     "call_ast is %d\n", status, call_ast);
        } else {
                status = dlmunlock_remote(dlm, res, lock, lksb, flags,
                                          &call_ast);
                mlog(0, "done calling dlmunlock_remote: returned %d, "
                     "call_ast is %d\n", status, call_ast);
        }

        if (status == DLM_RECOVERING ||
            status == DLM_MIGRATING ||
            status == DLM_FORWARD) {
                /* We want to go away for a tiny bit to allow recovery
                 * / migration to complete on this resource. I don't
                 * know of any wait queue we could sleep on as this
                 * may be happening on another node. Perhaps the
                 * proper solution is to queue up requests on the
                 * other end? */

                /* do we want to yield(); ?? */
                msleep(50);

                mlog(0, "retrying unlock due to pending recovery/"
                     "migration/in-progress\n");
                goto retry;
        }

        if (call_ast) {
                mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status);
                if (is_master) {
                        /* it is possible that there is one last bast
                         * pending. make sure it is flushed, then
                         * call the unlockast.
                         * not an issue if this is mastered remotely,
                         * since this lock has been removed from the
                         * lockres queues and cannot be found. */
                        dlm_kick_thread(dlm, NULL);
                        wait_event(dlm->ast_wq,
                                   dlm_lock_basts_flushed(dlm, lock));
                }
                (*unlockast)(data, lksb->status);
        }

        if (status == DLM_NORMAL) {
                mlog(0, "kicking the thread\n");
                dlm_kick_thread(dlm, res);
        } else
                dlm_error(status);

        dlm_lockres_calc_usage(dlm, res);
        dlm_lockres_put(res);
        dlm_lock_put(lock);

        mlog(0, "returning status=%d!\n", status);
        return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);