ocfs2: Update dlmglue for new dlmlock() API
fs/ocfs2/dlmglue.c
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmglue.c
5 *
6 * Code which implements an OCFS2 specific interface to our DLM.
7 *
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
24 */
25
26#include <linux/types.h>
27#include <linux/slab.h>
28#include <linux/highmem.h>
29#include <linux/mm.h>
30#include <linux/smp_lock.h>
31#include <linux/crc32.h>
32#include <linux/kthread.h>
33#include <linux/pagemap.h>
34#include <linux/debugfs.h>
35#include <linux/seq_file.h>
36
37#include <cluster/heartbeat.h>
38#include <cluster/nodemanager.h>
39#include <cluster/tcp.h>
40
41#include <dlm/dlmapi.h>
42
43#define MLOG_MASK_PREFIX ML_DLM_GLUE
44#include <cluster/masklog.h>
45
46#include "ocfs2.h"
47
48#include "alloc.h"
49#include "dlmglue.h"
50#include "extent_map.h"
51#include "heartbeat.h"
52#include "inode.h"
53#include "journal.h"
54#include "slot_map.h"
55#include "super.h"
56#include "uptodate.h"
57#include "vote.h"
58
59#include "buffer_head_io.h"
60
61struct ocfs2_mask_waiter {
62 struct list_head mw_item;
63 int mw_status;
64 struct completion mw_complete;
65 unsigned long mw_mask;
66 unsigned long mw_goal;
67};
68
69static void ocfs2_inode_ast_func(void *opaque);
70static void ocfs2_inode_bast_func(void *opaque,
71 int level);
72static void ocfs2_super_ast_func(void *opaque);
73static void ocfs2_super_bast_func(void *opaque,
74 int level);
75static void ocfs2_rename_ast_func(void *opaque);
76static void ocfs2_rename_bast_func(void *opaque,
77 int level);
78
79/* so far, all locks have gotten along with the same unlock ast */
80static void ocfs2_unlock_ast_func(void *opaque,
81 enum dlm_status status);
82static int ocfs2_do_unblock_meta(struct inode *inode,
83 int *requeue);
84static int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
85 int *requeue);
86static int ocfs2_unblock_data(struct ocfs2_lock_res *lockres,
87 int *requeue);
88static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres,
89 int *requeue);
90static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres,
91 int *requeue);
92typedef void (ocfs2_convert_worker_t)(struct ocfs2_lock_res *, int);
93static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb,
94 struct ocfs2_lock_res *lockres,
95 int *requeue,
96 ocfs2_convert_worker_t *worker);
97
98struct ocfs2_lock_res_ops {
99 void (*ast)(void *);
100 void (*bast)(void *, int);
101 void (*unlock_ast)(void *, enum dlm_status);
102 int (*unblock)(struct ocfs2_lock_res *, int *);
103};
104
105static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
106 .ast = ocfs2_inode_ast_func,
107 .bast = ocfs2_inode_bast_func,
108 .unlock_ast = ocfs2_unlock_ast_func,
109 .unblock = ocfs2_unblock_inode_lock,
110};
111
112static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
113 .ast = ocfs2_inode_ast_func,
114 .bast = ocfs2_inode_bast_func,
115 .unlock_ast = ocfs2_unlock_ast_func,
116 .unblock = ocfs2_unblock_meta,
117};
118
119static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
120 int blocking);
121
122static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
123 .ast = ocfs2_inode_ast_func,
124 .bast = ocfs2_inode_bast_func,
125 .unlock_ast = ocfs2_unlock_ast_func,
126 .unblock = ocfs2_unblock_data,
127};
128
129static struct ocfs2_lock_res_ops ocfs2_super_lops = {
130 .ast = ocfs2_super_ast_func,
131 .bast = ocfs2_super_bast_func,
132 .unlock_ast = ocfs2_unlock_ast_func,
133 .unblock = ocfs2_unblock_osb_lock,
134};
135
136static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
137 .ast = ocfs2_rename_ast_func,
138 .bast = ocfs2_rename_bast_func,
139 .unlock_ast = ocfs2_unlock_ast_func,
140 .unblock = ocfs2_unblock_osb_lock,
141};
142
143static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
144{
145 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
146 lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
147 lockres->l_type == OCFS2_LOCK_TYPE_RW;
148}
149
150static inline int ocfs2_is_super_lock(struct ocfs2_lock_res *lockres)
151{
152 return lockres->l_type == OCFS2_LOCK_TYPE_SUPER;
153}
154
155static inline int ocfs2_is_rename_lock(struct ocfs2_lock_res *lockres)
156{
157 return lockres->l_type == OCFS2_LOCK_TYPE_RENAME;
158}
159
160static inline struct ocfs2_super *ocfs2_lock_res_super(struct ocfs2_lock_res *lockres)
161{
162 BUG_ON(!ocfs2_is_super_lock(lockres)
163 && !ocfs2_is_rename_lock(lockres));
164
165 return (struct ocfs2_super *) lockres->l_priv;
166}
167
168static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
169{
170 BUG_ON(!ocfs2_is_inode_lock(lockres));
171
172 return (struct inode *) lockres->l_priv;
173}
174
175static int ocfs2_lock_create(struct ocfs2_super *osb,
176 struct ocfs2_lock_res *lockres,
177 int level,
178 int dlm_flags);
179static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
180 int wanted);
181static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
182 struct ocfs2_lock_res *lockres,
183 int level);
184static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
185static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
186static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
187static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
188static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
189 struct ocfs2_lock_res *lockres);
190static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
191 int convert);
192#define ocfs2_log_dlm_error(_func, _stat, _lockres) do { \
193 mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
194 "resource %s: %s\n", dlm_errname(_stat), _func, \
195 _lockres->l_name, dlm_errmsg(_stat)); \
196} while (0)
197static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
198 struct ocfs2_lock_res *lockres);
199static int ocfs2_meta_lock_update(struct inode *inode,
200 struct buffer_head **bh);
201static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
202static inline int ocfs2_highest_compat_lock_level(int level);
203static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
204 struct ocfs2_lock_res *lockres,
205 int new_level);
206
207static char *ocfs2_lock_type_strings[] = {
208 [OCFS2_LOCK_TYPE_META] = "Meta",
209 [OCFS2_LOCK_TYPE_DATA] = "Data",
210 [OCFS2_LOCK_TYPE_SUPER] = "Super",
211 [OCFS2_LOCK_TYPE_RENAME] = "Rename",
212 /* Need to differentiate from [R]ename.. serializing writes is the
213 * important job it does, anyway. */
214 [OCFS2_LOCK_TYPE_RW] = "Write/Read",
215};
216
217static char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
218{
219 mlog_bug_on_msg(type >= OCFS2_NUM_LOCK_TYPES, "%d\n", type);
220 return ocfs2_lock_type_strings[type];
221}
222
223static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
224 u64 blkno,
225 u32 generation,
226 char *name)
227{
228 int len;
229
230 mlog_entry_void();
231
232 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
233
234 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
235 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
236 (long long)blkno, generation);
237
238 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
239
240 mlog(0, "built lock resource with name: %s\n", name);
241
242 mlog_exit_void();
243}
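/*
 * Worked example (added for illustration; not in the original file):
 * a META lock on block 5 with generation 0x1234 would, assuming the
 * usual "000000" OCFS2_LOCK_ID_PAD and an 'M' type character, yield
 * the 31-character name
 *
 *	M000000000000000000000500001234
 *
 * i.e. 1 type char + 6 pad chars + 16 hex digits of blkno + 8 hex
 * digits of generation = OCFS2_LOCK_ID_MAX_LEN - 1, which is exactly
 * what the BUG_ON above checks.
 */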
244
245static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
246
247static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
248 struct ocfs2_dlm_debug *dlm_debug)
249{
250 mlog(0, "Add tracking for lockres %s\n", res->l_name);
251
252 spin_lock(&ocfs2_dlm_tracking_lock);
253 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
254 spin_unlock(&ocfs2_dlm_tracking_lock);
255}
256
257static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
258{
259 spin_lock(&ocfs2_dlm_tracking_lock);
260 if (!list_empty(&res->l_debug_list))
261 list_del_init(&res->l_debug_list);
262 spin_unlock(&ocfs2_dlm_tracking_lock);
263}
264
265static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
266 struct ocfs2_lock_res *res,
267 enum ocfs2_lock_type type,
268 u64 blkno,
269 u32 generation,
270 struct ocfs2_lock_res_ops *ops,
271 void *priv)
272{
273 ocfs2_build_lock_name(type, blkno, generation, res->l_name);
274
275 res->l_type = type;
276 res->l_ops = ops;
277 res->l_priv = priv;
278
279 res->l_level = LKM_IVMODE;
280 res->l_requested = LKM_IVMODE;
281 res->l_blocking = LKM_IVMODE;
282 res->l_action = OCFS2_AST_INVALID;
283 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
284
285 res->l_flags = OCFS2_LOCK_INITIALIZED;
286
287 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
288}
289
290void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
291{
292 /* This also clears out the lock status block */
293 memset(res, 0, sizeof(struct ocfs2_lock_res));
294 spin_lock_init(&res->l_lock);
295 init_waitqueue_head(&res->l_event);
296 INIT_LIST_HEAD(&res->l_blocked_list);
297 INIT_LIST_HEAD(&res->l_mask_waiters);
298}
299
300void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
301 enum ocfs2_lock_type type,
302 struct inode *inode)
303{
304 struct ocfs2_lock_res_ops *ops;
305
306 switch(type) {
307 case OCFS2_LOCK_TYPE_RW:
308 ops = &ocfs2_inode_rw_lops;
309 break;
310 case OCFS2_LOCK_TYPE_META:
311 ops = &ocfs2_inode_meta_lops;
312 break;
313 case OCFS2_LOCK_TYPE_DATA:
314 ops = &ocfs2_inode_data_lops;
315 break;
316 default:
317 mlog_bug_on_msg(1, "type: %d\n", type);
318 ops = NULL; /* thanks, gcc */
319 break;
320 };
321
322 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type,
323 OCFS2_I(inode)->ip_blkno,
324 inode->i_generation, ops, inode);
325}
326
327static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
328 struct ocfs2_super *osb)
329{
330 /* Superblock lockres doesn't come from a slab so we call init
331 * once on it manually. */
332 ocfs2_lock_res_init_once(res);
333 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
334 OCFS2_SUPER_BLOCK_BLKNO, 0,
335 &ocfs2_super_lops, osb);
336}
337
338static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
339 struct ocfs2_super *osb)
340{
341 /* Rename lockres doesn't come from a slab so we call init
342 * once on it manually. */
343 ocfs2_lock_res_init_once(res);
344 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME, 0, 0,
345 &ocfs2_rename_lops, osb);
346}
347
348void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
349{
350 mlog_entry_void();
351
352 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
353 return;
354
355 ocfs2_remove_lockres_tracking(res);
356
357 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
358 "Lockres %s is on the blocked list\n",
359 res->l_name);
360 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
361 "Lockres %s has mask waiters pending\n",
362 res->l_name);
363 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
364 "Lockres %s is locked\n",
365 res->l_name);
366 mlog_bug_on_msg(res->l_ro_holders,
367 "Lockres %s has %u ro holders\n",
368 res->l_name, res->l_ro_holders);
369 mlog_bug_on_msg(res->l_ex_holders,
370 "Lockres %s has %u ex holders\n",
371 res->l_name, res->l_ex_holders);
372
373 /* Need to clear out the lock status block for the dlm */
374 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
375
376 res->l_flags = 0UL;
377 mlog_exit_void();
378}
379
380static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
381 int level)
382{
383 mlog_entry_void();
384
385 BUG_ON(!lockres);
386
387 switch(level) {
388 case LKM_EXMODE:
389 lockres->l_ex_holders++;
390 break;
391 case LKM_PRMODE:
392 lockres->l_ro_holders++;
393 break;
394 default:
395 BUG();
396 }
397
398 mlog_exit_void();
399}
400
401static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
402 int level)
403{
404 mlog_entry_void();
405
406 BUG_ON(!lockres);
407
408 switch(level) {
409 case LKM_EXMODE:
410 BUG_ON(!lockres->l_ex_holders);
411 lockres->l_ex_holders--;
412 break;
413 case LKM_PRMODE:
414 BUG_ON(!lockres->l_ro_holders);
415 lockres->l_ro_holders--;
416 break;
417 default:
418 BUG();
419 }
420 mlog_exit_void();
421}
422
423/* WARNING: This function lives in a world where the only three lock
424 * levels are EX, PR, and NL. It *will* have to be adjusted when more
425 * lock types are added. */
426static inline int ocfs2_highest_compat_lock_level(int level)
427{
428 int new_level = LKM_EXMODE;
429
430 if (level == LKM_EXMODE)
431 new_level = LKM_NLMODE;
432 else if (level == LKM_PRMODE)
433 new_level = LKM_PRMODE;
434 return new_level;
435}
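/*
 * Added illustration (not in the original file): the mapping
 * implemented above is
 *
 *	level being blocked on		highest level we may keep
 *	LKM_EXMODE			LKM_NLMODE
 *	LKM_PRMODE			LKM_PRMODE
 *	LKM_NLMODE			LKM_EXMODE
 */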
436
437static void lockres_set_flags(struct ocfs2_lock_res *lockres,
438 unsigned long newflags)
439{
440 struct list_head *pos, *tmp;
441 struct ocfs2_mask_waiter *mw;
442
443 assert_spin_locked(&lockres->l_lock);
444
445 lockres->l_flags = newflags;
446
447 list_for_each_safe(pos, tmp, &lockres->l_mask_waiters) {
448 mw = list_entry(pos, struct ocfs2_mask_waiter, mw_item);
449 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
450 continue;
451
452 list_del_init(&mw->mw_item);
453 mw->mw_status = 0;
454 complete(&mw->mw_complete);
455 }
456}
457static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
458{
459 lockres_set_flags(lockres, lockres->l_flags | or);
460}
461static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
462 unsigned long clear)
463{
464 lockres_set_flags(lockres, lockres->l_flags & ~clear);
465}
466
467static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
468{
469 mlog_entry_void();
470
471 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
472 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
473 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
474 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
475
476 lockres->l_level = lockres->l_requested;
477 if (lockres->l_level <=
478 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
479 lockres->l_blocking = LKM_NLMODE;
480 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
481 }
482 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
483
484 mlog_exit_void();
485}
486
487static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
488{
489 mlog_entry_void();
490
491 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
492 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
493
494 /* Convert from RO to EX doesn't really need anything as our
495 * information is already up to date. Convert from NL to
496 * *anything* however should mark ourselves as needing an
497 * update */
498 if (lockres->l_level == LKM_NLMODE)
499 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
500
501 lockres->l_level = lockres->l_requested;
502 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
503
504 mlog_exit_void();
505}
506
507static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
508{
509 mlog_entry_void();
510
511 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
512 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
513
514 if (lockres->l_requested > LKM_NLMODE &&
515 !(lockres->l_flags & OCFS2_LOCK_LOCAL))
516 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
517
518 lockres->l_level = lockres->l_requested;
519 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
520 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
521
522 mlog_exit_void();
523}
524
525static void ocfs2_inode_ast_func(void *opaque)
526{
527 struct ocfs2_lock_res *lockres = opaque;
528 struct inode *inode;
529 struct dlm_lockstatus *lksb;
530 unsigned long flags;
531
532 mlog_entry_void();
533
534 inode = ocfs2_lock_res_inode(lockres);
535
536 mlog(0, "AST fired for inode %llu, l_action = %u, type = %s\n",
537 (unsigned long long)OCFS2_I(inode)->ip_blkno, lockres->l_action,
538 ocfs2_lock_type_string(lockres->l_type));
539
540 BUG_ON(!ocfs2_is_inode_lock(lockres));
541
542 spin_lock_irqsave(&lockres->l_lock, flags);
543
544 lksb = &(lockres->l_lksb);
545 if (lksb->status != DLM_NORMAL) {
546 mlog(ML_ERROR, "ocfs2_inode_ast_func: lksb status value of %u "
547 "on inode %llu\n", lksb->status,
548 (unsigned long long)OCFS2_I(inode)->ip_blkno);
549 spin_unlock_irqrestore(&lockres->l_lock, flags);
550 mlog_exit_void();
551 return;
552 }
553
554 switch(lockres->l_action) {
555 case OCFS2_AST_ATTACH:
556 ocfs2_generic_handle_attach_action(lockres);
557 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
558 break;
559 case OCFS2_AST_CONVERT:
560 ocfs2_generic_handle_convert_action(lockres);
561 break;
562 case OCFS2_AST_DOWNCONVERT:
563 ocfs2_generic_handle_downconvert_action(lockres);
564 break;
565 default:
566 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
567 "lockres flags = 0x%lx, unlock action: %u\n",
568 lockres->l_name, lockres->l_action, lockres->l_flags,
569 lockres->l_unlock_action);
570
571 BUG();
572 }
573
574 /* data and rw locking ignores the refresh flag for now. */
575 if (lockres->l_type != OCFS2_LOCK_TYPE_META)
576 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
577
578 /* set it to something invalid so if we get called again we
579 * can catch it. */
580 lockres->l_action = OCFS2_AST_INVALID;
581 spin_unlock_irqrestore(&lockres->l_lock, flags);
582 wake_up(&lockres->l_event);
583
584 mlog_exit_void();
585}
586
587static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
588 int level)
589{
590 int needs_downconvert = 0;
591 mlog_entry_void();
592
593 assert_spin_locked(&lockres->l_lock);
594
595 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
596
597 if (level > lockres->l_blocking) {
598 /* only schedule a downconvert if we haven't already scheduled
599 * one that goes low enough to satisfy the level we're
600 * blocking. this also catches the case where we get
601 * duplicate BASTs */
602 if (ocfs2_highest_compat_lock_level(level) <
603 ocfs2_highest_compat_lock_level(lockres->l_blocking))
604 needs_downconvert = 1;
605
606 lockres->l_blocking = level;
607 }
608
609 mlog_exit(needs_downconvert);
610 return needs_downconvert;
611}
612
613static void ocfs2_generic_bast_func(struct ocfs2_super *osb,
614 struct ocfs2_lock_res *lockres,
615 int level)
616{
617 int needs_downconvert;
618 unsigned long flags;
619
620 mlog_entry_void();
621
622 BUG_ON(level <= LKM_NLMODE);
623
624 spin_lock_irqsave(&lockres->l_lock, flags);
625 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
626 if (needs_downconvert)
627 ocfs2_schedule_blocked_lock(osb, lockres);
628 spin_unlock_irqrestore(&lockres->l_lock, flags);
629
630 ocfs2_kick_vote_thread(osb);
631
632 wake_up(&lockres->l_event);
633 mlog_exit_void();
634}
635
636static void ocfs2_inode_bast_func(void *opaque, int level)
637{
638 struct ocfs2_lock_res *lockres = opaque;
639 struct inode *inode;
640 struct ocfs2_super *osb;
641
642 mlog_entry_void();
643
644 BUG_ON(!ocfs2_is_inode_lock(lockres));
645
646 inode = ocfs2_lock_res_inode(lockres);
647 osb = OCFS2_SB(inode->i_sb);
648
649 mlog(0, "BAST fired for inode %llu, blocking %d, level %d type %s\n",
650 (unsigned long long)OCFS2_I(inode)->ip_blkno, level,
651 lockres->l_level, ocfs2_lock_type_string(lockres->l_type));
652
653 ocfs2_generic_bast_func(osb, lockres, level);
654
655 mlog_exit_void();
656}
657
658static void ocfs2_generic_ast_func(struct ocfs2_lock_res *lockres,
659 int ignore_refresh)
660{
661 struct dlm_lockstatus *lksb = &lockres->l_lksb;
662 unsigned long flags;
663
664 spin_lock_irqsave(&lockres->l_lock, flags);
665
666 if (lksb->status != DLM_NORMAL) {
667 mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
668 lockres->l_name, lksb->status);
669 spin_unlock_irqrestore(&lockres->l_lock, flags);
670 return;
671 }
672
673 switch(lockres->l_action) {
674 case OCFS2_AST_ATTACH:
675 ocfs2_generic_handle_attach_action(lockres);
676 break;
677 case OCFS2_AST_CONVERT:
678 ocfs2_generic_handle_convert_action(lockres);
679 break;
680 case OCFS2_AST_DOWNCONVERT:
681 ocfs2_generic_handle_downconvert_action(lockres);
682 break;
683 default:
684 BUG();
685 }
686
687 if (ignore_refresh)
688 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
689
690 /* set it to something invalid so if we get called again we
691 * can catch it. */
692 lockres->l_action = OCFS2_AST_INVALID;
693 spin_unlock_irqrestore(&lockres->l_lock, flags);
694
695 wake_up(&lockres->l_event);
696}
697
698static void ocfs2_super_ast_func(void *opaque)
699{
700 struct ocfs2_lock_res *lockres = opaque;
701
702 mlog_entry_void();
703 mlog(0, "Superblock AST fired\n");
704
705 BUG_ON(!ocfs2_is_super_lock(lockres));
706 ocfs2_generic_ast_func(lockres, 0);
707
708 mlog_exit_void();
709}
710
711static void ocfs2_super_bast_func(void *opaque,
712 int level)
713{
714 struct ocfs2_lock_res *lockres = opaque;
715 struct ocfs2_super *osb;
716
717 mlog_entry_void();
718 mlog(0, "Superblock BAST fired\n");
719
720 BUG_ON(!ocfs2_is_super_lock(lockres));
721 osb = ocfs2_lock_res_super(lockres);
722 ocfs2_generic_bast_func(osb, lockres, level);
723
724 mlog_exit_void();
725}
726
727static void ocfs2_rename_ast_func(void *opaque)
728{
729 struct ocfs2_lock_res *lockres = opaque;
730
731 mlog_entry_void();
732
733 mlog(0, "Rename AST fired\n");
734
735 BUG_ON(!ocfs2_is_rename_lock(lockres));
736
737 ocfs2_generic_ast_func(lockres, 1);
738
739 mlog_exit_void();
740}
741
742static void ocfs2_rename_bast_func(void *opaque,
743 int level)
744{
745 struct ocfs2_lock_res *lockres = opaque;
746 struct ocfs2_super *osb;
747
748 mlog_entry_void();
749
750 mlog(0, "Rename BAST fired\n");
751
752 BUG_ON(!ocfs2_is_rename_lock(lockres));
753
754 osb = ocfs2_lock_res_super(lockres);
755 ocfs2_generic_bast_func(osb, lockres, level);
756
757 mlog_exit_void();
758}
759
760static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
761 int convert)
762{
763 unsigned long flags;
764
765 mlog_entry_void();
766 spin_lock_irqsave(&lockres->l_lock, flags);
767 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
768 if (convert)
769 lockres->l_action = OCFS2_AST_INVALID;
770 else
771 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
772 spin_unlock_irqrestore(&lockres->l_lock, flags);
773
774 wake_up(&lockres->l_event);
775 mlog_exit_void();
776}
777
778/* Note: If we detect another process working on the lock (i.e.,
779 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
780 * to do the right thing in that case.
781 */
782static int ocfs2_lock_create(struct ocfs2_super *osb,
783 struct ocfs2_lock_res *lockres,
784 int level,
785 int dlm_flags)
786{
787 int ret = 0;
788 enum dlm_status status;
789 unsigned long flags;
790
791 mlog_entry_void();
792
793 mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
794 dlm_flags);
795
796 spin_lock_irqsave(&lockres->l_lock, flags);
797 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
798 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
799 spin_unlock_irqrestore(&lockres->l_lock, flags);
800 goto bail;
801 }
802
803 lockres->l_action = OCFS2_AST_ATTACH;
804 lockres->l_requested = level;
805 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
806 spin_unlock_irqrestore(&lockres->l_lock, flags);
807
808 status = dlmlock(osb->dlm,
809 level,
810 &lockres->l_lksb,
811 dlm_flags,
812 lockres->l_name,
813 OCFS2_LOCK_ID_MAX_LEN - 1,
814 lockres->l_ops->ast,
815 lockres,
816 lockres->l_ops->bast);
817 if (status != DLM_NORMAL) {
818 ocfs2_log_dlm_error("dlmlock", status, lockres);
819 ret = -EINVAL;
820 ocfs2_recover_from_dlm_error(lockres, 1);
821 }
822
823 mlog(0, "lock %s, successful return from dlmlock\n", lockres->l_name);
824
825bail:
826 mlog_exit(ret);
827 return ret;
828}
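/*
 * Added commentary (a sketch, not original code): per the commit
 * subject, dlmlock() now takes the lock name and its length as
 * explicit arguments, so the call shape used throughout this file is
 *
 *	status = dlmlock(dlm, mode, &lksb, flags,
 *			 name, OCFS2_LOCK_ID_MAX_LEN - 1,
 *			 ast_func, astarg, bast_func);
 *
 * The length can be a constant because ocfs2_build_lock_name() always
 * emits fixed-width, NUL-terminated names.
 */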
829
830static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
831 int flag)
832{
833 unsigned long flags;
834 int ret;
835
836 spin_lock_irqsave(&lockres->l_lock, flags);
837 ret = lockres->l_flags & flag;
838 spin_unlock_irqrestore(&lockres->l_lock, flags);
839
840 return ret;
841}
842
843static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
844
845{
846 wait_event(lockres->l_event,
847 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
848}
849
850static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
851
852{
853 wait_event(lockres->l_event,
854 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
855}
856
857/* predict what lock level we'll be dropping down to on behalf
858 * of another node, and return true if the currently wanted
859 * level will be compatible with it. */
860static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
861 int wanted)
862{
863 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
864
865 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
866}
867
868static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
869{
870 INIT_LIST_HEAD(&mw->mw_item);
871 init_completion(&mw->mw_complete);
872}
873
874static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
875{
876 wait_for_completion(&mw->mw_complete);
877 /* Re-arm the completion in case we want to wait on it again */
878 INIT_COMPLETION(mw->mw_complete);
879 return mw->mw_status;
880}
881
882static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
883 struct ocfs2_mask_waiter *mw,
884 unsigned long mask,
885 unsigned long goal)
886{
887 BUG_ON(!list_empty(&mw->mw_item));
888
889 assert_spin_locked(&lockres->l_lock);
890
891 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
892 mw->mw_mask = mask;
893 mw->mw_goal = goal;
894}
895
896/* returns 0 if the mw that was removed was already satisfied, -EBUSY
897 * if the mask still hadn't reached its goal */
898static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
899 struct ocfs2_mask_waiter *mw)
900{
901 unsigned long flags;
902 int ret = 0;
903
904 spin_lock_irqsave(&lockres->l_lock, flags);
905 if (!list_empty(&mw->mw_item)) {
906 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
907 ret = -EBUSY;
908
909 list_del_init(&mw->mw_item);
910 init_completion(&mw->mw_complete);
911 }
912 spin_unlock_irqrestore(&lockres->l_lock, flags);
913
914 return ret;
915
916}
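/*
 * Usage sketch (added for illustration; it mirrors ocfs2_cluster_lock
 * below and is not itself part of the original file). A waiter is
 * queued under l_lock and completed once (l_flags & mask) == goal:
 *
 *	struct ocfs2_mask_waiter mw;
 *	unsigned long flags;
 *
 *	ocfs2_init_mask_waiter(&mw);
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	ret = ocfs2_wait_for_mask(&mw); // wakes once BUSY has cleared
 */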
917
918static int ocfs2_cluster_lock(struct ocfs2_super *osb,
919 struct ocfs2_lock_res *lockres,
920 int level,
921 int lkm_flags,
922 int arg_flags)
923{
924 struct ocfs2_mask_waiter mw;
925 enum dlm_status status;
926 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
927 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
928 unsigned long flags;
929
930 mlog_entry_void();
931
932 ocfs2_init_mask_waiter(&mw);
933
934again:
935 wait = 0;
936
937 if (catch_signals && signal_pending(current)) {
938 ret = -ERESTARTSYS;
939 goto out;
940 }
941
942 spin_lock_irqsave(&lockres->l_lock, flags);
943
944 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
945 "Cluster lock called on freeing lockres %s! flags "
946 "0x%lx\n", lockres->l_name, lockres->l_flags);
947
948 /* We only compare against the currently granted level
949 * here. If the lock is blocked waiting on a downconvert,
950 * we'll get caught below. */
951 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
952 level > lockres->l_level) {
953 /* is someone sitting in dlm_lock? If so, wait on
954 * them. */
955 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
956 wait = 1;
957 goto unlock;
958 }
959
960 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
961 /* lock has not been created yet. */
962 spin_unlock_irqrestore(&lockres->l_lock, flags);
963
964 ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
965 if (ret < 0) {
966 mlog_errno(ret);
967 goto out;
968 }
969 goto again;
970 }
971
972 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
973 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
974 /* the lock is currently blocked on behalf of
975 * another node */
976 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
977 wait = 1;
978 goto unlock;
979 }
980
981 if (level > lockres->l_level) {
982 if (lockres->l_action != OCFS2_AST_INVALID)
983 mlog(ML_ERROR, "lockres %s has action %u pending\n",
984 lockres->l_name, lockres->l_action);
985
986 lockres->l_action = OCFS2_AST_CONVERT;
987 lockres->l_requested = level;
988 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
989 spin_unlock_irqrestore(&lockres->l_lock, flags);
990
991 BUG_ON(level == LKM_IVMODE);
992 BUG_ON(level == LKM_NLMODE);
993
994 mlog(0, "lock %s, convert from %d to level = %d\n",
995 lockres->l_name, lockres->l_level, level);
996
997 /* call dlm_lock to upgrade lock now */
998 status = dlmlock(osb->dlm,
999 level,
1000 &lockres->l_lksb,
1001 lkm_flags|LKM_CONVERT|LKM_VALBLK,
1002 lockres->l_name,
1003 OCFS2_LOCK_ID_MAX_LEN - 1,
1004 lockres->l_ops->ast,
1005 lockres,
1006 lockres->l_ops->bast);
1007 if (status != DLM_NORMAL) {
1008 if ((lkm_flags & LKM_NOQUEUE) &&
1009 (status == DLM_NOTQUEUED))
1010 ret = -EAGAIN;
1011 else {
1012 ocfs2_log_dlm_error("dlmlock", status,
1013 lockres);
1014 ret = -EINVAL;
1015 }
1016 ocfs2_recover_from_dlm_error(lockres, 1);
1017 goto out;
1018 }
1019
1020 mlog(0, "lock %s, successful return from dlmlock\n",
1021 lockres->l_name);
1022
1023 /* At this point we've gone inside the dlm and need to
1024 * complete our work regardless. */
1025 catch_signals = 0;
1026
1027 /* wait for busy to clear and carry on */
1028 goto again;
1029 }
1030
1031 /* Ok, if we get here then we're good to go. */
1032 ocfs2_inc_holders(lockres, level);
1033
1034 ret = 0;
1035unlock:
1036 spin_unlock_irqrestore(&lockres->l_lock, flags);
1037out:
1038 /*
1039 * This is helping work around a lock inversion between the page lock
1040 * and dlm locks. One path holds the page lock while calling aops
1041 * which block acquiring dlm locks. The voting thread holds dlm
1042 * locks while acquiring page locks while down converting data locks.
1043 * This block is helping an aop path notice the inversion and back
1044 * off to unlock its page lock before trying the dlm lock again.
1045 */
1046 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1047 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1048 wait = 0;
1049 if (lockres_remove_mask_waiter(lockres, &mw))
1050 ret = -EAGAIN;
1051 else
1052 goto again;
1053 }
1054 if (wait) {
1055 ret = ocfs2_wait_for_mask(&mw);
1056 if (ret == 0)
1057 goto again;
1058 mlog_errno(ret);
1059 }
1060
1061 mlog_exit(ret);
1062 return ret;
1063}
1064
1065static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1066 struct ocfs2_lock_res *lockres,
1067 int level)
1068{
1069 unsigned long flags;
1070
1071 mlog_entry_void();
1072 spin_lock_irqsave(&lockres->l_lock, flags);
1073 ocfs2_dec_holders(lockres, level);
1074 ocfs2_vote_on_unlock(osb, lockres);
1075 spin_unlock_irqrestore(&lockres->l_lock, flags);
1076 mlog_exit_void();
1077}
1078
1079static int ocfs2_create_new_inode_lock(struct inode *inode,
1080 struct ocfs2_lock_res *lockres)
1081{
1082 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&lockres->l_lock, flags);
1086 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1087 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1088 spin_unlock_irqrestore(&lockres->l_lock, flags);
1089
1090 return ocfs2_lock_create(osb, lockres, LKM_EXMODE, LKM_LOCAL);
1091}
1092
1093/* Grants us an EX lock on the data and metadata resources, skipping
1094 * the normal cluster directory lookup. Use this ONLY on newly created
1095 * inodes which other nodes can't possibly see, and which haven't been
1096 * hashed in the inode hash yet. This can give us a good performance
1097 * increase as it'll skip the network broadcast normally associated
1098 * with creating a new lock resource. */
1099int ocfs2_create_new_inode_locks(struct inode *inode)
1100{
1101 int ret;
1102
1103 BUG_ON(!inode);
1104 BUG_ON(!ocfs2_inode_is_new(inode));
1105
1106 mlog_entry_void();
1107
1108 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1109
1110 /* Note that we don't increment any of the holder counts, nor
1111 * do we add anything to a journal handle. Since this is
1112 * supposed to be a new inode which the cluster doesn't know
1113 * about yet, there is no need to. As far as the LVB handling
1114 * is concerned, this is basically like acquiring an EX lock
1115 * on a resource which has an invalid one -- we'll set it
1116 * valid when we release the EX. */
1117
1118 ret = ocfs2_create_new_inode_lock(inode,
1119 &OCFS2_I(inode)->ip_rw_lockres);
1120 if (ret) {
1121 mlog_errno(ret);
1122 goto bail;
1123 }
1124
1125 ret = ocfs2_create_new_inode_lock(inode,
1126 &OCFS2_I(inode)->ip_meta_lockres);
1127 if (ret) {
1128 mlog_errno(ret);
1129 goto bail;
1130 }
1131
1132 ret = ocfs2_create_new_inode_lock(inode,
1133 &OCFS2_I(inode)->ip_data_lockres);
1134 if (ret) {
1135 mlog_errno(ret);
1136 goto bail;
1137 }
1138
1139bail:
1140 mlog_exit(ret);
1141 return ret;
1142}
1143
1144int ocfs2_rw_lock(struct inode *inode, int write)
1145{
1146 int status, level;
1147 struct ocfs2_lock_res *lockres;
1148
1149 BUG_ON(!inode);
1150
1151 mlog_entry_void();
1152
1153 mlog(0, "inode %llu take %s RW lock\n",
1154 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1155 write ? "EXMODE" : "PRMODE");
1156
1157 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1158
1159 level = write ? LKM_EXMODE : LKM_PRMODE;
1160
1161 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1162 0);
1163 if (status < 0)
1164 mlog_errno(status);
1165
1166 mlog_exit(status);
1167 return status;
1168}
1169
1170void ocfs2_rw_unlock(struct inode *inode, int write)
1171{
1172 int level = write ? LKM_EXMODE : LKM_PRMODE;
1173 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1174
1175 mlog_entry_void();
1176
1177 mlog(0, "inode %llu drop %s RW lock\n",
1178 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1179 write ? "EXMODE" : "PRMODE");
1180
1181 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1182
1183 mlog_exit_void();
1184}
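/*
 * Illustrative sketch (added; not in the original file): a
 * hypothetical caller bracketing a write with the cluster RW lock.
 * do_write_work() is a made-up placeholder.
 */
static int example_rw_locked_write(struct inode *inode)
{
	int status;

	status = ocfs2_rw_lock(inode, 1);	/* 1 == write, takes EXMODE */
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	/* status = do_write_work(inode); */

	ocfs2_rw_unlock(inode, 1);
	return status;
}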
1185
1186int ocfs2_data_lock_full(struct inode *inode,
1187 int write,
1188 int arg_flags)
1189{
1190 int status = 0, level;
1191 struct ocfs2_lock_res *lockres;
1192
1193 BUG_ON(!inode);
1194
1195 mlog_entry_void();
1196
1197 mlog(0, "inode %llu take %s DATA lock\n",
1198 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1199 write ? "EXMODE" : "PRMODE");
1200
1201 /* We'll allow faking a readonly data lock for
1202 * read-only devices. */
1203 if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
1204 if (write) {
1205 status = -EROFS;
1206 mlog_errno(status);
1207 }
1208 goto out;
1209 }
1210
1211 lockres = &OCFS2_I(inode)->ip_data_lockres;
1212
1213 level = write ? LKM_EXMODE : LKM_PRMODE;
1214
1215 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
1216 0, arg_flags);
1217 if (status < 0 && status != -EAGAIN)
1218 mlog_errno(status);
1219
1220out:
1221 mlog_exit(status);
1222 return status;
1223}
1224
1225/* see ocfs2_meta_lock_with_page() */
1226int ocfs2_data_lock_with_page(struct inode *inode,
1227 int write,
1228 struct page *page)
1229{
1230 int ret;
1231
1232 ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
1233 if (ret == -EAGAIN) {
1234 unlock_page(page);
1235 if (ocfs2_data_lock(inode, write) == 0)
1236 ocfs2_data_unlock(inode, write);
1237 ret = AOP_TRUNCATED_PAGE;
1238 }
1239
1240 return ret;
1241}
1242
1243static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
1244 struct ocfs2_lock_res *lockres)
1245{
1246 int kick = 0;
1247
1248 mlog_entry_void();
1249
1250 /* If we know that another node is waiting on our lock, kick
1251 * the vote thread pre-emptively when we reach a release
1252 * condition. */
1253 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1254 switch(lockres->l_blocking) {
1255 case LKM_EXMODE:
1256 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1257 kick = 1;
1258 break;
1259 case LKM_PRMODE:
1260 if (!lockres->l_ex_holders)
1261 kick = 1;
1262 break;
1263 default:
1264 BUG();
1265 }
1266 }
1267
1268 if (kick)
1269 ocfs2_kick_vote_thread(osb);
1270
1271 mlog_exit_void();
1272}
1273
1274void ocfs2_data_unlock(struct inode *inode,
1275 int write)
1276{
1277 int level = write ? LKM_EXMODE : LKM_PRMODE;
1278 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
1279
1280 mlog_entry_void();
1281
1282 mlog(0, "inode %llu drop %s DATA lock\n",
1283 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1284 write ? "EXMODE" : "PRMODE");
1285
1286 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
1287 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1288
1289 mlog_exit_void();
1290}
1291
1292#define OCFS2_SEC_BITS 34
1293#define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS)
1294#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
1295
1296/* LVB only has room for 64 bits of time here so we pack it for
1297 * now. */
1298static u64 ocfs2_pack_timespec(struct timespec *spec)
1299{
1300 u64 res;
1301 u64 sec = spec->tv_sec;
1302 u32 nsec = spec->tv_nsec;
1303
1304 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
1305
1306 return res;
1307}
1308
1309/* Call this with the lockres locked. I am reasonably sure we don't
1310 * need ip_lock in this function as anyone who would be changing those
1311 * values is supposed to be blocked in ocfs2_meta_lock right now. */
1312static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1313{
1314 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1315 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1316 struct ocfs2_meta_lvb *lvb;
1317
1318 mlog_entry_void();
1319
1320 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1321
1322 lvb->lvb_version = cpu_to_be32(OCFS2_LVB_VERSION);
1323 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1324 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1325 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1326 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1327 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1328 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1329 lvb->lvb_iatime_packed =
1330 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1331 lvb->lvb_ictime_packed =
1332 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
1333 lvb->lvb_imtime_packed =
1334 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
1335 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
1336
1337 mlog_meta_lvb(0, lockres);
1338
1339 mlog_exit_void();
1340}
1341
1342static void ocfs2_unpack_timespec(struct timespec *spec,
1343 u64 packed_time)
1344{
1345 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
1346 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
1347}
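/*
 * Worked example (added; not in the original file): with
 * OCFS2_SEC_SHIFT == 30, { .tv_sec = 5, .tv_nsec = 7 } packs to
 * (5 << 30) | 7 == 0x140000007 and unpacks back to { 5, 7 }. tv_nsec
 * always fits below the shift since 999999999 < 2^30; seconds are
 * truncated to OCFS2_SEC_BITS, which holds until roughly the year
 * 2514.
 */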
1348
1349static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
1350{
1351 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1352 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1353 struct ocfs2_meta_lvb *lvb;
1354
1355 mlog_entry_void();
1356
1357 mlog_meta_lvb(0, lockres);
1358
1359 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1360
1361 /* We're safe here without the lockres lock... */
1362 spin_lock(&oi->ip_lock);
1363 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
1364 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
1365
1366 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
1367 ocfs2_set_inode_flags(inode);
1368
1369 /* fast-symlinks are a special case */
1370 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
1371 inode->i_blocks = 0;
1372 else
1373 inode->i_blocks =
1374 ocfs2_align_bytes_to_sectors(i_size_read(inode));
1375
1376 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
1377 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
1378 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
1379 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
1380 ocfs2_unpack_timespec(&inode->i_atime,
1381 be64_to_cpu(lvb->lvb_iatime_packed));
1382 ocfs2_unpack_timespec(&inode->i_mtime,
1383 be64_to_cpu(lvb->lvb_imtime_packed));
1384 ocfs2_unpack_timespec(&inode->i_ctime,
1385 be64_to_cpu(lvb->lvb_ictime_packed));
1386 spin_unlock(&oi->ip_lock);
1387
1388 mlog_exit_void();
1389}
1390
1391static inline int ocfs2_meta_lvb_is_trustable(struct ocfs2_lock_res *lockres)
1392{
1393 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1394
1395 if (be32_to_cpu(lvb->lvb_version) == OCFS2_LVB_VERSION)
1396 return 1;
1397 return 0;
1398}
1399
1400/* Determine whether a lock resource needs to be refreshed, and
1401 * arbitrate who gets to refresh it.
1402 *
1403 * 0 means no refresh needed.
1404 *
1405 * > 0 means you need to refresh this and you MUST call
1406 * ocfs2_complete_lock_res_refresh afterwards. */
1407static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
1408{
1409 unsigned long flags;
1410 int status = 0;
1411
1412 mlog_entry_void();
1413
1414refresh_check:
1415 spin_lock_irqsave(&lockres->l_lock, flags);
1416 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
1417 spin_unlock_irqrestore(&lockres->l_lock, flags);
1418 goto bail;
1419 }
1420
1421 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
1422 spin_unlock_irqrestore(&lockres->l_lock, flags);
1423
1424 ocfs2_wait_on_refreshing_lock(lockres);
1425 goto refresh_check;
1426 }
1427
1428 /* Ok, I'll be the one to refresh this lock. */
1429 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
1430 spin_unlock_irqrestore(&lockres->l_lock, flags);
1431
1432 status = 1;
1433bail:
1434 mlog_exit(status);
1435 return status;
1436}
1437
1438 /* If status is non-zero, I'll mark it as not being in refresh
1439 * anymore, but I won't clear the needs refresh flag. */
1440static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
1441 int status)
1442{
1443 unsigned long flags;
1444 mlog_entry_void();
1445
1446 spin_lock_irqsave(&lockres->l_lock, flags);
1447 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
1448 if (!status)
1449 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
1450 spin_unlock_irqrestore(&lockres->l_lock, flags);
1451
1452 wake_up(&lockres->l_event);
1453
1454 mlog_exit_void();
1455}
1456
1457/* may or may not return a bh if it went to disk. */
1458static int ocfs2_meta_lock_update(struct inode *inode,
1459 struct buffer_head **bh)
1460{
1461 int status = 0;
1462 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1463 struct ocfs2_lock_res *lockres;
1464 struct ocfs2_dinode *fe;
1465
1466 mlog_entry_void();
1467
1468 spin_lock(&oi->ip_lock);
1469 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1470 mlog(0, "Orphaned inode %llu was deleted while we "
1471 "were waiting on a lock. ip_flags = 0x%x\n",
1472 (unsigned long long)oi->ip_blkno, oi->ip_flags);
1473 spin_unlock(&oi->ip_lock);
1474 status = -ENOENT;
1475 goto bail;
1476 }
1477 spin_unlock(&oi->ip_lock);
1478
1479 lockres = &oi->ip_meta_lockres;
1480
1481 if (!ocfs2_should_refresh_lock_res(lockres))
1482 goto bail;
1483
1484 /* This will discard any caching information we might have had
1485 * for the inode metadata. */
1486 ocfs2_metadata_cache_purge(inode);
1487
1488 /* will do nothing for inode types that don't use the extent
1489 * map (directories, bitmap files, etc) */
1490 ocfs2_extent_map_trunc(inode, 0);
1491
1492 if (ocfs2_meta_lvb_is_trustable(lockres)) {
1493 mlog(0, "Trusting LVB on inode %llu\n",
1494 (unsigned long long)oi->ip_blkno);
1495 ocfs2_refresh_inode_from_lvb(inode);
1496 } else {
1497 /* Boo, we have to go to disk. */
1498 /* read bh, cast, ocfs2_refresh_inode */
1499 status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
1500 bh, OCFS2_BH_CACHED, inode);
1501 if (status < 0) {
1502 mlog_errno(status);
1503 goto bail_refresh;
1504 }
1505 fe = (struct ocfs2_dinode *) (*bh)->b_data;
1506
1507 /* This is a good chance to make sure we're not
1508 * locking an invalid object.
1509 *
1510 * We bug on a stale inode here because we checked
1511 * above whether it was wiped from disk. The wiping
1512 * node provides a guarantee that we receive that
1513 * message and can mark the inode before dropping any
1514 * locks associated with it. */
1515 if (!OCFS2_IS_VALID_DINODE(fe)) {
1516 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
1517 status = -EIO;
1518 goto bail_refresh;
1519 }
1520 mlog_bug_on_msg(inode->i_generation !=
1521 le32_to_cpu(fe->i_generation),
1522 "Invalid dinode %llu disk generation: %u "
1523 "inode->i_generation: %u\n",
1524 (unsigned long long)oi->ip_blkno,
1525 le32_to_cpu(fe->i_generation),
1526 inode->i_generation);
1527 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
1528 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
1529 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
1530 (unsigned long long)oi->ip_blkno,
1531 (unsigned long long)le64_to_cpu(fe->i_dtime),
ccd979bd
MF
1532 le32_to_cpu(fe->i_flags));
1533
1534 ocfs2_refresh_inode(inode, fe);
1535 }
1536
1537 status = 0;
1538bail_refresh:
1539 ocfs2_complete_lock_res_refresh(lockres, status);
1540bail:
1541 mlog_exit(status);
1542 return status;
1543}
1544
1545static int ocfs2_assign_bh(struct inode *inode,
1546 struct buffer_head **ret_bh,
1547 struct buffer_head *passed_bh)
1548{
1549 int status;
1550
1551 if (passed_bh) {
1552 /* Ok, the update went to disk for us, use the
1553 * returned bh. */
1554 *ret_bh = passed_bh;
1555 get_bh(*ret_bh);
1556
1557 return 0;
1558 }
1559
1560 status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1561 OCFS2_I(inode)->ip_blkno,
1562 ret_bh,
1563 OCFS2_BH_CACHED,
1564 inode);
1565 if (status < 0)
1566 mlog_errno(status);
1567
1568 return status;
1569}
1570
1571/*
1572 * returns < 0 error if the callback will never be called, otherwise
1573 * the result of the lock will be communicated via the callback.
1574 */
1575int ocfs2_meta_lock_full(struct inode *inode,
1576 struct ocfs2_journal_handle *handle,
1577 struct buffer_head **ret_bh,
1578 int ex,
1579 int arg_flags)
1580{
1581 int status, level, dlm_flags, acquired;
1582 struct ocfs2_lock_res *lockres;
1583 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1584 struct buffer_head *local_bh = NULL;
1585
1586 BUG_ON(!inode);
1587
1588 mlog_entry_void();
1589
1590 mlog(0, "inode %llu, take %s META lock\n",
1591 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1592 ex ? "EXMODE" : "PRMODE");
1593
1594 status = 0;
1595 acquired = 0;
1596 /* We'll allow faking a readonly metadata lock for
1597 * read-only devices. */
1598 if (ocfs2_is_hard_readonly(osb)) {
1599 if (ex)
1600 status = -EROFS;
1601 goto bail;
1602 }
1603
1604 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1605 wait_event(osb->recovery_event,
1606 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1607
1608 acquired = 0;
1609 lockres = &OCFS2_I(inode)->ip_meta_lockres;
1610 level = ex ? LKM_EXMODE : LKM_PRMODE;
1611 dlm_flags = 0;
1612 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
1613 dlm_flags |= LKM_NOQUEUE;
1614
1615 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
1616 if (status < 0) {
1617 if (status != -EAGAIN && status != -EIOCBRETRY)
1618 mlog_errno(status);
1619 goto bail;
1620 }
1621
1622 /* Notify the error cleanup path to drop the cluster lock. */
1623 acquired = 1;
1624
1625 /* We wait twice because a node may have died while we were in
1626 * the lower dlm layers. The second time though, we've
1627 * committed to owning this lock so we don't allow signals to
1628 * abort the operation. */
1629 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1630 wait_event(osb->recovery_event,
1631 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1632
1633 /* This is fun. The caller may want a bh back, or it may
1634 * not. ocfs2_meta_lock_update definitely wants one in, but
1635 * may or may not read one, depending on what's in the
1636 * LVB. The result of all of this is that we've *only* gone to
1637 * disk if we have to, so the complexity is worthwhile. */
1638 status = ocfs2_meta_lock_update(inode, &local_bh);
1639 if (status < 0) {
1640 if (status != -ENOENT)
1641 mlog_errno(status);
1642 goto bail;
1643 }
1644
1645 if (ret_bh) {
1646 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
1647 if (status < 0) {
1648 mlog_errno(status);
1649 goto bail;
1650 }
1651 }
1652
1653 if (handle) {
1654 status = ocfs2_handle_add_lock(handle, inode);
1655 if (status < 0)
1656 mlog_errno(status);
1657 }
1658
1659bail:
1660 if (status < 0) {
1661 if (ret_bh && (*ret_bh)) {
1662 brelse(*ret_bh);
1663 *ret_bh = NULL;
1664 }
1665 if (acquired)
1666 ocfs2_meta_unlock(inode, ex);
1667 }
1668
1669 if (local_bh)
1670 brelse(local_bh);
1671
1672 mlog_exit(status);
1673 return status;
1674}
1675
1676/*
1677 * This is working around a lock inversion between tasks acquiring DLM locks
1678 * while holding a page lock and the vote thread which blocks dlm lock acquiry
1679 * while acquiring page locks.
1680 *
1681 * ** These _with_page variants are only intended to be called from aop
1682 * methods that hold page locks and return a very specific *positive* error
1683 * code that aop methods pass up to the VFS -- test for errors with != 0. **
1684 *
1685 * The DLM is called such that it returns -EAGAIN if it would have blocked
1686 * waiting for the vote thread. In that case we unlock our page so the vote
1687 * thread can make progress. Once we've done this we have to return
1688 * AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
1689 * into the VFS who will then immediately retry the aop call.
1690 *
1691 * We do a blocking lock and immediate unlock before returning, though, so that
1692 * the lock has a great chance of being cached on this node by the time the VFS
1693 * calls back to retry the aop. This has a potential to livelock as nodes
1694 * ping locks back and forth, but that's a risk we're willing to take in
1695 * order to avoid the lock inversion in a simple way.
1696 */
1697int ocfs2_meta_lock_with_page(struct inode *inode,
1698 struct ocfs2_journal_handle *handle,
1699 struct buffer_head **ret_bh,
1700 int ex,
1701 struct page *page)
1702{
1703 int ret;
1704
1705 ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex,
1706 OCFS2_LOCK_NONBLOCK);
1707 if (ret == -EAGAIN) {
1708 unlock_page(page);
1709 if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0)
1710 ocfs2_meta_unlock(inode, ex);
1711 ret = AOP_TRUNCATED_PAGE;
1712 }
1713
1714 return ret;
1715}
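/*
 * Illustrative caller sketch (added; not in the original file): the
 * shape of a hypothetical aop method using the _with_page variant.
 * Per the comment above, errors must be tested with != 0 because
 * AOP_TRUNCATED_PAGE is positive.
 */
static int example_aop_method(struct inode *inode, struct page *page)
{
	int ret;

	ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
	if (ret != 0) {
		/* Either a real error, or AOP_TRUNCATED_PAGE -- in the
		 * latter case our page was unlocked for us so the vote
		 * thread could make progress. */
		return ret;
	}

	/* ... operate on the page under the PR meta lock ... */

	ocfs2_meta_unlock(inode, 0);
	return 0;
}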
1716
1717void ocfs2_meta_unlock(struct inode *inode,
1718 int ex)
1719{
1720 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1721 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
1722
1723 mlog_entry_void();
1724
1725 mlog(0, "inode %llu drop %s META lock\n",
1726 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1727 ex ? "EXMODE" : "PRMODE");
1728
1729 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
1730 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1731
1732 mlog_exit_void();
1733}
1734
1735int ocfs2_super_lock(struct ocfs2_super *osb,
1736 int ex)
1737{
1738 int status;
1739 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1740 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1741 struct buffer_head *bh;
1742 struct ocfs2_slot_info *si = osb->slot_info;
1743
1744 mlog_entry_void();
1745
1746 if (ocfs2_is_hard_readonly(osb))
1747 return -EROFS;
1748
1749 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1750 if (status < 0) {
1751 mlog_errno(status);
1752 goto bail;
1753 }
1754
1755 /* The super block lock path is really in the best position to
1756 * know when resources covered by the lock need to be
1757 * refreshed, so we do it here. Of course, making sense of
1758 * everything is up to the caller :) */
1759 status = ocfs2_should_refresh_lock_res(lockres);
1760 if (status < 0) {
1761 mlog_errno(status);
1762 goto bail;
1763 }
1764 if (status) {
1765 bh = si->si_bh;
1766 status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
1767 si->si_inode);
1768 if (status == 0)
1769 ocfs2_update_slot_info(si);
1770
1771 ocfs2_complete_lock_res_refresh(lockres, status);
1772
1773 if (status < 0)
1774 mlog_errno(status);
1775 }
1776bail:
1777 mlog_exit(status);
1778 return status;
1779}
1780
1781void ocfs2_super_unlock(struct ocfs2_super *osb,
1782 int ex)
1783{
1784 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1785 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1786
1787 ocfs2_cluster_unlock(osb, lockres, level);
1788}
1789
1790int ocfs2_rename_lock(struct ocfs2_super *osb)
1791{
1792 int status;
1793 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1794
1795 if (ocfs2_is_hard_readonly(osb))
1796 return -EROFS;
1797
1798 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
1799 if (status < 0)
1800 mlog_errno(status);
1801
1802 return status;
1803}
1804
1805void ocfs2_rename_unlock(struct ocfs2_super *osb)
1806{
1807 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1808
1809 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
1810}
1811
1812/* Reference counting of the dlm debug structure. We want this because
1813 * open references on the debug inodes can live on after a mount, so
1814 * we can't rely on the ocfs2_super to always exist. */
1815static void ocfs2_dlm_debug_free(struct kref *kref)
1816{
1817 struct ocfs2_dlm_debug *dlm_debug;
1818
1819 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
1820
1821 kfree(dlm_debug);
1822}
1823
1824void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
1825{
1826 if (dlm_debug)
1827 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
1828}
1829
1830static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
1831{
1832 kref_get(&debug->d_refcnt);
1833}
1834
1835struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
1836{
1837 struct ocfs2_dlm_debug *dlm_debug;
1838
1839 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
1840 if (!dlm_debug) {
1841 mlog_errno(-ENOMEM);
1842 goto out;
1843 }
1844
1845 kref_init(&dlm_debug->d_refcnt);
1846 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
1847 dlm_debug->d_locking_state = NULL;
1848out:
1849 return dlm_debug;
1850}
1851
1852/* Access to this is arbitrated for us via seq_file->sem. */
1853struct ocfs2_dlm_seq_priv {
1854 struct ocfs2_dlm_debug *p_dlm_debug;
1855 struct ocfs2_lock_res p_iter_res;
1856 struct ocfs2_lock_res p_tmp_res;
1857};
1858
1859static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
1860 struct ocfs2_dlm_seq_priv *priv)
1861{
1862 struct ocfs2_lock_res *iter, *ret = NULL;
1863 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
1864
1865 assert_spin_locked(&ocfs2_dlm_tracking_lock);
1866
1867 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
1868 /* discover the head of the list */
1869 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
1870 mlog(0, "End of list found, %p\n", ret);
1871 break;
1872 }
1873
1874 /* We track our "dummy" iteration lockres' by a NULL
1875 * l_ops field. */
1876 if (iter->l_ops != NULL) {
1877 ret = iter;
1878 break;
1879 }
1880 }
1881
1882 return ret;
1883}
1884
1885static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
1886{
1887 struct ocfs2_dlm_seq_priv *priv = m->private;
1888 struct ocfs2_lock_res *iter;
1889
1890 spin_lock(&ocfs2_dlm_tracking_lock);
1891 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
1892 if (iter) {
1893 /* Since lockres' have the lifetime of their container
1894 * (which can be inodes, ocfs2_supers, etc) we want to
1895 * copy this out to a temporary lockres while still
1896 * under the spinlock. Obviously after this we can't
1897 * trust any pointers on the copy returned, but that's
1898 * ok as the information we want isn't typically held
1899 * in them. */
1900 priv->p_tmp_res = *iter;
1901 iter = &priv->p_tmp_res;
1902 }
1903 spin_unlock(&ocfs2_dlm_tracking_lock);
1904
1905 return iter;
1906}
1907
1908static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
1909{
1910}
1911
1912static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
1913{
1914 struct ocfs2_dlm_seq_priv *priv = m->private;
1915 struct ocfs2_lock_res *iter = v;
1916 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
1917
1918 spin_lock(&ocfs2_dlm_tracking_lock);
1919 iter = ocfs2_dlm_next_res(iter, priv);
1920 list_del_init(&dummy->l_debug_list);
1921 if (iter) {
1922 list_add(&dummy->l_debug_list, &iter->l_debug_list);
1923 priv->p_tmp_res = *iter;
1924 iter = &priv->p_tmp_res;
1925 }
1926 spin_unlock(&ocfs2_dlm_tracking_lock);
1927
1928 return iter;
1929}
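/* How the two routines above keep their place between reads: the
 * private p_iter_res is a dummy lockres (l_ops == NULL, so
 * ocfs2_dlm_next_res() skips it) used purely as a cursor in
 * d_lockres_tracking. ->next unlinks the dummy and re-links it just
 * after the lockres it is about to return, so the walk stays valid
 * even if real lockres are created or freed between seq_file reads. */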
1930
1931/* So that debugfs.ocfs2 can determine which format is being used */
1932#define OCFS2_DLM_DEBUG_STR_VERSION 1
1933static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
1934{
1935 int i;
1936 char *lvb;
1937 struct ocfs2_lock_res *lockres = v;
1938
1939 if (!lockres)
1940 return -EINVAL;
1941
1942 seq_printf(m, "0x%x\t"
1943 "%.*s\t"
1944 "%d\t"
1945 "0x%lx\t"
1946 "0x%x\t"
1947 "0x%x\t"
1948 "%u\t"
1949 "%u\t"
1950 "%d\t"
1951 "%d\t",
1952 OCFS2_DLM_DEBUG_STR_VERSION,
1953 OCFS2_LOCK_ID_MAX_LEN, lockres->l_name,
1954 lockres->l_level,
1955 lockres->l_flags,
1956 lockres->l_action,
1957 lockres->l_unlock_action,
1958 lockres->l_ro_holders,
1959 lockres->l_ex_holders,
1960 lockres->l_requested,
1961 lockres->l_blocking);
1962
1963 /* Dump the raw LVB */
1964 lvb = lockres->l_lksb.lvb;
1965 for(i = 0; i < DLM_LVB_LEN; i++)
1966 seq_printf(m, "0x%x\t", lvb[i]);
1967
1968 /* End the line */
1969 seq_printf(m, "\n");
1970 return 0;
1971}
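/* A record emitted above is one tab-separated line, roughly like this
 * (illustrative values only; the lock name is padded to
 * OCFS2_LOCK_ID_MAX_LEN and the LVB dump runs for all DLM_LVB_LEN
 * bytes):
 *
 *	0x1	M000000000000000000021086e2e3c0	3	0x41	0x0	0x0	1	0	0	0	0x0	0x0 ...
 */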
1972
1973static struct seq_operations ocfs2_dlm_seq_ops = {
1974 .start = ocfs2_dlm_seq_start,
1975 .stop = ocfs2_dlm_seq_stop,
1976 .next = ocfs2_dlm_seq_next,
1977 .show = ocfs2_dlm_seq_show,
1978};
1979
1980static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
1981{
1982 struct seq_file *seq = (struct seq_file *) file->private_data;
1983 struct ocfs2_dlm_seq_priv *priv = seq->private;
1984 struct ocfs2_lock_res *res = &priv->p_iter_res;
1985
1986 ocfs2_remove_lockres_tracking(res);
1987 ocfs2_put_dlm_debug(priv->p_dlm_debug);
1988 return seq_release_private(inode, file);
1989}
1990
1991static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
1992{
1993 int ret;
1994 struct ocfs2_dlm_seq_priv *priv;
1995 struct seq_file *seq;
1996 struct ocfs2_super *osb;
1997
1998 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
1999 if (!priv) {
2000 ret = -ENOMEM;
2001 mlog_errno(ret);
2002 goto out;
2003 }
2004 osb = (struct ocfs2_super *) inode->u.generic_ip;
2005 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2006 priv->p_dlm_debug = osb->osb_dlm_debug;
2007 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2008
2009 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2010 if (ret) {
2011 kfree(priv);
2012 mlog_errno(ret);
2013 goto out;
2014 }
2015
2016 seq = (struct seq_file *) file->private_data;
2017 seq->private = priv;
2018
2019 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2020 priv->p_dlm_debug);
2021
2022out:
2023 return ret;
2024}
2025
2026 static const struct file_operations ocfs2_dlm_debug_fops = {
2027 .open = ocfs2_dlm_debug_open,
2028 .release = ocfs2_dlm_debug_release,
2029 .read = seq_read,
2030 .llseek = seq_lseek,
2031};
2032
2033static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2034{
2035 int ret = 0;
2036 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2037
2038 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2039 S_IFREG|S_IRUSR,
2040 osb->osb_debug_root,
2041 osb,
2042 &ocfs2_dlm_debug_fops);
2043 if (!dlm_debug->d_locking_state) {
2044 ret = -EINVAL;
2045 mlog(ML_ERROR,
2046 "Unable to create locking state debugfs file.\n");
2047 goto out;
2048 }
2049
2050 ocfs2_get_dlm_debug(dlm_debug);
2051out:
2052 return ret;
2053}
2054
2055static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2056{
2057 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2058
2059 if (dlm_debug) {
2060 debugfs_remove(dlm_debug->d_locking_state);
2061 ocfs2_put_dlm_debug(dlm_debug);
2062 }
2063}
2064
2065int ocfs2_dlm_init(struct ocfs2_super *osb)
2066{
2067 int status;
2068 u32 dlm_key;
2069 struct dlm_ctxt *dlm;
2070
2071 mlog_entry_void();
2072
2073 status = ocfs2_dlm_init_debug(osb);
2074 if (status < 0) {
2075 mlog_errno(status);
2076 goto bail;
2077 }
2078
2079 /* launch vote thread */
2080 osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
2081 if (IS_ERR(osb->vote_task)) {
2082 status = PTR_ERR(osb->vote_task);
2083 osb->vote_task = NULL;
2084 mlog_errno(status);
2085 goto bail;
2086 }
2087
2088 /* used by the dlm code to make message headers unique, each
2089 * node in this domain must agree on this. */
2090 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2091
2092 /* for now, uuid == domain */
2093 dlm = dlm_register_domain(osb->uuid_str, dlm_key);
2094 if (IS_ERR(dlm)) {
2095 status = PTR_ERR(dlm);
2096 mlog_errno(status);
2097 goto bail;
2098 }
2099
2100 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2101 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2102
2103 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2104
2105 osb->dlm = dlm;
2106
2107 status = 0;
2108bail:
2109 if (status < 0) {
2110 ocfs2_dlm_shutdown_debug(osb);
2111 if (osb->vote_task)
2112 kthread_stop(osb->vote_task);
2113 }
2114
2115 mlog_exit(status);
2116 return status;
2117}
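/* Sketch of how the mount path is expected to drive the entry point
 * above (the real caller lives in super.c; error handling trimmed):
 *
 *	status = ocfs2_dlm_init(osb);		debugfs, vote thread,
 *	if (status < 0)				dlm domain
 *		goto leave;
 *	...
 *	ocfs2_dlm_shutdown(osb);		unmount (or error) path,
 *						see below
 */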
2118
2119void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2120{
2121 mlog_entry_void();
2122
2123 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2124
2125 ocfs2_drop_osb_locks(osb);
2126
2127 if (osb->vote_task) {
2128 kthread_stop(osb->vote_task);
2129 osb->vote_task = NULL;
2130 }
2131
2132 ocfs2_lock_res_free(&osb->osb_super_lockres);
2133 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2134
2135 dlm_unregister_domain(osb->dlm);
2136 osb->dlm = NULL;
2137
2138 ocfs2_dlm_shutdown_debug(osb);
2139
2140 mlog_exit_void();
2141}
2142
2143static void ocfs2_unlock_ast_func(void *opaque, enum dlm_status status)
2144{
2145 struct ocfs2_lock_res *lockres = opaque;
2146 unsigned long flags;
2147
2148 mlog_entry_void();
2149
2150 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2151 lockres->l_unlock_action);
2152
2153 spin_lock_irqsave(&lockres->l_lock, flags);
2154 /* We tried to cancel a convert request, but it was already
2155 * granted. All we want to do here is clear our unlock
2156 * state. The wake_up call done at the bottom is redundant
2157 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2158 * hurt anything anyway */
2159 if (status == DLM_CANCELGRANT &&
2160 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2161 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2162
2163 /* We don't clear the busy flag in this case as it
2164 * should have been cleared by the ast which the dlm
2165 * has called. */
2166 goto complete_unlock;
2167 }
2168
2169 if (status != DLM_NORMAL) {
2170 mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
2171 "unlock_action %d\n", status, lockres->l_name,
2172 lockres->l_unlock_action);
2173 spin_unlock_irqrestore(&lockres->l_lock, flags);
2174 return;
2175 }
2176
2177 switch(lockres->l_unlock_action) {
2178 case OCFS2_UNLOCK_CANCEL_CONVERT:
2179 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2180 lockres->l_action = OCFS2_AST_INVALID;
2181 break;
2182 case OCFS2_UNLOCK_DROP_LOCK:
2183 lockres->l_level = LKM_IVMODE;
2184 break;
2185 default:
2186 BUG();
2187 }
2188
2189 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2190complete_unlock:
2191 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2192 spin_unlock_irqrestore(&lockres->l_lock, flags);
2193
2194 wake_up(&lockres->l_event);
2195
2196 mlog_exit_void();
2197}
2198
2199typedef void (ocfs2_pre_drop_cb_t)(struct ocfs2_lock_res *, void *);
2200
2201struct drop_lock_cb {
2202 ocfs2_pre_drop_cb_t *drop_func;
2203 void *drop_data;
2204};
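/* Illustrative wiring of a pre-drop callback (my_pre_drop and my_data
 * are hypothetical names; the one real user is ocfs2_meta_pre_drop()
 * below, which stuffs the meta LVB before the lock goes away):
 *
 *	struct drop_lock_cb dcb = { my_pre_drop, my_data, };
 *	ocfs2_drop_lock(osb, lockres, &dcb);
 */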
2205
2206static int ocfs2_drop_lock(struct ocfs2_super *osb,
2207 struct ocfs2_lock_res *lockres,
2208 struct drop_lock_cb *dcb)
2209{
2210 enum dlm_status status;
2211 unsigned long flags;
2212
2213 /* We didn't get anywhere near actually using this lockres. */
2214 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2215 goto out;
2216
2217 spin_lock_irqsave(&lockres->l_lock, flags);
2218
2219 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2220 "lockres %s, flags 0x%lx\n",
2221 lockres->l_name, lockres->l_flags);
2222
2223 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2224 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2225 "%u, unlock_action = %u\n",
2226 lockres->l_name, lockres->l_flags, lockres->l_action,
2227 lockres->l_unlock_action);
2228
2229 spin_unlock_irqrestore(&lockres->l_lock, flags);
2230
2231 /* XXX: Today we just wait on any busy
2232 * locks... Perhaps we need to cancel converts in the
2233 * future? */
2234 ocfs2_wait_on_busy_lock(lockres);
2235
2236 spin_lock_irqsave(&lockres->l_lock, flags);
2237 }
2238
2239 if (dcb)
2240 dcb->drop_func(lockres, dcb->drop_data);
2241
2242 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2243 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2244 lockres->l_name);
2245 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2246 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2247
2248 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2249 spin_unlock_irqrestore(&lockres->l_lock, flags);
2250 goto out;
2251 }
2252
2253 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2254
2255 /* make sure we never get here while waiting for an ast to
2256 * fire. */
2257 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2258
2259 /* is this necessary? */
2260 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2261 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2262 spin_unlock_irqrestore(&lockres->l_lock, flags);
2263
2264 mlog(0, "lock %s\n", lockres->l_name);
2265
2266 status = dlmunlock(osb->dlm, &lockres->l_lksb, LKM_VALBLK,
2267 lockres->l_ops->unlock_ast, lockres);
2268 if (status != DLM_NORMAL) {
2269 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2270 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2271 dlm_print_one_lock(lockres->l_lksb.lockid);
2272 BUG();
2273 }
2274 mlog(0, "lock %s, successful return from dlmunlock\n",
2275 lockres->l_name);
2276
2277 ocfs2_wait_on_busy_lock(lockres);
2278out:
2279 mlog_exit(0);
2280 return 0;
2281}
2282
2283/* Mark the lockres as being dropped. It will no longer be
2284 * queued if blocking, but we still may have to wait on it
2285 * being dequeued from the vote thread before we can consider
2286 * it safe to drop.
2287 *
2288 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2289void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2290{
2291 int status;
2292 struct ocfs2_mask_waiter mw;
2293 unsigned long flags;
2294
2295 ocfs2_init_mask_waiter(&mw);
2296
2297 spin_lock_irqsave(&lockres->l_lock, flags);
2298 lockres->l_flags |= OCFS2_LOCK_FREEING;
2299 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2300 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2301 spin_unlock_irqrestore(&lockres->l_lock, flags);
2302
2303 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2304
2305 status = ocfs2_wait_for_mask(&mw);
2306 if (status)
2307 mlog_errno(status);
2308
2309 spin_lock_irqsave(&lockres->l_lock, flags);
2310 }
2311 spin_unlock_irqrestore(&lockres->l_lock, flags);
2312}
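/* Teardown ordering sketch (see ocfs2_drop_osb_locks() below for the
 * pattern in full): mark the lockres freeing first so the vote thread
 * stops requeueing it, then drop it:
 *
 *	ocfs2_mark_lockres_freeing(lockres);
 *	ocfs2_drop_lock(osb, lockres, NULL);
 */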
2313
2314static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2315{
2316 int status;
2317
2318 mlog_entry_void();
2319
2320 ocfs2_mark_lockres_freeing(&osb->osb_super_lockres);
2321
2322 status = ocfs2_drop_lock(osb, &osb->osb_super_lockres, NULL);
2323 if (status < 0)
2324 mlog_errno(status);
2325
2326 ocfs2_mark_lockres_freeing(&osb->osb_rename_lockres);
2327
2328 status = ocfs2_drop_lock(osb, &osb->osb_rename_lockres, NULL);
2329 if (status < 0)
2330 mlog_errno(status);
2331
2332 mlog_exit(status);
2333}
2334
2335static void ocfs2_meta_pre_drop(struct ocfs2_lock_res *lockres, void *data)
2336{
2337 struct inode *inode = data;
2338
2339 /* the metadata lock requires a bit more work as we have an
2340 * LVB to worry about. */
2341 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2342 lockres->l_level == LKM_EXMODE &&
2343 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2344 __ocfs2_stuff_meta_lvb(inode);
2345}
2346
2347int ocfs2_drop_inode_locks(struct inode *inode)
2348{
2349 int status, err;
2350 struct drop_lock_cb meta_dcb = { ocfs2_meta_pre_drop, inode, };
2351
2352 mlog_entry_void();
2353
2354 /* No need to call ocfs2_mark_lockres_freeing here -
2355 * ocfs2_clear_inode has done it for us. */
2356
2357 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2358 &OCFS2_I(inode)->ip_data_lockres,
2359 NULL);
2360 if (err < 0)
2361 mlog_errno(err);
2362
2363 status = err;
2364
2365 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2366 &OCFS2_I(inode)->ip_meta_lockres,
2367 &meta_dcb);
2368 if (err < 0)
2369 mlog_errno(err);
2370 if (err < 0 && !status)
2371 status = err;
2372
2373 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2374 &OCFS2_I(inode)->ip_rw_lockres,
2375 NULL);
2376 if (err < 0)
2377 mlog_errno(err);
2378 if (err < 0 && !status)
2379 status = err;
2380
2381 mlog_exit(status);
2382 return status;
2383}
2384
2385static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2386 int new_level)
2387{
2388 assert_spin_locked(&lockres->l_lock);
2389
2390 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2391
2392 if (lockres->l_level <= new_level) {
2393 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2394 lockres->l_level, new_level);
2395 BUG();
2396 }
2397
2398 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2399 lockres->l_name, new_level, lockres->l_blocking);
2400
2401 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2402 lockres->l_requested = new_level;
2403 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2404}
2405
2406static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2407 struct ocfs2_lock_res *lockres,
2408 int new_level,
2409 int lvb)
2410{
2411 int ret, dlm_flags = LKM_CONVERT;
2412 enum dlm_status status;
2413
2414 mlog_entry_void();
2415
2416 if (lvb)
2417 dlm_flags |= LKM_VALBLK;
2418
2419 status = dlmlock(osb->dlm,
2420 new_level,
2421 &lockres->l_lksb,
2422 dlm_flags,
2423 lockres->l_name,
2424 OCFS2_LOCK_ID_MAX_LEN - 1,
2425 lockres->l_ops->ast,
2426 lockres,
2427 lockres->l_ops->bast);
2428 if (status != DLM_NORMAL) {
2429 ocfs2_log_dlm_error("dlmlock", status, lockres);
2430 ret = -EINVAL;
2431 ocfs2_recover_from_dlm_error(lockres, 1);
2432 goto bail;
2433 }
2434
2435 ret = 0;
2436bail:
2437 mlog_exit(ret);
2438 return ret;
2439}
2440
2441/* returns 1 when the caller should unlock and call dlmunlock */
2442static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2443 struct ocfs2_lock_res *lockres)
2444{
2445 assert_spin_locked(&lockres->l_lock);
2446
2447 mlog_entry_void();
2448 mlog(0, "lock %s\n", lockres->l_name);
2449
2450 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2451 /* If we're already trying to cancel a lock conversion
2452 * then just drop the spinlock and allow the caller to
2453 * requeue this lock. */
2454
2455 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2456 return 0;
2457 }
2458
2459 /* were we in a convert when we got the bast fire? */
2460 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2461 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2462 /* set things up for the unlockast to know to just
2463 * clear out the ast_action and unset busy, etc. */
2464 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2465
2466 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2467 "lock %s, invalid flags: 0x%lx\n",
2468 lockres->l_name, lockres->l_flags);
2469
2470 return 1;
2471}
2472
2473static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2474 struct ocfs2_lock_res *lockres)
2475{
2476 int ret;
2477 enum dlm_status status;
2478
2479 mlog_entry_void();
2480 mlog(0, "lock %s\n", lockres->l_name);
2481
2482 ret = 0;
2483 status = dlmunlock(osb->dlm,
2484 &lockres->l_lksb,
2485 LKM_CANCEL,
2486 lockres->l_ops->unlock_ast,
2487 lockres);
2488 if (status != DLM_NORMAL) {
2489 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2490 ret = -EINVAL;
2491 ocfs2_recover_from_dlm_error(lockres, 0);
2492 }
2493
2494 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
2495
2496 mlog_exit(ret);
2497 return ret;
2498}
2499
2500static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
2501 struct ocfs2_lock_res *lockres,
2502 int new_level)
2503{
2504 int ret;
2505
2506 mlog_entry_void();
2507
2508 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
2509
2510 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2511 ret = 0;
2512 mlog(0, "lockres %s currently being refreshed -- backing "
2513 "off!\n", lockres->l_name);
2514 } else if (new_level == LKM_PRMODE)
2515 ret = !lockres->l_ex_holders &&
2516 ocfs2_inode_fully_checkpointed(inode);
2517 else /* Must be NLMODE we're converting to. */
2518 ret = !lockres->l_ro_holders && !lockres->l_ex_holders &&
2519 ocfs2_inode_fully_checkpointed(inode);
2520
2521 mlog_exit(ret);
2522 return ret;
2523}
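/* Summary of the policy implemented above -- a downconvert of the
 * meta lock is allowed when:
 *
 *	target LKM_PRMODE:  no EX holders, inode fully checkpointed
 *	target LKM_NLMODE:  no holders at all, inode fully checkpointed
 *
 * and never while OCFS2_LOCK_REFRESHING is set on the lockres. */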
2524
2525static int ocfs2_do_unblock_meta(struct inode *inode,
2526 int *requeue)
2527{
2528 int new_level;
2529 int set_lvb = 0;
2530 int ret = 0;
2531 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
2532 unsigned long flags;
2533
2534 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2535
2536 mlog_entry_void();
2537
2538 spin_lock_irqsave(&lockres->l_lock, flags);
2539
2540 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2541
2542 mlog(0, "l_level=%d, l_blocking=%d\n", lockres->l_level,
2543 lockres->l_blocking);
2544
2545 BUG_ON(lockres->l_level != LKM_EXMODE &&
2546 lockres->l_level != LKM_PRMODE);
2547
2548 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2549 *requeue = 1;
2550 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2551 spin_unlock_irqrestore(&lockres->l_lock, flags);
2552 if (ret) {
2553 ret = ocfs2_cancel_convert(osb, lockres);
2554 if (ret < 0)
2555 mlog_errno(ret);
2556 }
2557 goto leave;
2558 }
2559
2560 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2561
2562 mlog(0, "l_level=%d, l_blocking=%d, new_level=%d\n",
2563 lockres->l_level, lockres->l_blocking, new_level);
2564
2565 if (ocfs2_can_downconvert_meta_lock(inode, lockres, new_level)) {
2566 if (lockres->l_level == LKM_EXMODE)
2567 set_lvb = 1;
2568
2569 /* If the lock hasn't been refreshed yet (rare), then
2570 * our memory inode values are old and we skip
2571 * stuffing the lvb. There's no need to actually clear
2572 * out the lvb here as its value is still valid. */
2573 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2574 if (set_lvb)
2575 __ocfs2_stuff_meta_lvb(inode);
2576 } else
2577 mlog(0, "lockres %s: downconverting stale lock!\n",
2578 lockres->l_name);
2579
2580 mlog(0, "calling ocfs2_downconvert_lock with l_level=%d, "
2581 "l_blocking=%d, new_level=%d\n",
2582 lockres->l_level, lockres->l_blocking, new_level);
2583
2584 ocfs2_prepare_downconvert(lockres, new_level);
2585 spin_unlock_irqrestore(&lockres->l_lock, flags);
2586 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2587 goto leave;
2588 }
2589 if (!ocfs2_inode_fully_checkpointed(inode))
2590 ocfs2_start_checkpoint(osb);
2591
2592 *requeue = 1;
2593 spin_unlock_irqrestore(&lockres->l_lock, flags);
2594 ret = 0;
2595leave:
2596 mlog_exit(ret);
2597 return ret;
2598}
2599
2600static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb,
2601 struct ocfs2_lock_res *lockres,
2602 int *requeue,
2603 ocfs2_convert_worker_t *worker)
2604{
2605 unsigned long flags;
2606 int blocking;
2607 int new_level;
2608 int ret = 0;
2609
2610 mlog_entry_void();
2611
2612 spin_lock_irqsave(&lockres->l_lock, flags);
2613
2614 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2615
2616recheck:
2617 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2618 *requeue = 1;
2619 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2620 spin_unlock_irqrestore(&lockres->l_lock, flags);
2621 if (ret) {
2622 ret = ocfs2_cancel_convert(osb, lockres);
2623 if (ret < 0)
2624 mlog_errno(ret);
2625 }
2626 goto leave;
2627 }
2628
2629 /* if we're blocking an exclusive and we have *any* holders,
2630 * then requeue. */
2631 if ((lockres->l_blocking == LKM_EXMODE)
2632 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
2633 spin_unlock_irqrestore(&lockres->l_lock, flags);
2634 *requeue = 1;
2635 ret = 0;
2636 goto leave;
2637 }
2638
2639 /* If it's a PR we're blocking, then only
2640 * requeue if we've got any EX holders */
2641 if (lockres->l_blocking == LKM_PRMODE &&
2642 lockres->l_ex_holders) {
2643 spin_unlock_irqrestore(&lockres->l_lock, flags);
2644 *requeue = 1;
2645 ret = 0;
2646 goto leave;
2647 }
2648
2649 /* If we get here, then we know that there are no more
2650 * incompatible holders (and anyone asking for an incompatible
2651 * lock is blocked). We can now downconvert the lock */
2652 if (!worker)
2653 goto downconvert;
2654
2655 /* Some lockres types want to do a bit of work before
2656 * downconverting a lock. Allow that here. The worker function
2657 * may sleep, so we save off a copy of what we're blocking as
2658 * it may change while we're not holding the spin lock. */
2659 blocking = lockres->l_blocking;
2660 spin_unlock_irqrestore(&lockres->l_lock, flags);
2661
2662 worker(lockres, blocking);
2663
2664 spin_lock_irqsave(&lockres->l_lock, flags);
2665 if (blocking != lockres->l_blocking) {
2666 /* If this changed underneath us, then we can't drop
2667 * it just yet. */
2668 goto recheck;
2669 }
2670
2671downconvert:
2672 *requeue = 0;
2673 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2674
2675 ocfs2_prepare_downconvert(lockres, new_level);
2676 spin_unlock_irqrestore(&lockres->l_lock, flags);
2677 ret = ocfs2_downconvert_lock(osb, lockres, new_level, 0);
2678leave:
2679 mlog_exit(ret);
2680 return ret;
2681}
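/* The worker hook above lets a lock type flush state with no spinlock
 * held before the downconvert is issued; ocfs2_data_convert_worker()
 * below is the canonical example (it syncs and possibly truncates the
 * page cache). If l_blocking changed while the worker slept, the
 * function loops back to "recheck" rather than downconverting stale
 * state. */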
2682
2683static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2684 int blocking)
2685{
2686 struct inode *inode;
2687 struct address_space *mapping;
2688
2689 mlog_entry_void();
2690
2691 inode = ocfs2_lock_res_inode(lockres);
2692 mapping = inode->i_mapping;
2693
2694 if (filemap_fdatawrite(mapping)) {
2695 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
2696 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2697 }
2698 sync_mapping_buffers(mapping);
2699 if (blocking == LKM_EXMODE) {
2700 truncate_inode_pages(mapping, 0);
2701 unmap_mapping_range(mapping, 0, 0, 0);
2702 } else {
2703 /* We only need to wait on the I/O if we're not also
2704 * truncating pages because truncate_inode_pages waits
2705 * for us above. We don't truncate pages if we're
2706 * blocking anything < EXMODE because we want to keep
2707 * them around in that case. */
2708 filemap_fdatawait(mapping);
2709 }
2710
2711 mlog_exit_void();
2712}
2713
2714int ocfs2_unblock_data(struct ocfs2_lock_res *lockres,
2715 int *requeue)
2716{
2717 int status;
2718 struct inode *inode;
2719 struct ocfs2_super *osb;
2720
2721 mlog_entry_void();
2722
2723 inode = ocfs2_lock_res_inode(lockres);
2724 osb = OCFS2_SB(inode->i_sb);
2725
2726 mlog(0, "unblock inode %llu\n",
2727 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2728
2729 status = ocfs2_generic_unblock_lock(osb,
2730 lockres,
2731 requeue,
2732 ocfs2_data_convert_worker);
2733 if (status < 0)
2734 mlog_errno(status);
2735
2736 mlog(0, "inode %llu, requeue = %d\n",
2737 (unsigned long long)OCFS2_I(inode)->ip_blkno, *requeue);
2738
2739 mlog_exit(status);
2740 return status;
2741}
2742
2743static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres,
2744 int *requeue)
2745{
2746 int status;
2747 struct inode *inode;
2748
2749 mlog_entry_void();
2750
2751 mlog(0, "Unblock lockres %s\n", lockres->l_name);
2752
2753 inode = ocfs2_lock_res_inode(lockres);
2754
2755 status = ocfs2_generic_unblock_lock(OCFS2_SB(inode->i_sb),
2756 lockres,
2757 requeue,
2758 NULL);
2759 if (status < 0)
2760 mlog_errno(status);
2761
2762 mlog_exit(status);
2763 return status;
2764}
2765
2766
2767int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
2768 int *requeue)
2769{
2770 int status;
2771 struct inode *inode;
2772
2773 mlog_entry_void();
2774
2775 inode = ocfs2_lock_res_inode(lockres);
2776
2777 mlog(0, "unblock inode %llu\n",
2778 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2779
2780 status = ocfs2_do_unblock_meta(inode, requeue);
2781 if (status < 0)
2782 mlog_errno(status);
2783
2784 mlog(0, "inode %llu, requeue = %d\n",
2785 (unsigned long long)OCFS2_I(inode)->ip_blkno, *requeue);
2786
2787 mlog_exit(status);
2788 return status;
2789}
2790
2791/* Generic unblock function for any lockres whose private data is an
2792 * ocfs2_super pointer. */
2793static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres,
2794 int *requeue)
2795{
2796 int status;
2797 struct ocfs2_super *osb;
2798
2799 mlog_entry_void();
2800
2801 mlog(0, "Unblock lockres %s\n", lockres->l_name);
2802
2803 osb = ocfs2_lock_res_super(lockres);
2804
2805 status = ocfs2_generic_unblock_lock(osb,
2806 lockres,
2807 requeue,
2808 NULL);
2809 if (status < 0)
2810 mlog_errno(status);
2811
2812 mlog_exit(status);
2813 return status;
2814}
2815
2816void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
2817 struct ocfs2_lock_res *lockres)
2818{
2819 int status;
2820 int requeue = 0;
2821 unsigned long flags;
2822
2823 /* Our reference to the lockres in this function can be
2824 * considered valid until we remove the OCFS2_LOCK_QUEUED
2825 * flag. */
2826
2827 mlog_entry_void();
2828
2829 BUG_ON(!lockres);
2830 BUG_ON(!lockres->l_ops);
2831 BUG_ON(!lockres->l_ops->unblock);
2832
2833 mlog(0, "lockres %s blocked.\n", lockres->l_name);
2834
2835 /* Detect whether a lock has been marked as going away while
2836 * the vote thread was processing other things. A lock can
2837 * still be marked with OCFS2_LOCK_FREEING after this check,
2838 * but short circuiting here will still save us some
2839 * performance. */
2840 spin_lock_irqsave(&lockres->l_lock, flags);
2841 if (lockres->l_flags & OCFS2_LOCK_FREEING)
2842 goto unqueue;
2843 spin_unlock_irqrestore(&lockres->l_lock, flags);
2844
2845 status = lockres->l_ops->unblock(lockres, &requeue);
2846 if (status < 0)
2847 mlog_errno(status);
2848
2849 spin_lock_irqsave(&lockres->l_lock, flags);
2850unqueue:
2851 if (lockres->l_flags & OCFS2_LOCK_FREEING || !requeue) {
2852 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
2853 } else
2854 ocfs2_schedule_blocked_lock(osb, lockres);
2855
2856 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
2857 requeue ? "yes" : "no");
2858 spin_unlock_irqrestore(&lockres->l_lock, flags);
2859
2860 mlog_exit_void();
2861}
2862
2863static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
2864 struct ocfs2_lock_res *lockres)
2865{
2866 mlog_entry_void();
2867
2868 assert_spin_locked(&lockres->l_lock);
2869
2870 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
2871 /* Do not schedule a lock for downconvert when it's on
2872 * the way to destruction - any nodes wanting access
2873 * to the resource will get it soon. */
2874 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
2875 lockres->l_name, lockres->l_flags);
2876 return;
2877 }
2878
2879 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
2880
2881 spin_lock(&osb->vote_task_lock);
2882 if (list_empty(&lockres->l_blocked_list)) {
2883 list_add_tail(&lockres->l_blocked_list,
2884 &osb->blocked_lock_list);
2885 osb->blocked_lock_count++;
2886 }
2887 spin_unlock(&osb->vote_task_lock);
2888
2889 mlog_exit_void();
2890}
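/* Consumption side, presumably (the loop itself lives in vote.c): the
 * vote thread is expected to pull entries off osb->blocked_lock_list
 * under vote_task_lock and hand each one to
 * ocfs2_process_blocked_lock() above, which clears OCFS2_LOCK_QUEUED
 * or requeues via ocfs2_schedule_blocked_lock(). */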
2891
2892/* This aids in debugging situations where a bad LVB might be involved. */
2893void ocfs2_dump_meta_lvb_info(u64 level,
2894 const char *function,
2895 unsigned int line,
2896 struct ocfs2_lock_res *lockres)
2897{
2898 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
2899
2900 mlog(level, "LVB information for %s (called from %s:%u):\n",
2901 lockres->l_name, function, line);
2902 mlog(level, "version: %u, clusters: %u\n",
2903 be32_to_cpu(lvb->lvb_version), be32_to_cpu(lvb->lvb_iclusters));
2904 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
2905 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
2906 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
2907 be16_to_cpu(lvb->lvb_imode));
2908 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
2909 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
2910 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
2911 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
2912 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
2913 be32_to_cpu(lvb->lvb_iattr));
2914}