fs/ocfs2/dlmglue.c
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmglue.c
5 *
6 * Code which implements an OCFS2 specific interface to our DLM.
7 *
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 */
25
26#include <linux/types.h>
27#include <linux/slab.h>
28#include <linux/highmem.h>
29#include <linux/mm.h>
30#include <linux/crc32.h>
31#include <linux/kthread.h>
32#include <linux/pagemap.h>
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35
36#include <cluster/heartbeat.h>
37#include <cluster/nodemanager.h>
38#include <cluster/tcp.h>
39
40#include <dlm/dlmapi.h>
41
42#define MLOG_MASK_PREFIX ML_DLM_GLUE
43#include <cluster/masklog.h>
44
45#include "ocfs2.h"
46
47#include "alloc.h"
48#include "dcache.h"
49#include "dlmglue.h"
50#include "extent_map.h"
51#include "file.h"
52#include "heartbeat.h"
53#include "inode.h"
54#include "journal.h"
55#include "slot_map.h"
56#include "super.h"
57#include "uptodate.h"
58
59#include "buffer_head_io.h"
60
61struct ocfs2_mask_waiter {
62 struct list_head mw_item;
63 int mw_status;
64 struct completion mw_complete;
65 unsigned long mw_mask;
66 unsigned long mw_goal;
67};
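/*
 * How the mask waiter is used (a summary of lockres_add_mask_waiter()
 * and lockres_set_flags() below): a task queues an ocfs2_mask_waiter on
 * l_mask_waiters and sleeps on mw_complete; every time the lockres
 * flags change, any waiter whose condition
 * (l_flags & mw_mask) == mw_goal holds is woken with mw_status = 0.
 * For example, ocfs2_cluster_lock() waits for OCFS2_LOCK_BUSY to clear
 * with mw_mask = OCFS2_LOCK_BUSY and mw_goal = 0.
 */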
68
69static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
70static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
71
72/*
73 * Return value from ->downconvert_worker functions.
74 *
75 * These control the precise actions of ocfs2_unblock_lock()
76 * and ocfs2_process_blocked_lock()
77 *
78 */
79enum ocfs2_unblock_action {
80 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
81 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
82 * ->post_unlock callback */
83 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
84 * ->post_unlock() callback. */
85};
86
87struct ocfs2_unblock_ctl {
88 int requeue;
89 enum ocfs2_unblock_action unblock_action;
90};
91
92static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
93 int new_level);
94static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
95
96static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
97 int blocking);
98
99static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
100 int blocking);
101
102static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
103 struct ocfs2_lock_res *lockres);
104
105
106#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
107
108/* This aids in debugging situations where a bad LVB might be involved. */
109static void ocfs2_dump_meta_lvb_info(u64 level,
110 const char *function,
111 unsigned int line,
112 struct ocfs2_lock_res *lockres)
113{
114 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
115
116 mlog(level, "LVB information for %s (called from %s:%u):\n",
117 lockres->l_name, function, line);
118 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
119 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
120 be32_to_cpu(lvb->lvb_igeneration));
121 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
122 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
123 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
124 be16_to_cpu(lvb->lvb_imode));
125 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
126 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
127 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
128 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
129 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
130 be32_to_cpu(lvb->lvb_iattr));
131}
132
133
134/*
135 * OCFS2 Lock Resource Operations
136 *
137 * These fine tune the behavior of the generic dlmglue locking infrastructure.
138 *
139 * The most basic of lock types can point ->l_priv to their respective
140 * struct ocfs2_super and allow the default actions to manage things.
141 *
142 * Right now, each lock type also needs to implement an init function,
143 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
144 * should be called when the lock is no longer needed (i.e., object
145 * destruction time).
146 */
147struct ocfs2_lock_res_ops {
148 /*
149 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
150 * this callback if ->l_priv is not an ocfs2_super pointer
151 */
152 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
153
154 /*
155 * Optionally called in the downconvert thread after a
156 * successful downconvert. The lockres will not be referenced
157 * after this callback is called, so it is safe to free
158 * memory, etc.
159 *
160 * The exact semantics of when this is called are controlled
161 * by ->downconvert_worker()
162 */
163 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
164
165 /*
166 * Allow a lock type to add checks to determine whether it is
167 * safe to downconvert a lock. Return 0 to re-queue the
168 * downconvert at a later time, nonzero to continue.
169 *
170 * For most locks, the default checks that there are no
171 * incompatible holders are sufficient.
172 *
173 * Called with the lockres spinlock held.
174 */
175 int (*check_downconvert)(struct ocfs2_lock_res *, int);
176
177 /*
178 * Allows a lock type to populate the lock value block. This
179 * is called on downconvert, and when we drop a lock.
180 *
181 * Locks that want to use this should set LOCK_TYPE_USES_LVB
182 * in the flags field.
183 *
184 * Called with the lockres spinlock held.
185 */
186 void (*set_lvb)(struct ocfs2_lock_res *);
187
188 /*
189 * Called from the downconvert thread when it is determined
190 * that a lock will be downconverted. This is called without
191 * any locks held so the function can do work that might
192 * schedule (syncing out data, etc).
193 *
194 * This should return any one of the ocfs2_unblock_action
195 * values, depending on what it wants the thread to do.
196 */
197 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
198
199 /*
200 * LOCK_TYPE_* flags which describe the specific requirements
201 * of a lock type. Descriptions of each individual flag follow.
202 */
203 int flags;
204};
205
206/*
207 * Some locks want to "refresh" potentially stale data when a
208 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
209 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
210 * individual lockres l_flags member from the ast function. It is
211 * expected that the locking wrapper will clear the
212 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
213 */
214#define LOCK_TYPE_REQUIRES_REFRESH 0x1
215
216/*
217 * Indicate that a lock type makes use of the lock value block. The
218 * ->set_lvb lock type callback must be defined.
219 */
220#define LOCK_TYPE_USES_LVB 0x2
221
222static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
223 .get_osb = ocfs2_get_inode_osb,
224 .flags = 0,
225};
226
227static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
228 .get_osb = ocfs2_get_inode_osb,
229 .check_downconvert = ocfs2_check_meta_downconvert,
230 .set_lvb = ocfs2_set_meta_lvb,
231 .downconvert_worker = ocfs2_data_convert_worker,
232 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
233};
234
235static struct ocfs2_lock_res_ops ocfs2_super_lops = {
236 .flags = LOCK_TYPE_REQUIRES_REFRESH,
237};
238
239static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
240 .flags = 0,
241};
242
243static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
244 .get_osb = ocfs2_get_dentry_osb,
245 .post_unlock = ocfs2_dentry_post_unlock,
246 .downconvert_worker = ocfs2_dentry_convert_worker,
247 .flags = 0,
248};
249
250static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
251 .get_osb = ocfs2_get_inode_osb,
252 .flags = 0,
253};
254
255static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
256{
257 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
258 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
259 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
260}
261
262static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
263{
264 BUG_ON(!ocfs2_is_inode_lock(lockres));
265
266 return (struct inode *) lockres->l_priv;
267}
268
269static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
270{
271 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
272
273 return (struct ocfs2_dentry_lock *)lockres->l_priv;
274}
275
276static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
277{
278 if (lockres->l_ops->get_osb)
279 return lockres->l_ops->get_osb(lockres);
280
281 return (struct ocfs2_super *)lockres->l_priv;
282}
283
284static int ocfs2_lock_create(struct ocfs2_super *osb,
285 struct ocfs2_lock_res *lockres,
286 int level,
287 int dlm_flags);
288static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
289 int wanted);
290static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
291 struct ocfs2_lock_res *lockres,
292 int level);
293static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
294static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
295static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
296static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
297static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
298 struct ocfs2_lock_res *lockres);
299static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
300 int convert);
301#define ocfs2_log_dlm_error(_func, _stat, _lockres) do { \
302 mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
303 "resource %s: %s\n", dlm_errname(_stat), _func, \
304 _lockres->l_name, dlm_errmsg(_stat)); \
305} while (0)
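/*
 * The do { ... } while (0) wrapper above makes the macro expand to a
 * single statement, so it composes safely with constructs like an
 * unbraced "if (st != DLM_NORMAL) ocfs2_log_dlm_error(...); else ...".
 */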
306static int ocfs2_downconvert_thread(void *arg);
307static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
308 struct ocfs2_lock_res *lockres);
309static int ocfs2_inode_lock_update(struct inode *inode,
310 struct buffer_head **bh);
311static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
312static inline int ocfs2_highest_compat_lock_level(int level);
313
314static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
315 u64 blkno,
316 u32 generation,
317 char *name)
318{
319 int len;
320
321 mlog_entry_void();
322
323 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
324
325 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
326 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
327 (long long)blkno, generation);
328
329 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
330
331 mlog(0, "built lock resource with name: %s\n", name);
332
333 mlog_exit_void();
334}
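/*
 * Sketch of a resulting name, assuming OCFS2_LOCK_ID_PAD is the usual
 * all-zero pad string: a meta lock on block 1234 (0x4d2) with
 * generation 2 comes out as "M" + pad + "00000000000004d2" + "00000002",
 * i.e. type char, pad, 16 hex digits of blkno, 8 hex digits of
 * generation -- OCFS2_LOCK_ID_MAX_LEN - 1 characters plus the NUL.
 */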
335
336static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
337
338static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
339 struct ocfs2_dlm_debug *dlm_debug)
340{
341 mlog(0, "Add tracking for lockres %s\n", res->l_name);
342
343 spin_lock(&ocfs2_dlm_tracking_lock);
344 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
345 spin_unlock(&ocfs2_dlm_tracking_lock);
346}
347
348static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
349{
350 spin_lock(&ocfs2_dlm_tracking_lock);
351 if (!list_empty(&res->l_debug_list))
352 list_del_init(&res->l_debug_list);
353 spin_unlock(&ocfs2_dlm_tracking_lock);
354}
355
356static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
357 struct ocfs2_lock_res *res,
358 enum ocfs2_lock_type type,
359 struct ocfs2_lock_res_ops *ops,
360 void *priv)
361{
362 res->l_type = type;
363 res->l_ops = ops;
364 res->l_priv = priv;
365
366 res->l_level = LKM_IVMODE;
367 res->l_requested = LKM_IVMODE;
368 res->l_blocking = LKM_IVMODE;
369 res->l_action = OCFS2_AST_INVALID;
370 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
371
372 res->l_flags = OCFS2_LOCK_INITIALIZED;
373
374 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
375}
376
377void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
378{
379 /* This also clears out the lock status block */
380 memset(res, 0, sizeof(struct ocfs2_lock_res));
381 spin_lock_init(&res->l_lock);
382 init_waitqueue_head(&res->l_event);
383 INIT_LIST_HEAD(&res->l_blocked_list);
384 INIT_LIST_HEAD(&res->l_mask_waiters);
385}
386
387void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
388 enum ocfs2_lock_type type,
389 unsigned int generation,
390 struct inode *inode)
391{
392 struct ocfs2_lock_res_ops *ops;
393
394 switch(type) {
395 case OCFS2_LOCK_TYPE_RW:
396 ops = &ocfs2_inode_rw_lops;
397 break;
398 case OCFS2_LOCK_TYPE_META:
399 ops = &ocfs2_inode_inode_lops;
400 break;
401 case OCFS2_LOCK_TYPE_OPEN:
402 ops = &ocfs2_inode_open_lops;
403 break;
404 default:
405 mlog_bug_on_msg(1, "type: %d\n", type);
406 ops = NULL; /* thanks, gcc */
407 break;
408 }
409
410 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
411 generation, res->l_name);
412 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
413}
414
415static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
416{
417 struct inode *inode = ocfs2_lock_res_inode(lockres);
418
419 return OCFS2_SB(inode->i_sb);
420}
421
422static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
423{
424 __be64 inode_blkno_be;
425
426 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
427 sizeof(__be64));
428
429 return be64_to_cpu(inode_blkno_be);
430}
431
432static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
433{
434 struct ocfs2_dentry_lock *dl = lockres->l_priv;
435
436 return OCFS2_SB(dl->dl_inode->i_sb);
437}
438
439void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
440 u64 parent, struct inode *inode)
441{
442 int len;
443 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
444 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
445 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
446
447 ocfs2_lock_res_init_once(lockres);
448
449 /*
450 * Unfortunately, the standard lock naming scheme won't work
451 * here because we have two 16 byte values to use. Instead,
452 * we'll stuff the inode number as a binary value. We still
453 * want error prints to show something without garbling the
454 * display, so drop a null byte in there before the inode
455 * number. A future version of OCFS2 will likely use all
456 * binary lock names. The stringified names have been a
457 * tremendous aid in debugging, but now that the debugfs
458 * interface exists, we can mangle things there if need be.
459 *
460 * NOTE: We also drop the standard "pad" value (the total lock
461 * name size stays the same though - the last part is all
462 * zeros due to the memset in ocfs2_lock_res_init_once()
463 */
464 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
465 "%c%016llx",
466 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
467 (long long)parent);
468
469 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
470
471 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
472 sizeof(__be64));
473
474 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
475 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
476 dl);
477}
478
479static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
480 struct ocfs2_super *osb)
481{
482 /* Superblock lockres doesn't come from a slab so we call init
483 * once on it manually. */
484 ocfs2_lock_res_init_once(res);
485 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
486 0, res->l_name);
487 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
488 &ocfs2_super_lops, osb);
489}
490
491static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
492 struct ocfs2_super *osb)
493{
494 /* Rename lockres doesn't come from a slab so we call init
495 * once on it manually. */
496 ocfs2_lock_res_init_once(res);
497 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
498 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
499 &ocfs2_rename_lops, osb);
500}
501
502void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
503{
504 mlog_entry_void();
505
506 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
507 return;
508
509 ocfs2_remove_lockres_tracking(res);
510
511 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
512 "Lockres %s is on the blocked list\n",
513 res->l_name);
514 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
515 "Lockres %s has mask waiters pending\n",
516 res->l_name);
517 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
518 "Lockres %s is locked\n",
519 res->l_name);
520 mlog_bug_on_msg(res->l_ro_holders,
521 "Lockres %s has %u ro holders\n",
522 res->l_name, res->l_ro_holders);
523 mlog_bug_on_msg(res->l_ex_holders,
524 "Lockres %s has %u ex holders\n",
525 res->l_name, res->l_ex_holders);
526
527 /* Need to clear out the lock status block for the dlm */
528 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
529
530 res->l_flags = 0UL;
531 mlog_exit_void();
532}
533
534static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
535 int level)
536{
537 mlog_entry_void();
538
539 BUG_ON(!lockres);
540
541 switch(level) {
542 case LKM_EXMODE:
543 lockres->l_ex_holders++;
544 break;
545 case LKM_PRMODE:
546 lockres->l_ro_holders++;
547 break;
548 default:
549 BUG();
550 }
551
552 mlog_exit_void();
553}
554
555static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
556 int level)
557{
558 mlog_entry_void();
559
560 BUG_ON(!lockres);
561
562 switch(level) {
563 case LKM_EXMODE:
564 BUG_ON(!lockres->l_ex_holders);
565 lockres->l_ex_holders--;
566 break;
567 case LKM_PRMODE:
568 BUG_ON(!lockres->l_ro_holders);
569 lockres->l_ro_holders--;
570 break;
571 default:
572 BUG();
573 }
574 mlog_exit_void();
575}
576
577/* WARNING: This function lives in a world where the only three lock
578 * levels are EX, PR, and NL. It *will* have to be adjusted when more
579 * lock types are added. */
580static inline int ocfs2_highest_compat_lock_level(int level)
581{
582 int new_level = LKM_EXMODE;
583
584 if (level == LKM_EXMODE)
585 new_level = LKM_NLMODE;
586 else if (level == LKM_PRMODE)
587 new_level = LKM_PRMODE;
588 return new_level;
589}
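/*
 * In table form, the mapping implemented above is:
 *
 *	level held here		highest level another node may hold
 *	LKM_EXMODE	->	LKM_NLMODE
 *	LKM_PRMODE	->	LKM_PRMODE
 *	LKM_NLMODE	->	LKM_EXMODE
 */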
590
591static void lockres_set_flags(struct ocfs2_lock_res *lockres,
592 unsigned long newflags)
593{
594 struct ocfs2_mask_waiter *mw, *tmp;
595
596 assert_spin_locked(&lockres->l_lock);
597
598 lockres->l_flags = newflags;
599
600 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
601 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
602 continue;
603
604 list_del_init(&mw->mw_item);
605 mw->mw_status = 0;
606 complete(&mw->mw_complete);
607 }
608}
609static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
610{
611 lockres_set_flags(lockres, lockres->l_flags | or);
612}
613static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
614 unsigned long clear)
615{
616 lockres_set_flags(lockres, lockres->l_flags & ~clear);
617}
618
619static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
620{
621 mlog_entry_void();
622
623 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
624 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
625 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
626 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
627
628 lockres->l_level = lockres->l_requested;
629 if (lockres->l_level <=
630 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
631 lockres->l_blocking = LKM_NLMODE;
632 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
633 }
634 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
635
636 mlog_exit_void();
637}
638
639static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
640{
641 mlog_entry_void();
642
643 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
644 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
645
646 /* Convert from RO to EX doesn't really need anything as our
647 * information is already up to date. Convert from NL to
648 * *anything* however should mark ourselves as needing an
649 * update */
650 if (lockres->l_level == LKM_NLMODE &&
651 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
652 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
653
654 lockres->l_level = lockres->l_requested;
655 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
656
657 mlog_exit_void();
658}
659
660static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
661{
662 mlog_entry_void();
663
664 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
665 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
666
667 if (lockres->l_requested > LKM_NLMODE &&
668 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
669 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
670 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
671
672 lockres->l_level = lockres->l_requested;
673 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
674 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
675
676 mlog_exit_void();
677}
678
679static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
680 int level)
681{
682 int needs_downconvert = 0;
683 mlog_entry_void();
684
685 assert_spin_locked(&lockres->l_lock);
686
687 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
688
689 if (level > lockres->l_blocking) {
690 /* only schedule a downconvert if we haven't already scheduled
691 * one that goes low enough to satisfy the level we're
692 * blocking. this also catches the case where we get
693 * duplicate BASTs */
694 if (ocfs2_highest_compat_lock_level(level) <
695 ocfs2_highest_compat_lock_level(lockres->l_blocking))
696 needs_downconvert = 1;
697
698 lockres->l_blocking = level;
699 }
700
701 mlog_exit(needs_downconvert);
702 return needs_downconvert;
703}
704
705static void ocfs2_blocking_ast(void *opaque, int level)
706{
707 struct ocfs2_lock_res *lockres = opaque;
708 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
709 int needs_downconvert;
710 unsigned long flags;
711
712 BUG_ON(level <= LKM_NLMODE);
713
714 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
715 lockres->l_name, level, lockres->l_level,
716 ocfs2_lock_type_string(lockres->l_type));
717
718 spin_lock_irqsave(&lockres->l_lock, flags);
719 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
720 if (needs_downconvert)
721 ocfs2_schedule_blocked_lock(osb, lockres);
722 spin_unlock_irqrestore(&lockres->l_lock, flags);
723
724 wake_up(&lockres->l_event);
725
726 ocfs2_wake_downconvert_thread(osb);
727}
728
729static void ocfs2_locking_ast(void *opaque)
730{
731 struct ocfs2_lock_res *lockres = opaque;
732 struct dlm_lockstatus *lksb = &lockres->l_lksb;
733 unsigned long flags;
734
735 spin_lock_irqsave(&lockres->l_lock, flags);
736
737 if (lksb->status != DLM_NORMAL) {
738 mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
739 lockres->l_name, lksb->status);
740 spin_unlock_irqrestore(&lockres->l_lock, flags);
741 return;
742 }
743
744 switch(lockres->l_action) {
745 case OCFS2_AST_ATTACH:
746 ocfs2_generic_handle_attach_action(lockres);
747 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
748 break;
749 case OCFS2_AST_CONVERT:
750 ocfs2_generic_handle_convert_action(lockres);
751 break;
752 case OCFS2_AST_DOWNCONVERT:
753 ocfs2_generic_handle_downconvert_action(lockres);
754 break;
755 default:
756 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
757 "lockres flags = 0x%lx, unlock action: %u\n",
758 lockres->l_name, lockres->l_action, lockres->l_flags,
759 lockres->l_unlock_action);
760 BUG();
761 }
762
763 /* set it to something invalid so if we get called again we
764 * can catch it. */
765 lockres->l_action = OCFS2_AST_INVALID;
766
767 wake_up(&lockres->l_event);
768 spin_unlock_irqrestore(&lockres->l_lock, flags);
769}
770
771static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
772 int convert)
773{
774 unsigned long flags;
775
776 mlog_entry_void();
777 spin_lock_irqsave(&lockres->l_lock, flags);
778 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
779 if (convert)
780 lockres->l_action = OCFS2_AST_INVALID;
781 else
782 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
783 spin_unlock_irqrestore(&lockres->l_lock, flags);
784
785 wake_up(&lockres->l_event);
786 mlog_exit_void();
787}
788
789/* Note: If we detect another process working on the lock (i.e.,
790 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
791 * to do the right thing in that case.
792 */
793static int ocfs2_lock_create(struct ocfs2_super *osb,
794 struct ocfs2_lock_res *lockres,
795 int level,
796 int dlm_flags)
797{
798 int ret = 0;
799 enum dlm_status status = DLM_NORMAL;
800 unsigned long flags;
801
802 mlog_entry_void();
803
804 mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
805 dlm_flags);
806
807 spin_lock_irqsave(&lockres->l_lock, flags);
808 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
809 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
810 spin_unlock_irqrestore(&lockres->l_lock, flags);
811 goto bail;
812 }
813
814 lockres->l_action = OCFS2_AST_ATTACH;
815 lockres->l_requested = level;
816 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
817 spin_unlock_irqrestore(&lockres->l_lock, flags);
818
819 status = dlmlock(osb->dlm,
820 level,
821 &lockres->l_lksb,
822 dlm_flags,
823 lockres->l_name,
824 OCFS2_LOCK_ID_MAX_LEN - 1,
825 ocfs2_locking_ast,
826 lockres,
827 ocfs2_blocking_ast);
828 if (status != DLM_NORMAL) {
829 ocfs2_log_dlm_error("dlmlock", status, lockres);
830 ret = -EINVAL;
831 ocfs2_recover_from_dlm_error(lockres, 1);
832 }
833
834 mlog(0, "lock %s, successful return from dlmlock\n", lockres->l_name);
835
836bail:
837 mlog_exit(ret);
838 return ret;
839}
840
841static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
842 int flag)
843{
844 unsigned long flags;
845 int ret;
846
847 spin_lock_irqsave(&lockres->l_lock, flags);
848 ret = lockres->l_flags & flag;
849 spin_unlock_irqrestore(&lockres->l_lock, flags);
850
851 return ret;
852}
853
854static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
855
856{
857 wait_event(lockres->l_event,
858 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
859}
860
861static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
862
863{
864 wait_event(lockres->l_event,
865 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
866}
867
868/* predict what lock level we'll be dropping down to on behalf
869 * of another node, and return true if the currently wanted
870 * level will be compatible with it. */
871static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
872 int wanted)
873{
874 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
875
876 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
877}
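/*
 * Example: if another node is blocked wanting EX (l_blocking ==
 * LKM_EXMODE), the predicted post-downconvert level is NL, so only a
 * wanted level of LKM_NLMODE may continue. If the blocked request is
 * PR, new PR holders may still continue here, but EX may not.
 */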
878
879static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
880{
881 INIT_LIST_HEAD(&mw->mw_item);
882 init_completion(&mw->mw_complete);
883}
884
885static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
886{
887 wait_for_completion(&mw->mw_complete);
888 /* Re-arm the completion in case we want to wait on it again */
889 INIT_COMPLETION(mw->mw_complete);
890 return mw->mw_status;
891}
892
893static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
894 struct ocfs2_mask_waiter *mw,
895 unsigned long mask,
896 unsigned long goal)
897{
898 BUG_ON(!list_empty(&mw->mw_item));
899
900 assert_spin_locked(&lockres->l_lock);
901
902 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
903 mw->mw_mask = mask;
904 mw->mw_goal = goal;
905}
906
907/* returns 0 if the mw that was removed was already satisfied, -EBUSY
908 * if the mask still hadn't reached its goal */
909static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
910 struct ocfs2_mask_waiter *mw)
911{
912 unsigned long flags;
913 int ret = 0;
914
915 spin_lock_irqsave(&lockres->l_lock, flags);
916 if (!list_empty(&mw->mw_item)) {
917 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
918 ret = -EBUSY;
919
920 list_del_init(&mw->mw_item);
921 init_completion(&mw->mw_complete);
922 }
923 spin_unlock_irqrestore(&lockres->l_lock, flags);
924
925 return ret;
926
927}
928
929static int ocfs2_cluster_lock(struct ocfs2_super *osb,
930 struct ocfs2_lock_res *lockres,
931 int level,
932 int lkm_flags,
933 int arg_flags)
934{
935 struct ocfs2_mask_waiter mw;
936 enum dlm_status status;
937 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
938 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
939 unsigned long flags;
940
941 mlog_entry_void();
942
943 ocfs2_init_mask_waiter(&mw);
944
945 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
946 lkm_flags |= LKM_VALBLK;
947
948again:
949 wait = 0;
950
951 if (catch_signals && signal_pending(current)) {
952 ret = -ERESTARTSYS;
953 goto out;
954 }
955
956 spin_lock_irqsave(&lockres->l_lock, flags);
957
958 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
959 "Cluster lock called on freeing lockres %s! flags "
960 "0x%lx\n", lockres->l_name, lockres->l_flags);
961
962 /* We only compare against the currently granted level
963 * here. If the lock is blocked waiting on a downconvert,
964 * we'll get caught below. */
965 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
966 level > lockres->l_level) {
967 /* is someone sitting in dlm_lock? If so, wait on
968 * them. */
969 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
970 wait = 1;
971 goto unlock;
972 }
973
974 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
975 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
976 /* the lock is currently blocked on behalf of
977 * another node */
978 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
979 wait = 1;
980 goto unlock;
981 }
982
983 if (level > lockres->l_level) {
984 if (lockres->l_action != OCFS2_AST_INVALID)
985 mlog(ML_ERROR, "lockres %s has action %u pending\n",
986 lockres->l_name, lockres->l_action);
987
988 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
989 lockres->l_action = OCFS2_AST_ATTACH;
990 lkm_flags &= ~LKM_CONVERT;
991 } else {
992 lockres->l_action = OCFS2_AST_CONVERT;
993 lkm_flags |= LKM_CONVERT;
994 }
995
996 lockres->l_requested = level;
997 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
998 spin_unlock_irqrestore(&lockres->l_lock, flags);
999
1000 BUG_ON(level == LKM_IVMODE);
1001 BUG_ON(level == LKM_NLMODE);
1002
1003 mlog(0, "lock %s, convert from %d to level = %d\n",
1004 lockres->l_name, lockres->l_level, level);
1005
1006 /* call dlm_lock to upgrade lock now */
1007 status = dlmlock(osb->dlm,
1008 level,
1009 &lockres->l_lksb,
1010 lkm_flags,
1011 lockres->l_name,
1012 OCFS2_LOCK_ID_MAX_LEN - 1,
1013 ocfs2_locking_ast,
1014 lockres,
1015 ocfs2_blocking_ast);
1016 if (status != DLM_NORMAL) {
1017 if ((lkm_flags & LKM_NOQUEUE) &&
1018 (status == DLM_NOTQUEUED))
1019 ret = -EAGAIN;
1020 else {
1021 ocfs2_log_dlm_error("dlmlock", status,
1022 lockres);
1023 ret = -EINVAL;
1024 }
1025 ocfs2_recover_from_dlm_error(lockres, 1);
1026 goto out;
1027 }
1028
1029 mlog(0, "lock %s, successful return from dlmlock\n",
1030 lockres->l_name);
1031
1032 /* At this point we've gone inside the dlm and need to
1033 * complete our work regardless. */
1034 catch_signals = 0;
1035
1036 /* wait for busy to clear and carry on */
1037 goto again;
1038 }
1039
1040 /* Ok, if we get here then we're good to go. */
1041 ocfs2_inc_holders(lockres, level);
1042
1043 ret = 0;
1044unlock:
1045 spin_unlock_irqrestore(&lockres->l_lock, flags);
1046out:
1047 /*
1048 * This is helping work around a lock inversion between the page lock
1049 * and dlm locks. One path holds the page lock while calling aops
1050 * which block acquiring dlm locks. The downconvert thread holds dlm
1051 * locks while acquiring page locks while down converting data locks.
1052 * This block is helping an aop path notice the inversion and back
1053 * off to unlock its page lock before trying the dlm lock again.
1054 */
1055 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1056 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1057 wait = 0;
1058 if (lockres_remove_mask_waiter(lockres, &mw))
1059 ret = -EAGAIN;
1060 else
1061 goto again;
1062 }
1063 if (wait) {
1064 ret = ocfs2_wait_for_mask(&mw);
1065 if (ret == 0)
1066 goto again;
1067 mlog_errno(ret);
1068 }
1069
1070 mlog_exit(ret);
1071 return ret;
1072}
1073
1074static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1075 struct ocfs2_lock_res *lockres,
1076 int level)
1077{
1078 unsigned long flags;
1079
1080 mlog_entry_void();
1081 spin_lock_irqsave(&lockres->l_lock, flags);
1082 ocfs2_dec_holders(lockres, level);
1083 ocfs2_downconvert_on_unlock(osb, lockres);
1084 spin_unlock_irqrestore(&lockres->l_lock, flags);
1085 mlog_exit_void();
1086}
1087
1088static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1089 struct ocfs2_lock_res *lockres,
1090 int ex,
1091 int local)
1092{
1093 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1094 unsigned long flags;
1095 int lkm_flags = local ? LKM_LOCAL : 0;
1096
1097 spin_lock_irqsave(&lockres->l_lock, flags);
1098 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1099 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1100 spin_unlock_irqrestore(&lockres->l_lock, flags);
1101
1102 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1103}
1104
1105/* Grants us an EX lock on the data and metadata resources, skipping
1106 * the normal cluster directory lookup. Use this ONLY on newly created
1107 * inodes which other nodes can't possibly see, and which haven't been
1108 * hashed in the inode hash yet. This can give us a good performance
1109 * increase as it'll skip the network broadcast normally associated
1110 * with creating a new lock resource. */
1111int ocfs2_create_new_inode_locks(struct inode *inode)
1112{
1113 int ret;
1114 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1115
1116 BUG_ON(!inode);
1117 BUG_ON(!ocfs2_inode_is_new(inode));
1118
1119 mlog_entry_void();
1120
1121 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1122
1123 /* NOTE: That we don't increment any of the holder counts, nor
1124 * do we add anything to a journal handle. Since this is
1125 * supposed to be a new inode which the cluster doesn't know
1126 * about yet, there is no need to. As far as the LVB handling
1127 * is concerned, this is basically like acquiring an EX lock
1128 * on a resource which has an invalid one -- we'll set it
1129 * valid when we release the EX. */
1130
1131 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1132 if (ret) {
1133 mlog_errno(ret);
1134 goto bail;
1135 }
1136
1137 /*
1138 * We don't want to use LKM_LOCAL on a meta data lock as they
1139 * don't use a generation in their lock names.
1140 */
1141 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1142 if (ret) {
1143 mlog_errno(ret);
1144 goto bail;
1145 }
1146
1147 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1148 if (ret) {
1149 mlog_errno(ret);
1150 goto bail;
1151 }
1152
1153bail:
1154 mlog_exit(ret);
1155 return ret;
1156}
1157
1158int ocfs2_rw_lock(struct inode *inode, int write)
1159{
1160 int status, level;
1161 struct ocfs2_lock_res *lockres;
1162 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1163
1164 BUG_ON(!inode);
1165
1166 mlog_entry_void();
1167
1168 mlog(0, "inode %llu take %s RW lock\n",
1169 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1170 write ? "EXMODE" : "PRMODE");
1171
1172 if (ocfs2_mount_local(osb))
1173 return 0;
1174
1175 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1176
1177 level = write ? LKM_EXMODE : LKM_PRMODE;
1178
1179 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1180 0);
1181 if (status < 0)
1182 mlog_errno(status);
1183
1184 mlog_exit(status);
1185 return status;
1186}
1187
1188void ocfs2_rw_unlock(struct inode *inode, int write)
1189{
1190 int level = write ? LKM_EXMODE : LKM_PRMODE;
1191 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1192 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1193
1194 mlog_entry_void();
1195
1196 mlog(0, "inode %llu drop %s RW lock\n",
1197 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1198 write ? "EXMODE" : "PRMODE");
1199
1200 if (!ocfs2_mount_local(osb))
1201 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1202
1203 mlog_exit_void();
1204}
1205
1206/*
1207 * ocfs2_open_lock always get PR mode lock.
1208 */
1209int ocfs2_open_lock(struct inode *inode)
1210{
1211 int status = 0;
1212 struct ocfs2_lock_res *lockres;
1213 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1214
1215 BUG_ON(!inode);
1216
1217 mlog_entry_void();
1218
1219 mlog(0, "inode %llu take PRMODE open lock\n",
1220 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1221
1222 if (ocfs2_mount_local(osb))
1223 goto out;
1224
1225 lockres = &OCFS2_I(inode)->ip_open_lockres;
1226
1227 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1228 LKM_PRMODE, 0, 0);
1229 if (status < 0)
1230 mlog_errno(status);
1231
1232out:
1233 mlog_exit(status);
1234 return status;
1235}
1236
1237int ocfs2_try_open_lock(struct inode *inode, int write)
1238{
1239 int status = 0, level;
1240 struct ocfs2_lock_res *lockres;
1241 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1242
1243 BUG_ON(!inode);
1244
1245 mlog_entry_void();
1246
1247 mlog(0, "inode %llu try to take %s open lock\n",
1248 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1249 write ? "EXMODE" : "PRMODE");
1250
1251 if (ocfs2_mount_local(osb))
1252 goto out;
1253
1254 lockres = &OCFS2_I(inode)->ip_open_lockres;
1255
1256 level = write ? LKM_EXMODE : LKM_PRMODE;
1257
1258 /*
1259 * The file system may already be holding a PRMODE/EXMODE open lock.
1260 * Since we pass LKM_NOQUEUE, the request won't block waiting on
1261 * other nodes and the -EAGAIN will indicate to the caller that
1262 * this inode is still in use.
1263 */
1264 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1265 level, LKM_NOQUEUE, 0);
1266
1267out:
1268 mlog_exit(status);
1269 return status;
1270}
1271
1272/*
1273 * ocfs2_open_unlock unlock PR and EX mode open locks.
1274 */
1275void ocfs2_open_unlock(struct inode *inode)
1276{
1277 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1278 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1279
1280 mlog_entry_void();
1281
1282 mlog(0, "inode %llu drop open lock\n",
1283 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1284
1285 if (ocfs2_mount_local(osb))
1286 goto out;
1287
1288 if(lockres->l_ro_holders)
1289 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1290 LKM_PRMODE);
1291 if(lockres->l_ex_holders)
1292 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1293 LKM_EXMODE);
1294
1295out:
1296 mlog_exit_void();
1297}
1298
1299static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1300 struct ocfs2_lock_res *lockres)
1301{
1302 int kick = 0;
1303
1304 mlog_entry_void();
1305
1306 /* If we know that another node is waiting on our lock, kick
1307 * the downconvert thread pre-emptively when we reach a release
1308 * condition. */
1309 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1310 switch(lockres->l_blocking) {
1311 case LKM_EXMODE:
1312 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1313 kick = 1;
1314 break;
1315 case LKM_PRMODE:
1316 if (!lockres->l_ex_holders)
1317 kick = 1;
1318 break;
1319 default:
1320 BUG();
1321 }
1322 }
1323
1324 if (kick)
1325 ocfs2_wake_downconvert_thread(osb);
1326
1327 mlog_exit_void();
1328}
1329
1330#define OCFS2_SEC_BITS 34
1331#define OCFS2_SEC_SHIFT (64 - 34)
1332#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
1333
1334/* LVB only has room for 64 bits of time here so we pack it for
1335 * now. */
1336static u64 ocfs2_pack_timespec(struct timespec *spec)
1337{
1338 u64 res;
1339 u64 sec = spec->tv_sec;
1340 u32 nsec = spec->tv_nsec;
1341
1342 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
1343
1344 return res;
1345}
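/*
 * Worked example: tv_sec = 1, tv_nsec = 500000000 packs to
 * (1ULL << 30) | 500000000 = 0x40000000 | 0x1DCD6500 = 0x5DCD6500.
 * The OR never carries because tv_nsec < 10^9 < 2^30, so the
 * nanoseconds can't spill into the seconds bits.
 */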
1346
1347/* Call this with the lockres locked. I am reasonably sure we don't
1348 * need ip_lock in this function as anyone who would be changing those
1349 * values is supposed to be blocked in ocfs2_inode_lock right now. */
1350static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1351{
1352 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1353 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1354 struct ocfs2_meta_lvb *lvb;
1355
1356 mlog_entry_void();
1357
1358 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1359
1360 /*
1361 * Invalidate the LVB of a deleted inode - this way other
1362 * nodes are forced to go to disk and discover the new inode
1363 * status.
1364 */
1365 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1366 lvb->lvb_version = 0;
1367 goto out;
1368 }
1369
1370 lvb->lvb_version = OCFS2_LVB_VERSION;
1371 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1372 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1373 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1374 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1375 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1376 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1377 lvb->lvb_iatime_packed =
1378 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1379 lvb->lvb_ictime_packed =
1380 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
1381 lvb->lvb_imtime_packed =
1382 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
1383 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
1384 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
1385 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
1386
1387out:
1388 mlog_meta_lvb(0, lockres);
1389
1390 mlog_exit_void();
1391}
1392
1393static void ocfs2_unpack_timespec(struct timespec *spec,
1394 u64 packed_time)
1395{
1396 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
1397 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
1398}
1399
1400static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
1401{
1402 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1403 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1404 struct ocfs2_meta_lvb *lvb;
1405
1406 mlog_entry_void();
1407
1408 mlog_meta_lvb(0, lockres);
1409
1410 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1411
1412 /* We're safe here without the lockres lock... */
1413 spin_lock(&oi->ip_lock);
1414 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
1415 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
1416
1417 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
1418 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
1419 ocfs2_set_inode_flags(inode);
1420
1421 /* fast-symlinks are a special case */
1422 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
1423 inode->i_blocks = 0;
1424 else
1425 inode->i_blocks = ocfs2_inode_sector_count(inode);
1426
1427 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
1428 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
1429 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
1430 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
1431 ocfs2_unpack_timespec(&inode->i_atime,
1432 be64_to_cpu(lvb->lvb_iatime_packed));
1433 ocfs2_unpack_timespec(&inode->i_mtime,
1434 be64_to_cpu(lvb->lvb_imtime_packed));
1435 ocfs2_unpack_timespec(&inode->i_ctime,
1436 be64_to_cpu(lvb->lvb_ictime_packed));
1437 spin_unlock(&oi->ip_lock);
1438
1439 mlog_exit_void();
1440}
1441
1442static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
1443 struct ocfs2_lock_res *lockres)
1444{
1445 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1446
1447 if (lvb->lvb_version == OCFS2_LVB_VERSION
1448 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
1449 return 1;
1450 return 0;
1451}
1452
1453/* Determine whether a lock resource needs to be refreshed, and
1454 * arbitrate who gets to refresh it.
1455 *
1456 * 0 means no refresh needed.
1457 *
1458 * > 0 means you need to refresh this and you MUST call
1459 * ocfs2_complete_lock_res_refresh afterwards. */
1460static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
1461{
1462 unsigned long flags;
1463 int status = 0;
1464
1465 mlog_entry_void();
1466
1467refresh_check:
1468 spin_lock_irqsave(&lockres->l_lock, flags);
1469 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
1470 spin_unlock_irqrestore(&lockres->l_lock, flags);
1471 goto bail;
1472 }
1473
1474 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
1475 spin_unlock_irqrestore(&lockres->l_lock, flags);
1476
1477 ocfs2_wait_on_refreshing_lock(lockres);
1478 goto refresh_check;
1479 }
1480
1481 /* Ok, I'll be the one to refresh this lock. */
1482 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
1483 spin_unlock_irqrestore(&lockres->l_lock, flags);
1484
1485 status = 1;
1486bail:
1487 mlog_exit(status);
1488 return status;
1489}
1490
1491/* If status is non-zero, I'll mark it as not being in refresh
1492 * anymore, but I won't clear the needs refresh flag. */
1493static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
1494 int status)
1495{
1496 unsigned long flags;
1497 mlog_entry_void();
1498
1499 spin_lock_irqsave(&lockres->l_lock, flags);
1500 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
1501 if (!status)
1502 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
1503 spin_unlock_irqrestore(&lockres->l_lock, flags);
1504
1505 wake_up(&lockres->l_event);
1506
1507 mlog_exit_void();
1508}
1509
1510/* may or may not return a bh if it went to disk. */
1511static int ocfs2_inode_lock_update(struct inode *inode,
1512 struct buffer_head **bh)
1513{
1514 int status = 0;
1515 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1516 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1517 struct ocfs2_dinode *fe;
1518 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1519
1520 mlog_entry_void();
1521
1522 if (ocfs2_mount_local(osb))
1523 goto bail;
1524
1525 spin_lock(&oi->ip_lock);
1526 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1527 mlog(0, "Orphaned inode %llu was deleted while we "
1528 "were waiting on a lock. ip_flags = 0x%x\n",
1529 (unsigned long long)oi->ip_blkno, oi->ip_flags);
1530 spin_unlock(&oi->ip_lock);
1531 status = -ENOENT;
1532 goto bail;
1533 }
1534 spin_unlock(&oi->ip_lock);
1535
1536 if (!ocfs2_should_refresh_lock_res(lockres))
1537 goto bail;
1538
1539 /* This will discard any caching information we might have had
1540 * for the inode metadata. */
1541 ocfs2_metadata_cache_purge(inode);
1542
1543 ocfs2_extent_map_trunc(inode, 0);
1544
1545 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
1546 mlog(0, "Trusting LVB on inode %llu\n",
1547 (unsigned long long)oi->ip_blkno);
1548 ocfs2_refresh_inode_from_lvb(inode);
1549 } else {
1550 /* Boo, we have to go to disk. */
1551 /* read bh, cast, ocfs2_refresh_inode */
1552 status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
1553 bh, OCFS2_BH_CACHED, inode);
1554 if (status < 0) {
1555 mlog_errno(status);
1556 goto bail_refresh;
1557 }
1558 fe = (struct ocfs2_dinode *) (*bh)->b_data;
1559
1560 /* This is a good chance to make sure we're not
1561 * locking an invalid object.
1562 *
1563 * We bug on a stale inode here because we checked
1564 * above whether it was wiped from disk. The wiping
1565 * node provides a guarantee that we receive that
1566 * message and can mark the inode before dropping any
1567 * locks associated with it. */
1568 if (!OCFS2_IS_VALID_DINODE(fe)) {
1569 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
1570 status = -EIO;
1571 goto bail_refresh;
1572 }
1573 mlog_bug_on_msg(inode->i_generation !=
1574 le32_to_cpu(fe->i_generation),
1575 "Invalid dinode %llu disk generation: %u "
1576 "inode->i_generation: %u\n",
1577 (unsigned long long)oi->ip_blkno,
1578 le32_to_cpu(fe->i_generation),
1579 inode->i_generation);
1580 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
1581 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
1582 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
1583 (unsigned long long)oi->ip_blkno,
1584 (unsigned long long)le64_to_cpu(fe->i_dtime),
1585 le32_to_cpu(fe->i_flags));
1586
1587 ocfs2_refresh_inode(inode, fe);
1588 }
1589
1590 status = 0;
1591bail_refresh:
1592 ocfs2_complete_lock_res_refresh(lockres, status);
1593bail:
1594 mlog_exit(status);
1595 return status;
1596}
1597
1598static int ocfs2_assign_bh(struct inode *inode,
1599 struct buffer_head **ret_bh,
1600 struct buffer_head *passed_bh)
1601{
1602 int status;
1603
1604 if (passed_bh) {
1605 /* Ok, the update went to disk for us, use the
1606 * returned bh. */
1607 *ret_bh = passed_bh;
1608 get_bh(*ret_bh);
1609
1610 return 0;
1611 }
1612
1613 status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1614 OCFS2_I(inode)->ip_blkno,
1615 ret_bh,
1616 OCFS2_BH_CACHED,
1617 inode);
1618 if (status < 0)
1619 mlog_errno(status);
1620
1621 return status;
1622}
1623
1624/*
1625 * returns < 0 error if the callback will never be called, otherwise
1626 * the result of the lock will be communicated via the callback.
1627 */
1628int ocfs2_inode_lock_full(struct inode *inode,
1629 struct buffer_head **ret_bh,
1630 int ex,
1631 int arg_flags)
1632{
1633 int status, level, dlm_flags, acquired;
1634 struct ocfs2_lock_res *lockres = NULL;
1635 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1636 struct buffer_head *local_bh = NULL;
1637
1638 BUG_ON(!inode);
1639
1640 mlog_entry_void();
1641
1642 mlog(0, "inode %llu, take %s META lock\n",
1643 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1644 ex ? "EXMODE" : "PRMODE");
1645
1646 status = 0;
1647 acquired = 0;
1648 /* We'll allow faking a readonly metadata lock for
1649 * rodevices. */
1650 if (ocfs2_is_hard_readonly(osb)) {
1651 if (ex)
1652 status = -EROFS;
1653 goto bail;
1654 }
1655
1656 if (ocfs2_mount_local(osb))
1657 goto local;
1658
1659 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1660 wait_event(osb->recovery_event,
1661 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1662
1663 lockres = &OCFS2_I(inode)->ip_inode_lockres;
1664 level = ex ? LKM_EXMODE : LKM_PRMODE;
1665 dlm_flags = 0;
1666 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
1667 dlm_flags |= LKM_NOQUEUE;
1668
1669 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
1670 if (status < 0) {
1671 if (status != -EAGAIN && status != -EIOCBRETRY)
1672 mlog_errno(status);
1673 goto bail;
1674 }
1675
1676 /* Notify the error cleanup path to drop the cluster lock. */
1677 acquired = 1;
1678
1679 /* We wait twice because a node may have died while we were in
1680 * the lower dlm layers. The second time though, we've
1681 * committed to owning this lock so we don't allow signals to
1682 * abort the operation. */
1683 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1684 wait_event(osb->recovery_event,
1685 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1686
1687local:
1688 /*
1689 * We only see this flag if we're being called from
1690 * ocfs2_read_locked_inode(). It means we're locking an inode
1691 * which hasn't been populated yet, so clear the refresh flag
1692 * and let the caller handle it.
1693 */
1694 if (inode->i_state & I_NEW) {
1695 status = 0;
1696 if (lockres)
1697 ocfs2_complete_lock_res_refresh(lockres, 0);
1698 goto bail;
1699 }
1700
1701 /* This is fun. The caller may want a bh back, or it may
1702 * not. ocfs2_inode_lock_update definitely wants one in, but
1703 * may or may not read one, depending on what's in the
1704 * LVB. The result of all of this is that we've *only* gone to
1705 * disk if we have to, so the complexity is worthwhile. */
1706 status = ocfs2_inode_lock_update(inode, &local_bh);
1707 if (status < 0) {
1708 if (status != -ENOENT)
1709 mlog_errno(status);
1710 goto bail;
1711 }
1712
1713 if (ret_bh) {
1714 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
1715 if (status < 0) {
1716 mlog_errno(status);
1717 goto bail;
1718 }
1719 }
1720
1721bail:
1722 if (status < 0) {
1723 if (ret_bh && (*ret_bh)) {
1724 brelse(*ret_bh);
1725 *ret_bh = NULL;
1726 }
1727 if (acquired)
1728 ocfs2_inode_unlock(inode, ex);
1729 }
1730
1731 if (local_bh)
1732 brelse(local_bh);
1733
1734 mlog_exit(status);
1735 return status;
1736}
1737
1738/*
1739 * This is working around a lock inversion between tasks acquiring DLM
1740 * locks while holding a page lock and the downconvert thread which
1741 * blocks dlm lock acquiry while acquiring page locks.
1742 *
1743 * ** These _with_page variants are only intended to be called from aop
1744 * methods that hold page locks and return a very specific *positive* error
1745 * code that aop methods pass up to the VFS -- test for errors with != 0. **
1746 *
1747 * The DLM is called such that it returns -EAGAIN if it would have
1748 * blocked waiting for the downconvert thread. In that case we unlock
1749 * our page so the downconvert thread can make progress. Once we've
1750 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
1751 * that called us can bubble that back up into the VFS who will then
1752 * immediately retry the aop call.
1753 *
1754 * We do a blocking lock and immediate unlock before returning, though, so that
1755 * the lock has a great chance of being cached on this node by the time the VFS
1756 * calls back to retry the aop. This has a potential to livelock as nodes
1757 * ping locks back and forth, but that's a risk we're willing to take to avoid
1758 * the lock inversion simply.
1759 */
1760int ocfs2_inode_lock_with_page(struct inode *inode,
1761 struct buffer_head **ret_bh,
1762 int ex,
1763 struct page *page)
1764{
1765 int ret;
1766
1767	 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
1768 if (ret == -EAGAIN) {
1769 unlock_page(page);
1770 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
1771 ocfs2_inode_unlock(inode, ex);
1772 ret = AOP_TRUNCATED_PAGE;
1773 }
1774
1775 return ret;
1776}
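/*
 * Illustrative sketch, not from the original file: how an aop method
 * might consume ocfs2_inode_lock_with_page(). The names myfs_readpage
 * and myfs_get_block are hypothetical; only the -EAGAIN ->
 * AOP_TRUNCATED_PAGE contract comes from the function above.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		int ret, unlock = 1;
 *
 *		ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *		if (ret != 0) {
 *			// AOP_TRUNCATED_PAGE: the page was already
 *			// unlocked for us and the VFS will retry the
 *			// whole aop call; negative errors leave the
 *			// page locked, so drop it below.
 *			if (ret == AOP_TRUNCATED_PAGE)
 *				unlock = 0;
 *			goto out;
 *		}
 *
 *		ret = block_read_full_page(page, myfs_get_block);
 *		unlock = 0;	// block_read_full_page() owns the page lock
 *
 *		ocfs2_inode_unlock(inode, 0);
 *	out:
 *		if (unlock)
 *			unlock_page(page);
 *		return ret;
 *	}
 */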
1777
1778int ocfs2_inode_lock_atime(struct inode *inode,
1779 struct vfsmount *vfsmnt,
1780 int *level)
1781{
1782 int ret;
1783
1784 mlog_entry_void();
1785	 ret = ocfs2_inode_lock(inode, NULL, 0);
1786 if (ret < 0) {
1787 mlog_errno(ret);
1788 return ret;
1789 }
1790
1791 /*
1792 * If we should update atime, we will get EX lock,
1793 * otherwise we just get PR lock.
1794 */
1795 if (ocfs2_should_update_atime(inode, vfsmnt)) {
1796 struct buffer_head *bh = NULL;
1797
1798	 ocfs2_inode_unlock(inode, 0);
1799	 ret = ocfs2_inode_lock(inode, &bh, 1);
1800 if (ret < 0) {
1801 mlog_errno(ret);
1802 return ret;
1803 }
1804 *level = 1;
1805 if (ocfs2_should_update_atime(inode, vfsmnt))
1806 ocfs2_update_inode_atime(inode, bh);
1807 if (bh)
1808 brelse(bh);
1809 } else
1810 *level = 0;
1811
1812 mlog_exit(ret);
1813 return ret;
1814}
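/*
 * Illustrative sketch, not from the original file: the caller pairing
 * for ocfs2_inode_lock_atime(). The surrounding read path is
 * hypothetical; the one firm rule, visible above, is that the level
 * reported back through *level - PR (0) or EX (1) - is what must be
 * handed to ocfs2_inode_unlock():
 *
 *	int level = 0;
 *
 *	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &level);
 *	if (ret < 0)
 *		return ret;
 *	// ... perform the read under the cluster lock ...
 *	ocfs2_inode_unlock(inode, level);
 */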
1815
1816void ocfs2_inode_unlock(struct inode *inode,
1817 int ex)
1818{
1819 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1820	 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
1821	 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1822
1823 mlog_entry_void();
1824
1825 mlog(0, "inode %llu drop %s META lock\n",
1826 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1827 ex ? "EXMODE" : "PRMODE");
1828
1829 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
1830 !ocfs2_mount_local(osb))
1831 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1832
1833 mlog_exit_void();
1834}
1835
1836int ocfs2_super_lock(struct ocfs2_super *osb,
1837 int ex)
1838{
1839	 int status = 0;
1840 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1841 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1842 struct buffer_head *bh;
1843 struct ocfs2_slot_info *si = osb->slot_info;
1844
1845 mlog_entry_void();
1846
1847 if (ocfs2_is_hard_readonly(osb))
1848 return -EROFS;
1849
1850 if (ocfs2_mount_local(osb))
1851 goto bail;
1852
1853 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1854 if (status < 0) {
1855 mlog_errno(status);
1856 goto bail;
1857 }
1858
1859 /* The super block lock path is really in the best position to
1860 * know when resources covered by the lock need to be
1861 * refreshed, so we do it here. Of course, making sense of
1862 * everything is up to the caller :) */
1863 status = ocfs2_should_refresh_lock_res(lockres);
1864 if (status < 0) {
1865 mlog_errno(status);
1866 goto bail;
1867 }
1868 if (status) {
1869 bh = si->si_bh;
1870 status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
1871 si->si_inode);
1872 if (status == 0)
1873 ocfs2_update_slot_info(si);
1874
1875 ocfs2_complete_lock_res_refresh(lockres, status);
1876
1877 if (status < 0)
1878 mlog_errno(status);
1879 }
1880bail:
1881 mlog_exit(status);
1882 return status;
1883}
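/*
 * Editor's note, a reading of the call site above rather than original
 * text: ocfs2_should_refresh_lock_res() evidently returns < 0 on error,
 * 0 when the protected state is already current, and nonzero when this
 * caller has won the right to refresh - here, re-reading the slot map
 * block. The winner must always report back through
 * ocfs2_complete_lock_res_refresh(), success or failure, so anyone
 * waiting on the refresh can proceed.
 */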
1884
1885void ocfs2_super_unlock(struct ocfs2_super *osb,
1886 int ex)
1887{
1888 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1889 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1890
1891	 if (!ocfs2_mount_local(osb))
1892	 ocfs2_cluster_unlock(osb, lockres, level);
1893}
1894
1895int ocfs2_rename_lock(struct ocfs2_super *osb)
1896{
1897 int status;
1898 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1899
1900 if (ocfs2_is_hard_readonly(osb))
1901 return -EROFS;
1902
1903	 if (ocfs2_mount_local(osb))
1904	 return 0;
1905
1906 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
1907 if (status < 0)
1908 mlog_errno(status);
1909
1910 return status;
1911}
1912
1913void ocfs2_rename_unlock(struct ocfs2_super *osb)
1914{
1915 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1916
1917	 if (!ocfs2_mount_local(osb))
1918	 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
1919}
1920
1921int ocfs2_dentry_lock(struct dentry *dentry, int ex)
1922{
1923 int ret;
1924 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1925 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
1926 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
1927
1928 BUG_ON(!dl);
1929
1930 if (ocfs2_is_hard_readonly(osb))
1931 return -EROFS;
1932
1933	 if (ocfs2_mount_local(osb))
1934	 return 0;
1935
1936 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
1937 if (ret < 0)
1938 mlog_errno(ret);
1939
1940 return ret;
1941}
1942
1943void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
1944{
1945 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1946 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
1947 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
1948
1949	 if (!ocfs2_mount_local(osb))
1950	 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
1951}
1952
1953/* Reference counting of the dlm debug structure. We want this because
1954 * open references on the debug inodes can outlive the mount, so
1955 * we can't rely on the ocfs2_super to always exist. */
1956static void ocfs2_dlm_debug_free(struct kref *kref)
1957{
1958 struct ocfs2_dlm_debug *dlm_debug;
1959
1960 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
1961
1962 kfree(dlm_debug);
1963}
1964
1965void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
1966{
1967 if (dlm_debug)
1968 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
1969}
1970
1971static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
1972{
1973 kref_get(&debug->d_refcnt);
1974}
1975
1976struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
1977{
1978 struct ocfs2_dlm_debug *dlm_debug;
1979
1980 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
1981 if (!dlm_debug) {
1982 mlog_errno(-ENOMEM);
1983 goto out;
1984 }
1985
1986 kref_init(&dlm_debug->d_refcnt);
1987 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
1988 dlm_debug->d_locking_state = NULL;
1989out:
1990 return dlm_debug;
1991}
1992
1993/* Access to this is arbitrated for us via seq_file->sem. */
1994struct ocfs2_dlm_seq_priv {
1995 struct ocfs2_dlm_debug *p_dlm_debug;
1996 struct ocfs2_lock_res p_iter_res;
1997 struct ocfs2_lock_res p_tmp_res;
1998};
1999
2000static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2001 struct ocfs2_dlm_seq_priv *priv)
2002{
2003 struct ocfs2_lock_res *iter, *ret = NULL;
2004 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2005
2006 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2007
2008 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2009 /* discover the head of the list */
2010 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2011 mlog(0, "End of list found, %p\n", ret);
2012 break;
2013 }
2014
2015	 /* We track our "dummy" iteration lockres by a NULL
2016	 * l_ops field. */
2017 if (iter->l_ops != NULL) {
2018 ret = iter;
2019 break;
2020 }
2021 }
2022
2023 return ret;
2024}
2025
2026static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2027{
2028 struct ocfs2_dlm_seq_priv *priv = m->private;
2029 struct ocfs2_lock_res *iter;
2030
2031 spin_lock(&ocfs2_dlm_tracking_lock);
2032 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2033 if (iter) {
2034	 /* Since a lockres has the lifetime of its container
2035	 * (which can be an inode, an ocfs2_super, etc.) we want to
2036	 * copy it out to a temporary lockres while still
2037	 * under the spinlock. Obviously after this we can't
2038	 * trust any pointers on the copy returned, but that's
2039	 * ok as the information we want isn't typically held
2040	 * in them. */
2041 priv->p_tmp_res = *iter;
2042 iter = &priv->p_tmp_res;
2043 }
2044 spin_unlock(&ocfs2_dlm_tracking_lock);
2045
2046 return iter;
2047}
2048
2049static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2050{
2051}
2052
2053static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2054{
2055 struct ocfs2_dlm_seq_priv *priv = m->private;
2056 struct ocfs2_lock_res *iter = v;
2057 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2058
2059 spin_lock(&ocfs2_dlm_tracking_lock);
2060 iter = ocfs2_dlm_next_res(iter, priv);
2061 list_del_init(&dummy->l_debug_list);
2062 if (iter) {
2063 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2064 priv->p_tmp_res = *iter;
2065 iter = &priv->p_tmp_res;
2066 }
2067 spin_unlock(&ocfs2_dlm_tracking_lock);
2068
2069 return iter;
2070}
2071
2072/* So that debugfs.ocfs2 can determine which format is being used */
2073#define OCFS2_DLM_DEBUG_STR_VERSION 1
2074static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2075{
2076 int i;
2077 char *lvb;
2078 struct ocfs2_lock_res *lockres = v;
2079
2080 if (!lockres)
2081 return -EINVAL;
2082
2083 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2084
2085 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2086 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2087 lockres->l_name,
2088 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2089 else
2090 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2091
2092 seq_printf(m, "%d\t"
2093 "0x%lx\t"
2094 "0x%x\t"
2095 "0x%x\t"
2096 "%u\t"
2097 "%u\t"
2098 "%d\t"
2099 "%d\t",
2100 lockres->l_level,
2101 lockres->l_flags,
2102 lockres->l_action,
2103 lockres->l_unlock_action,
2104 lockres->l_ro_holders,
2105 lockres->l_ex_holders,
2106 lockres->l_requested,
2107 lockres->l_blocking);
2108
2109 /* Dump the raw LVB */
2110 lvb = lockres->l_lksb.lvb;
2111 for(i = 0; i < DLM_LVB_LEN; i++)
2112 seq_printf(m, "0x%x\t", lvb[i]);
2113
2114 /* End the line */
2115 seq_printf(m, "\n");
2116 return 0;
2117}
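/*
 * Editor's note, derived from the seq_printf() calls above rather than
 * original text: each locking_state record is a single tab-separated
 * line of the form
 *
 *	version  name  level  flags  action  unlock_action  ro_holders
 *	ex_holders  requested  blocking  lvb[0] ... lvb[DLM_LVB_LEN - 1]
 *
 * Dentry locks print a shortened name plus the lock's inode number in
 * hex; the leading version field is what lets debugfs.ocfs2 detect
 * format changes.
 */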
2118
2119static struct seq_operations ocfs2_dlm_seq_ops = {
2120 .start = ocfs2_dlm_seq_start,
2121 .stop = ocfs2_dlm_seq_stop,
2122 .next = ocfs2_dlm_seq_next,
2123 .show = ocfs2_dlm_seq_show,
2124};
2125
2126static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2127{
2128 struct seq_file *seq = (struct seq_file *) file->private_data;
2129 struct ocfs2_dlm_seq_priv *priv = seq->private;
2130 struct ocfs2_lock_res *res = &priv->p_iter_res;
2131
2132 ocfs2_remove_lockres_tracking(res);
2133 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2134 return seq_release_private(inode, file);
2135}
2136
2137static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2138{
2139 int ret;
2140 struct ocfs2_dlm_seq_priv *priv;
2141 struct seq_file *seq;
2142 struct ocfs2_super *osb;
2143
2144 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2145 if (!priv) {
2146 ret = -ENOMEM;
2147 mlog_errno(ret);
2148 goto out;
2149 }
2150	 osb = inode->i_private;
2151 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2152 priv->p_dlm_debug = osb->osb_dlm_debug;
2153 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2154
2155 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2156 if (ret) {
2157 kfree(priv);
2158 mlog_errno(ret);
2159 goto out;
2160 }
2161
2162 seq = (struct seq_file *) file->private_data;
2163 seq->private = priv;
2164
2165 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2166 priv->p_dlm_debug);
2167
2168out:
2169 return ret;
2170}
2171
2172static const struct file_operations ocfs2_dlm_debug_fops = {
2173 .open = ocfs2_dlm_debug_open,
2174 .release = ocfs2_dlm_debug_release,
2175 .read = seq_read,
2176 .llseek = seq_lseek,
2177};
2178
2179static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2180{
2181 int ret = 0;
2182 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2183
2184 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2185 S_IFREG|S_IRUSR,
2186 osb->osb_debug_root,
2187 osb,
2188 &ocfs2_dlm_debug_fops);
2189 if (!dlm_debug->d_locking_state) {
2190 ret = -EINVAL;
2191 mlog(ML_ERROR,
2192 "Unable to create locking state debugfs file.\n");
2193 goto out;
2194 }
2195
2196 ocfs2_get_dlm_debug(dlm_debug);
2197out:
2198 return ret;
2199}
2200
2201static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2202{
2203 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2204
2205 if (dlm_debug) {
2206 debugfs_remove(dlm_debug->d_locking_state);
2207 ocfs2_put_dlm_debug(dlm_debug);
2208 }
2209}
2210
2211int ocfs2_dlm_init(struct ocfs2_super *osb)
2212{
2213	 int status = 0;
2214	 u32 dlm_key;
2215	 struct dlm_ctxt *dlm = NULL;
2216
2217 mlog_entry_void();
2218
2219 if (ocfs2_mount_local(osb))
2220 goto local;
2221
2222 status = ocfs2_dlm_init_debug(osb);
2223 if (status < 0) {
2224 mlog_errno(status);
2225 goto bail;
2226 }
2227
2228 /* launch downconvert thread */
2229 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2230 if (IS_ERR(osb->dc_task)) {
2231 status = PTR_ERR(osb->dc_task);
2232 osb->dc_task = NULL;
2233 mlog_errno(status);
2234 goto bail;
2235 }
2236
2237 /* used by the dlm code to make message headers unique, each
2238 * node in this domain must agree on this. */
2239 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2240
2241 /* for now, uuid == domain */
2242 dlm = dlm_register_domain(osb->uuid_str, dlm_key);
2243 if (IS_ERR(dlm)) {
2244 status = PTR_ERR(dlm);
2245 mlog_errno(status);
2246 goto bail;
2247 }
2248
2249 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2250
2251local:
2252 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2253 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2254
2255 osb->dlm = dlm;
2256
2257 status = 0;
2258bail:
2259 if (status < 0) {
2260 ocfs2_dlm_shutdown_debug(osb);
2261 if (osb->dc_task)
2262 kthread_stop(osb->dc_task);
2263 }
2264
2265 mlog_exit(status);
2266 return status;
2267}
2268
2269void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2270{
2271 mlog_entry_void();
2272
2273 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2274
2275 ocfs2_drop_osb_locks(osb);
2276
2277 if (osb->dc_task) {
2278 kthread_stop(osb->dc_task);
2279 osb->dc_task = NULL;
2280 }
2281
2282 ocfs2_lock_res_free(&osb->osb_super_lockres);
2283 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2284
2285 dlm_unregister_domain(osb->dlm);
2286 osb->dlm = NULL;
2287
2288 ocfs2_dlm_shutdown_debug(osb);
2289
2290 mlog_exit_void();
2291}
2292
2293static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
2294{
2295 struct ocfs2_lock_res *lockres = opaque;
2296 unsigned long flags;
2297
2298 mlog_entry_void();
2299
2300 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2301 lockres->l_unlock_action);
2302
2303 spin_lock_irqsave(&lockres->l_lock, flags);
2304 /* We tried to cancel a convert request, but it was already
2305 * granted. All we want to do here is clear our unlock
2306 * state. The wake_up call done at the bottom is redundant
2307 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2308 * hurt anything anyway */
2309 if (status == DLM_CANCELGRANT &&
2310 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2311 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2312
2313 /* We don't clear the busy flag in this case as it
2314 * should have been cleared by the ast which the dlm
2315 * has called. */
2316 goto complete_unlock;
2317 }
2318
2319 if (status != DLM_NORMAL) {
2320 mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
2321 "unlock_action %d\n", status, lockres->l_name,
2322 lockres->l_unlock_action);
2323 spin_unlock_irqrestore(&lockres->l_lock, flags);
2324 return;
2325 }
2326
2327 switch(lockres->l_unlock_action) {
2328 case OCFS2_UNLOCK_CANCEL_CONVERT:
2329 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2330 lockres->l_action = OCFS2_AST_INVALID;
2331 break;
2332 case OCFS2_UNLOCK_DROP_LOCK:
2333 lockres->l_level = LKM_IVMODE;
2334 break;
2335 default:
2336 BUG();
2337 }
2338
2339 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2340complete_unlock:
2341 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2342 spin_unlock_irqrestore(&lockres->l_lock, flags);
2343
2344 wake_up(&lockres->l_event);
2345
2346 mlog_exit_void();
2347}
2348
2349static int ocfs2_drop_lock(struct ocfs2_super *osb,
2350	 struct ocfs2_lock_res *lockres)
2351{
2352 enum dlm_status status;
2353 unsigned long flags;
2354	 int lkm_flags = 0;
2355
2356 /* We didn't get anywhere near actually using this lockres. */
2357 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2358 goto out;
2359
2360 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2361 lkm_flags |= LKM_VALBLK;
2362
2363 spin_lock_irqsave(&lockres->l_lock, flags);
2364
2365 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2366 "lockres %s, flags 0x%lx\n",
2367 lockres->l_name, lockres->l_flags);
2368
2369 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2370 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2371 "%u, unlock_action = %u\n",
2372 lockres->l_name, lockres->l_flags, lockres->l_action,
2373 lockres->l_unlock_action);
2374
2375 spin_unlock_irqrestore(&lockres->l_lock, flags);
2376
2377 /* XXX: Today we just wait on any busy
2378 * locks... Perhaps we need to cancel converts in the
2379 * future? */
2380 ocfs2_wait_on_busy_lock(lockres);
2381
2382 spin_lock_irqsave(&lockres->l_lock, flags);
2383 }
2384
2385 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2386 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2387 lockres->l_level == LKM_EXMODE &&
2388 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2389 lockres->l_ops->set_lvb(lockres);
2390 }
2391
2392 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2393 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2394 lockres->l_name);
2395 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2396 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2397
2398 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2399 spin_unlock_irqrestore(&lockres->l_lock, flags);
2400 goto out;
2401 }
2402
2403 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2404
2405 /* make sure we never get here while waiting for an ast to
2406 * fire. */
2407 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2408
2409 /* is this necessary? */
2410 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2411 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2412 spin_unlock_irqrestore(&lockres->l_lock, flags);
2413
2414 mlog(0, "lock %s\n", lockres->l_name);
2415
2416	 status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
2417	 ocfs2_unlock_ast, lockres);
2418 if (status != DLM_NORMAL) {
2419 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2420 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2421 dlm_print_one_lock(lockres->l_lksb.lockid);
2422 BUG();
2423 }
2424	 mlog(0, "lock %s, successful return from dlmunlock\n",
2425 lockres->l_name);
2426
2427 ocfs2_wait_on_busy_lock(lockres);
2428out:
2429 mlog_exit(0);
2430 return 0;
2431}
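/*
 * Editor's summary of ocfs2_drop_lock(), not original text: teardown is
 * strictly ordered - (1) wait out any BUSY convert/cancel in flight,
 * (2) push a final LVB if this node holds EX and the value is current,
 * (3) re-mark the lockres BUSY with unlock_action
 * OCFS2_UNLOCK_DROP_LOCK so ocfs2_unlock_ast() knows how to complete
 * it, then (4) dlmunlock() and wait for BUSY to clear again. A
 * dlmunlock() failure is treated as unrecoverable (BUG) because the
 * lockres state could no longer be trusted.
 */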
2432
2433/* Mark the lockres as being dropped. It will no longer be
2434 * queued if blocking, but we still may have to wait on it
2435 * being dequeued from the downconvert thread before we can consider
2436 * it safe to drop.
2437 *
2438 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2439void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2440{
2441 int status;
2442 struct ocfs2_mask_waiter mw;
2443 unsigned long flags;
2444
2445 ocfs2_init_mask_waiter(&mw);
2446
2447 spin_lock_irqsave(&lockres->l_lock, flags);
2448 lockres->l_flags |= OCFS2_LOCK_FREEING;
2449 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2450 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2451 spin_unlock_irqrestore(&lockres->l_lock, flags);
2452
2453 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2454
2455 status = ocfs2_wait_for_mask(&mw);
2456 if (status)
2457 mlog_errno(status);
2458
2459 spin_lock_irqsave(&lockres->l_lock, flags);
2460 }
2461 spin_unlock_irqrestore(&lockres->l_lock, flags);
2462}
2463
2464void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2465	 struct ocfs2_lock_res *lockres)
2466{
2467	 int ret;
2468
2469	 ocfs2_mark_lockres_freeing(lockres);
2470	 ret = ocfs2_drop_lock(osb, lockres);
2471	 if (ret)
2472	 mlog_errno(ret);
2473}
2474
2475static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2476{
2477 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2478 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2479}
2480
2481int ocfs2_drop_inode_locks(struct inode *inode)
2482{
2483 int status, err;
2484
2485 mlog_entry_void();
2486
2487 /* No need to call ocfs2_mark_lockres_freeing here -
2488 * ocfs2_clear_inode has done it for us. */
2489
2490 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2491	 &OCFS2_I(inode)->ip_open_lockres);
2492 if (err < 0)
2493 mlog_errno(err);
2494
2495 status = err;
2496
2497 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2498	 &OCFS2_I(inode)->ip_inode_lockres);
2499 if (err < 0)
2500 mlog_errno(err);
2501 if (err < 0 && !status)
2502 status = err;
2503
2504 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2505	 &OCFS2_I(inode)->ip_rw_lockres);
2506 if (err < 0)
2507 mlog_errno(err);
2508 if (err < 0 && !status)
2509 status = err;
2510
2511 mlog_exit(status);
2512 return status;
2513}
2514
2515static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2516 int new_level)
2517{
2518 assert_spin_locked(&lockres->l_lock);
2519
2520 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2521
2522 if (lockres->l_level <= new_level) {
2523 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2524 lockres->l_level, new_level);
2525 BUG();
2526 }
2527
2528 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2529 lockres->l_name, new_level, lockres->l_blocking);
2530
2531 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2532 lockres->l_requested = new_level;
2533 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2534}
2535
2536static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2537 struct ocfs2_lock_res *lockres,
2538 int new_level,
2539 int lvb)
2540{
2541 int ret, dlm_flags = LKM_CONVERT;
2542 enum dlm_status status;
2543
2544 mlog_entry_void();
2545
2546 if (lvb)
2547 dlm_flags |= LKM_VALBLK;
2548
2549 status = dlmlock(osb->dlm,
2550 new_level,
2551 &lockres->l_lksb,
2552 dlm_flags,
2553 lockres->l_name,
2554	 OCFS2_LOCK_ID_MAX_LEN - 1,
2555	 ocfs2_locking_ast,
2556	 lockres,
2557	 ocfs2_blocking_ast);
2558 if (status != DLM_NORMAL) {
2559 ocfs2_log_dlm_error("dlmlock", status, lockres);
2560 ret = -EINVAL;
2561 ocfs2_recover_from_dlm_error(lockres, 1);
2562 goto bail;
2563 }
2564
2565 ret = 0;
2566bail:
2567 mlog_exit(ret);
2568 return ret;
2569}
2570
2571/* returns 1 when the caller should unlock and call dlmunlock */
2572static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2573 struct ocfs2_lock_res *lockres)
2574{
2575 assert_spin_locked(&lockres->l_lock);
2576
2577 mlog_entry_void();
2578 mlog(0, "lock %s\n", lockres->l_name);
2579
2580 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2581 /* If we're already trying to cancel a lock conversion
2582 * then just drop the spinlock and allow the caller to
2583 * requeue this lock. */
2584
2585 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2586 return 0;
2587 }
2588
2589	 /* were we in a convert when the bast fired? */
2590 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2591 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2592 /* set things up for the unlockast to know to just
2593 * clear out the ast_action and unset busy, etc. */
2594 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2595
2596 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2597 "lock %s, invalid flags: 0x%lx\n",
2598 lockres->l_name, lockres->l_flags);
2599
2600 return 1;
2601}
2602
2603static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2604 struct ocfs2_lock_res *lockres)
2605{
2606 int ret;
2607 enum dlm_status status;
2608
2609 mlog_entry_void();
2610 mlog(0, "lock %s\n", lockres->l_name);
2611
2612 ret = 0;
2613 status = dlmunlock(osb->dlm,
2614 &lockres->l_lksb,
2615 LKM_CANCEL,
2616	 ocfs2_unlock_ast,
2617 lockres);
2618 if (status != DLM_NORMAL) {
2619 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2620 ret = -EINVAL;
2621 ocfs2_recover_from_dlm_error(lockres, 0);
2622 }
2623
2624 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
2625
2626 mlog_exit(ret);
2627 return ret;
2628}
2629
2630static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2631 struct ocfs2_lock_res *lockres,
2632 struct ocfs2_unblock_ctl *ctl)
2633{
2634 unsigned long flags;
2635 int blocking;
2636 int new_level;
2637 int ret = 0;
2638	 int set_lvb = 0;
2639
2640 mlog_entry_void();
2641
2642 spin_lock_irqsave(&lockres->l_lock, flags);
2643
2644 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2645
2646recheck:
2647 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2648	 ctl->requeue = 1;
2649 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2650 spin_unlock_irqrestore(&lockres->l_lock, flags);
2651 if (ret) {
2652 ret = ocfs2_cancel_convert(osb, lockres);
2653 if (ret < 0)
2654 mlog_errno(ret);
2655 }
2656 goto leave;
2657 }
2658
2659 /* if we're blocking an exclusive and we have *any* holders,
2660 * then requeue. */
2661 if ((lockres->l_blocking == LKM_EXMODE)
2662	 && (lockres->l_ex_holders || lockres->l_ro_holders))
2663	 goto leave_requeue;
2664
2665 /* If it's a PR we're blocking, then only
2666 * requeue if we've got any EX holders */
2667 if (lockres->l_blocking == LKM_PRMODE &&
2668 lockres->l_ex_holders)
2669 goto leave_requeue;
2670
2671 /*
2672 * Can we get a lock in this state if the holder counts are
2673 * zero? The meta data unblock code used to check this.
2674 */
2675 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2676 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2677 goto leave_requeue;
2678
2679 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2680
2681 if (lockres->l_ops->check_downconvert
2682 && !lockres->l_ops->check_downconvert(lockres, new_level))
2683 goto leave_requeue;
2684
2685 /* If we get here, then we know that there are no more
2686 * incompatible holders (and anyone asking for an incompatible
2687 * lock is blocked). We can now downconvert the lock */
2688	 if (!lockres->l_ops->downconvert_worker)
2689 goto downconvert;
2690
2691 /* Some lockres types want to do a bit of work before
2692 * downconverting a lock. Allow that here. The worker function
2693 * may sleep, so we save off a copy of what we're blocking as
2694 * it may change while we're not holding the spin lock. */
2695 blocking = lockres->l_blocking;
2696 spin_unlock_irqrestore(&lockres->l_lock, flags);
2697
2698	 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2699
2700 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2701 goto leave;
2702
2703 spin_lock_irqsave(&lockres->l_lock, flags);
2704 if (blocking != lockres->l_blocking) {
2705 /* If this changed underneath us, then we can't drop
2706 * it just yet. */
2707 goto recheck;
2708 }
2709
2710downconvert:
2711	 ctl->requeue = 0;
2712
2713 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2714 if (lockres->l_level == LKM_EXMODE)
2715 set_lvb = 1;
2716
2717 /*
2718 * We only set the lvb if the lock has been fully
2719 * refreshed - otherwise we risk setting stale
2720	 * data. If we don't set it, there's no need to actually
2721	 * clear out the lvb here, as its value is still valid.
2722 */
2723 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2724 lockres->l_ops->set_lvb(lockres);
2725 }
2726
2727 ocfs2_prepare_downconvert(lockres, new_level);
2728 spin_unlock_irqrestore(&lockres->l_lock, flags);
2729	 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2730leave:
2731 mlog_exit(ret);
2732 return ret;
2733
2734leave_requeue:
2735 spin_unlock_irqrestore(&lockres->l_lock, flags);
2736 ctl->requeue = 1;
2737
2738 mlog_exit(0);
2739 return 0;
2740}
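/*
 * Editor's note on the decision order in ocfs2_unblock_lock(), not
 * original text:
 *
 *	1. BUSY (a convert is in flight)   -> try to cancel it, requeue
 *	2. blocking EX with any holders, or
 *	   blocking PR with EX holders     -> requeue until they drain
 *	3. lockres still REFRESHING        -> requeue
 *	4. ->check_downconvert() says no   -> requeue (e.g. metadata not
 *	                                      yet checkpointed)
 *	5. run ->downconvert_worker(), redo the checks if l_blocking
 *	   changed meanwhile, set the LVB if warranted, downconvert.
 */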
2741
2742static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2743	 int blocking)
2744{
2745 struct inode *inode;
2746 struct address_space *mapping;
2747
2748 inode = ocfs2_lock_res_inode(lockres);
2749 mapping = inode->i_mapping;
2750
2751 if (S_ISREG(inode->i_mode))
2752 goto out;
2753
2754 /*
2755 * We need this before the filemap_fdatawrite() so that it can
2756 * transfer the dirty bit from the PTE to the
2757 * page. Unfortunately this means that even for EX->PR
2758 * downconverts, we'll lose our mappings and have to build
2759 * them up again.
2760 */
2761 unmap_mapping_range(mapping, 0, 0, 0);
2762
2763	 if (filemap_fdatawrite(mapping)) {
2764 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
2765 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2766 }
2767 sync_mapping_buffers(mapping);
2768 if (blocking == LKM_EXMODE) {
2769 truncate_inode_pages(mapping, 0);
2770 } else {
2771 /* We only need to wait on the I/O if we're not also
2772 * truncating pages because truncate_inode_pages waits
2773 * for us above. We don't truncate pages if we're
2774 * blocking anything < EXMODE because we want to keep
2775 * them around in that case. */
2776 filemap_fdatawait(mapping);
2777 }
2778
2779out:
2780	 return UNBLOCK_CONTINUE;
2781}
2782
2783static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
2784 int new_level)
2785{
2786 struct inode *inode = ocfs2_lock_res_inode(lockres);
2787 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
2788
2789 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
2790 BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
2791
2792 if (checkpointed)
2793 return 1;
2794
2795 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
2796 return 0;
2797}
2798
2799static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
2800{
2801 struct inode *inode = ocfs2_lock_res_inode(lockres);
2802
2803 __ocfs2_stuff_meta_lvb(inode);
2804}
2805
2806/*
2807 * Does the final reference drop on our dentry lock. Right now this
2808 * happens in the downconvert thread, but we could choose to simplify the
2809 * dlmglue API and push these off to the ocfs2_wq in the future.
2810 */
2811static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
2812 struct ocfs2_lock_res *lockres)
2813{
2814 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2815 ocfs2_dentry_lock_put(osb, dl);
2816}
2817
2818/*
2819 * d_delete() matching dentries before the lock downconvert.
2820 *
2821 * At this point, any process waiting to destroy the
2822 * dentry_lock due to last ref count is stopped by the
2823 * OCFS2_LOCK_QUEUED flag.
2824 *
2825 * We have two potential problems
2826 *
2827 * 1) If we do the last reference drop on our dentry_lock (via dput)
2828 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
2829 * the downconvert to finish. Instead we take an elevated
2830 * reference and push the drop until after we've completed our
2831 * unblock processing.
2832 *
2833 * 2) There might be another process with a final reference,
2834 * waiting on us to finish processing. If this is the case, we
2835 * detect it and exit out - there are no more dentries anyway.
2836 */
2837static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
2838 int blocking)
2839{
2840 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2841 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
2842 struct dentry *dentry;
2843 unsigned long flags;
2844 int extra_ref = 0;
2845
2846 /*
2847 * This node is blocking another node from getting a read
2848 * lock. This happens when we've renamed within a
2849 * directory. We've forced the other nodes to d_delete(), but
2850 * we never actually dropped our lock because it's still
2851 * valid. The downconvert code will retain a PR for this node,
2852 * so there's no further work to do.
2853 */
2854 if (blocking == LKM_PRMODE)
2855 return UNBLOCK_CONTINUE;
2856
2857 /*
2858 * Mark this inode as potentially orphaned. The code in
2859 * ocfs2_delete_inode() will figure out whether it actually
2860 * needs to be freed or not.
2861 */
2862 spin_lock(&oi->ip_lock);
2863 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
2864 spin_unlock(&oi->ip_lock);
2865
2866 /*
2867 * Yuck. We need to make sure however that the check of
2868 * OCFS2_LOCK_FREEING and the extra reference are atomic with
2869 * respect to a reference decrement or the setting of that
2870 * flag.
2871 */
2872 spin_lock_irqsave(&lockres->l_lock, flags);
2873 spin_lock(&dentry_attach_lock);
2874 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
2875 && dl->dl_count) {
2876 dl->dl_count++;
2877 extra_ref = 1;
2878 }
2879 spin_unlock(&dentry_attach_lock);
2880 spin_unlock_irqrestore(&lockres->l_lock, flags);
2881
2882 mlog(0, "extra_ref = %d\n", extra_ref);
2883
2884 /*
2885 * We have a process waiting on us in ocfs2_dentry_iput(),
2886 * which means we can't have any more outstanding
2887 * aliases. There's no need to do any more work.
2888 */
2889 if (!extra_ref)
2890 return UNBLOCK_CONTINUE;
2891
2892 spin_lock(&dentry_attach_lock);
2893 while (1) {
2894 dentry = ocfs2_find_local_alias(dl->dl_inode,
2895 dl->dl_parent_blkno, 1);
2896 if (!dentry)
2897 break;
2898 spin_unlock(&dentry_attach_lock);
2899
2900 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
2901 dentry->d_name.name);
2902
2903 /*
2904 * The following dcache calls may do an
2905 * iput(). Normally we don't want that from the
2906 * downconverting thread, but in this case it's ok
2907 * because the requesting node already has an
2908 * exclusive lock on the inode, so it can't be queued
2909 * for a downconvert.
2910 */
2911 d_delete(dentry);
2912 dput(dentry);
2913
2914 spin_lock(&dentry_attach_lock);
2915 }
2916 spin_unlock(&dentry_attach_lock);
2917
2918 /*
2919 * If we are the last holder of this dentry lock, there is no
2920 * reason to downconvert so skip straight to the unlock.
2921 */
2922 if (dl->dl_count == 1)
2923 return UNBLOCK_STOP_POST;
2924
2925 return UNBLOCK_CONTINUE_POST;
2926}
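/*
 * Editor's note on the three exits of ocfs2_dentry_convert_worker(),
 * not original text: a PR request needs no dentry work, so we
 * UNBLOCK_CONTINUE straight away; if we fail to take the extra
 * reference, a final-ref holder is already waiting on us and we also
 * UNBLOCK_CONTINUE with nothing left to d_delete(); otherwise every
 * local alias is deleted and we return UNBLOCK_STOP_POST when our
 * elevated reference turns out to be the last one (a downconvert would
 * be pointless), or UNBLOCK_CONTINUE_POST to downconvert and then drop
 * that reference in ocfs2_dentry_post_unlock().
 */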
2927
2928void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
2929 struct ocfs2_lock_res *lockres)
2930{
2931 int status;
2932	 struct ocfs2_unblock_ctl ctl = {0, 0,};
2933 unsigned long flags;
2934
2935 /* Our reference to the lockres in this function can be
2936 * considered valid until we remove the OCFS2_LOCK_QUEUED
2937 * flag. */
2938
2939 mlog_entry_void();
2940
2941 BUG_ON(!lockres);
2942 BUG_ON(!lockres->l_ops);
2943
2944 mlog(0, "lockres %s blocked.\n", lockres->l_name);
2945
2946 /* Detect whether a lock has been marked as going away while
2947 * the downconvert thread was processing other things. A lock can
2948 * still be marked with OCFS2_LOCK_FREEING after this check,
2949 * but short circuiting here will still save us some
2950 * performance. */
2951 spin_lock_irqsave(&lockres->l_lock, flags);
2952 if (lockres->l_flags & OCFS2_LOCK_FREEING)
2953 goto unqueue;
2954 spin_unlock_irqrestore(&lockres->l_lock, flags);
2955
2956	 status = ocfs2_unblock_lock(osb, lockres, &ctl);
2957 if (status < 0)
2958 mlog_errno(status);
2959
2960 spin_lock_irqsave(&lockres->l_lock, flags);
2961unqueue:
2962	 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
2963 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
2964 } else
2965 ocfs2_schedule_blocked_lock(osb, lockres);
2966
2967 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
2968	 ctl.requeue ? "yes" : "no");
2969 spin_unlock_irqrestore(&lockres->l_lock, flags);
2970
2971 if (ctl.unblock_action != UNBLOCK_CONTINUE
2972 && lockres->l_ops->post_unlock)
2973 lockres->l_ops->post_unlock(osb, lockres);
2974
2975 mlog_exit_void();
2976}
2977
2978static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
2979 struct ocfs2_lock_res *lockres)
2980{
2981 mlog_entry_void();
2982
2983 assert_spin_locked(&lockres->l_lock);
2984
2985 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
2986 /* Do not schedule a lock for downconvert when it's on
2987 * the way to destruction - any nodes wanting access
2988 * to the resource will get it soon. */
2989 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
2990 lockres->l_name, lockres->l_flags);
2991 return;
2992 }
2993
2994 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
2995
2996	 spin_lock(&osb->dc_task_lock);
2997 if (list_empty(&lockres->l_blocked_list)) {
2998 list_add_tail(&lockres->l_blocked_list,
2999 &osb->blocked_lock_list);
3000 osb->blocked_lock_count++;
3001 }
3002	 spin_unlock(&osb->dc_task_lock);
3003
3004 mlog_exit_void();
3005}
3006
3007static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3008{
3009 unsigned long processed;
3010 struct ocfs2_lock_res *lockres;
3011
3012 mlog_entry_void();
3013
3014 spin_lock(&osb->dc_task_lock);
3015 /* grab this early so we know to try again if a state change and
3016 * wake happens part-way through our work */
3017 osb->dc_work_sequence = osb->dc_wake_sequence;
3018
3019 processed = osb->blocked_lock_count;
3020 while (processed) {
3021 BUG_ON(list_empty(&osb->blocked_lock_list));
3022
3023 lockres = list_entry(osb->blocked_lock_list.next,
3024 struct ocfs2_lock_res, l_blocked_list);
3025 list_del_init(&lockres->l_blocked_list);
3026 osb->blocked_lock_count--;
3027 spin_unlock(&osb->dc_task_lock);
3028
3029 BUG_ON(!processed);
3030 processed--;
3031
3032 ocfs2_process_blocked_lock(osb, lockres);
3033
3034 spin_lock(&osb->dc_task_lock);
3035 }
3036 spin_unlock(&osb->dc_task_lock);
3037
3038 mlog_exit_void();
3039}
3040
3041static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3042{
3043 int empty = 0;
3044
3045 spin_lock(&osb->dc_task_lock);
3046 if (list_empty(&osb->blocked_lock_list))
3047 empty = 1;
3048
3049 spin_unlock(&osb->dc_task_lock);
3050 return empty;
3051}
3052
3053static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
3054{
3055 int should_wake = 0;
3056
3057 spin_lock(&osb->dc_task_lock);
3058 if (osb->dc_work_sequence != osb->dc_wake_sequence)
3059 should_wake = 1;
3060 spin_unlock(&osb->dc_task_lock);
3061
3062 return should_wake;
3063}
3064
3065int ocfs2_downconvert_thread(void *arg)
3066{
3067 int status = 0;
3068 struct ocfs2_super *osb = arg;
3069
3070 /* only quit once we've been asked to stop and there is no more
3071 * work available */
3072 while (!(kthread_should_stop() &&
3073 ocfs2_downconvert_thread_lists_empty(osb))) {
3074
3075 wait_event_interruptible(osb->dc_event,
3076 ocfs2_downconvert_thread_should_wake(osb) ||
3077 kthread_should_stop());
3078
3079 mlog(0, "downconvert_thread: awoken\n");
3080
3081 ocfs2_downconvert_thread_do_work(osb);
3082 }
3083
3084 osb->dc_task = NULL;
3085 return status;
3086}
3087
3088void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
3089{
3090 spin_lock(&osb->dc_task_lock);
3091	 /* make sure the downconvert thread gets a swipe at whatever changes
3092	 * the caller may have made to the lock state */
3093 osb->dc_wake_sequence++;
3094 spin_unlock(&osb->dc_task_lock);
3095 wake_up(&osb->dc_event);
3096}
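/*
 * Editor's note, not original text: the dc_wake_sequence /
 * dc_work_sequence pair above is a lost-wakeup guard. Wakers bump
 * dc_wake_sequence under dc_task_lock before calling wake_up(); the
 * worker snapshots it into dc_work_sequence before draining the
 * blocked-lock list. A wake that lands mid-drain leaves the counters
 * unequal, so ocfs2_downconvert_thread_should_wake() returns true and
 * the thread loops again instead of sleeping through the new request.
 */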