/* fs/gfs2/glops.c */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		gfs2_log_unlock(sdp);

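		/*
		 * The log lock cannot be held across gfs2_trans_add_revoke():
		 * queueing the revoke for this block may sleep and takes the
		 * log lock itself.
		 */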
		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip)
		return;
	inode = &ip->i_inode;
	if (!S_ISREG(inode->i_mode))
		return;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

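	/*
	 * GIF_SW_PAGED means the inode has been faulted through a shared
	 * writable mapping, so pages may have been dirtied behind our back;
	 * mark the glock dirty so they are synced before it is released.
	 */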
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
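	/*
	 * Bumping the glock version number signals that anything cached
	 * under the old version is stale and must be re-read.
	 */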
	gl->gl_vn++;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip)
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
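			/*
			 * Record a writeback failure in the mapping flags so
			 * that a later fsync() on this inode still sees it.
			 */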
			if (error == -ENOSPC)
				set_bit(AS_ENOSPC, &mapping->flags);
			else if (error)
				set_bit(AS_EIO, &mapping->flags);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

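	/*
	 * For an inode glock the lock number is the disk address of the
	 * dinode, so start a read of that block here to warm the cache,
	 * unless the requesting holder asked us not to with GL_SKIP.
	 */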
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock (unused for too
 * long) is being purged from our node's glock cache; we're dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

	if (ip && S_ISREG(ip->i_inode.i_mode)) {
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
		clear_bit(GIF_PAGED, &ip->i_flags);
	}
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

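	/*
	 * If a truncate was interrupted (GFS2_DIF_TRUNC_IN_PROG is still set
	 * in the dinode) and we now hold the glock exclusively, finish it.
	 */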
	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 * process
 * @gh: the holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock an RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    the first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    the last holder on this node.
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

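	/*
	 * Before the transaction glock changes state, write out all dirty
	 * metadata and shut the log down cleanly; gfs2_log_shutdown() leaves
	 * the journal marked with an unmount header.
	 */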
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log sequence and pointers from the journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching. Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

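/*
 * The operation vectors below bind the callbacks above to each glock type.
 * Roughly: go_xmote_th runs before a lock state change and go_xmote_bh after
 * it completes, go_drop_th runs before the lock is dropped, go_inval throws
 * away cached data, go_demote_ok says whether an unused glock may be
 * released, and go_lock/go_unlock run when a holder acquires or releases the
 * lock on this node.
 */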
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};