/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

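/*
 * Each type of log element below supplies a set of gfs2_log_operations.
 * Broadly (a summary of the code in this file, not a formal contract):
 * lo_add attaches an element to the current transaction and to the
 * superblock's per-type pending list under the log lock, lo_before_commit
 * writes the pending elements into the on-disk log, lo_after_commit drops
 * them from the pending lists once the log write is done, and the
 * lo_*_scan callbacks implement journal replay for that element type.
 */
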
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

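/*
 * On-disk layout produced by buf_lo_before_commit() (a sketch derived
 * from the code below, not a formal on-disk specification):
 *
 *	[ struct gfs2_log_descriptor ]	GFS2_METATYPE_LD / GFS2_LOG_DESC_METADATA
 *	[ __be64 block numbers ... ]	starting at the next __be64 boundary
 *
 * followed by a copy of each pinned metadata buffer, written through
 * gfs2_log_fake_buf().  A descriptor holds at most "limit" block numbers
 * (503 for a 4k block size), so large transactions are split across
 * several descriptors.
 */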
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned int n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (num > limit)
			num = limit;

		bh = gfs2_log_get_buf(sdp);
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

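/*
 * buf_lo_scan_elements - replay logged metadata blocks (journal recovery,
 * pass 1).  For each block number carried by a GFS2_LOG_DESC_METADATA
 * descriptor, the logged copy is read from the journal and, unless a
 * revoke covers it, copied over the in-place block and marked dirty.
 */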
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = get_transaction;
	tr->tr_touched = 1;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}

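/*
 * revoke_lo_before_commit - write the pending revokes into the log.
 * The first block is a GFS2_LOG_DESC_REVOKE descriptor; when it fills up,
 * further blocks carry a plain GFS2_METATYPE_LB header.  Revoked block
 * numbers are packed as consecutive __be64 values after either header.
 */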
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(uint64_t)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(uint64_t) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be16(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be16(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(uint64_t);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

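/*
 * revoke_lo_scan_elements - journal recovery, pass 0.  Walk a revoke
 * descriptor and its GFS2_METATYPE_LB continuation blocks and record each
 * revoked block number with gfs2_revoke_add(), so that later passes skip
 * stale logged copies of those blocks.
 */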
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	uint64_t blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(uint64_t) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;

			offset += sizeof(uint64_t);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 0)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = get_transaction;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = get_v2ip(mapping->host);

	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		gfs2_trans_add_gl(bd->bd_gl);
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
	}
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}

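/*
 * gfs2_check_magic - does this data block need escaping?
 * A journaled data block whose first four bytes happen to equal GFS2_MAGIC
 * could be mistaken for metadata when the log is scanned, so such blocks
 * are written to the log with the first word zeroed ("escaped") and are
 * flagged in their log tag; replay restores the magic number.
 */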
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(page, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (buffer_dirty(bd1->bd_bh)) {
					gfs2_log_unlock(sdp);
					wait_on_buffer(bd1->bd_bh);
					ll_rw_block(WRITE, 1, &bd1->bd_bh);
					gfs2_log_lock(sdp);
				}
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;

				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					ld = (struct gfs2_log_descriptor *)
								bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be16(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be16(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (++n >= num)
					break;
			}
		}
		gfs2_log_unlock(sdp);

		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}

		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;

				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(page, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;

		bh = bd1->bd_bh;
		if (bh) {
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
		} else
			gfs2_log_unlock(sdp);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp,
			 sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

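/*
 * databuf_lo_scan_elements - like buf_lo_scan_elements(), but for journaled
 * data (replay pass 1).  Each tag is two __be64s: the block number and an
 * escape flag; escaped blocks get their leading GFS2_MAGIC restored before
 * the in-place copy is marked dirty.
 */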
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	uint64_t esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

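/*
 * Per-element-type operation tables.  Only the callbacks a type needs are
 * filled in; gfs2_log_ops gathers them into a single NULL-terminated array.
 */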
struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
};

struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
};

struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
};

struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
};

struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
};

struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL
};