[XFS] Interim solution for attribute insertion failure during file
fs/xfs/linux-2.6/xfs_buf.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include "xfs_linux.h"

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t	*bp,
	char		*id,
	void		*data,
	void		*ra)
{
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)	do { } while (0)
#endif

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))

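/*
 * Worked example (added for illustration, not part of the original
 * source): with 4096-byte pages and 64-bit longs, PRSHIFT is 6 and each
 * region covers NBPPR = 64 bytes.  For offset 1024 and length 512,
 * page_region_mask() below computes first = 16 and final = 23, and
 * returns a mask with bits 16..22 set, i.e. 0x7f0000.
 */
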
STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}

STATIC inline void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC inline int
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}

/*
 * Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

	aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

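/*
 * Rationale (summary added for clarity, not in the original source):
 * tearing down a kernel virtual mapping typically forces TLB
 * invalidation across CPUs, so deferring unmaps through free_address()
 * and draining them in one purge_addresses() pass amortises that cost
 * over many buffers.
 */
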
/*
 * Internal xfs_buf_t object manipulation
 */

STATIC void
_xfs_buf_initialize(
	xfs_buf_t	*bp,
	xfs_buftarg_t	*target,
	xfs_off_t	range_base,
	size_t		range_length,
	xfs_buf_flags_t	flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t	*bp,
	int		page_count,
	xfs_buf_flags_t	flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages,
			  bp->b_page_count * sizeof(struct page *));
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t	*bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++)
			page_cache_release(bp->b_pages[i]);
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		 /*
		  * XXX(hch): bp->b_count_desired might be incorrect (see
		  * xfs_buf_associate_memory for details), but fortunately
		  * the Linux version of kmem_free ignores the len argument..
		  */
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}

/*
 * Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t	*bp,
	uint		flags)
{
	struct address_space *mapping = bp->b_target->bt_mapping;
	size_t		blocksize = bp->b_target->bt_bsize;
	size_t		size = bp->b_count_desired;
	size_t		nbytes, offset;
	gfp_t		gfp_mask = xb_to_gfp(flags);
	unsigned short	page_count, i;
	pgoff_t		first;
	xfs_off_t	end;
	int		error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	      retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			blk_congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!bp->b_locked) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t	*bp,
	uint		flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

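/*
 * Design note (added summary, not from the original source): multi-page
 * buffers are stitched into one virtual range with vmap(); the matching
 * vunmap() is deferred through free_address() above, and the deferred
 * list is drained here once it grows past 64 entries so that the amount
 * of dead virtual address space held stays bounded.
 */
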
/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode. The buffer is returned
 * locked. If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t	*btp,	/* block device target		*/
	xfs_off_t	ioff,	/* starting offset of range	*/
	size_t		isize,	/* length of range		*/
	xfs_buf_flags_t	flags,
	xfs_buf_t	*new_bp)
{
	xfs_off_t	range_base;
	size_t		range_length;
	xfs_bufhash_t	*hash;
	xfs_buf_t	*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t	*target,/* target for buffer		*/
	xfs_off_t	ioff,	/* starting offset of range	*/
	size_t		isize,	/* length of range		*/
	xfs_buf_flags_t	flags)
{
	xfs_buf_t	*bp, *new_bp;
	int		error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__FUNCTION__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t	*target,
	xfs_off_t	ioff,
	size_t		isize,
	xfs_buf_flags_t	flags)
{
	xfs_buf_t	*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			xfs_buf_iostart(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

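/*
 * Illustrative caller (hypothetical names, added for clarity): a
 * synchronous, locked metadata read and release might look like
 *
 *	bp = xfs_buf_read_flags(target, blkno, nblks, XBF_LOCK);
 *	if (bp && !XFS_BUF_GETERROR(bp))
 *		... use xfs_buf_offset(bp, 0) ...
 *	if (bp)
 *		xfs_buf_relse(bp);
 */
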
/*
 * If we are not low on memory then do the readahead in a
 * deadlock safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t	*target,
	xfs_off_t	ioff,
	size_t		isize,
	xfs_buf_flags_t	flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}

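/*
 * Example (hypothetical values, added for illustration): prefetch eight
 * basic blocks at daddr 'blkno' ahead of a later blocking read, where
 * 'mp' is an assumed xfs_mount:
 *
 *	xfs_buf_readahead(mp->m_ddev_targp, blkno, 8, 0);
 */
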
xfs_buf_t *
xfs_buf_get_empty(
	size_t		len,
	xfs_buftarg_t	*target)
{
	xfs_buf_t	*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void		*addr)
{
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t	*bp,
	void		*mem,
	size_t		len)
{
	int		rval;
	int		i = 0;
	size_t		ptr;
	size_t		end, end_cur;
	off_t		offset;
	int		page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
	while (ptr < end) {
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

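/*
 * Illustrative use (a sketch, not from the original file; 'mem', 'len'
 * and 'target' are assumed caller state): wrap an existing allocation
 * so it can be driven through the buffer I/O paths:
 *
 *	bp = xfs_buf_get_empty(len, target);
 *	if (bp && xfs_buf_associate_memory(bp, mem, len) == 0)
 *		error = xfs_buf_iostart(bp, XBF_READ);
 */
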
xfs_buf_t *
xfs_buf_get_noaddr(
	size_t		len,
	xfs_buftarg_t	*target)
{
	size_t		malloc_len = len;
	xfs_buf_t	*bp;
	void		*data;
	int		error;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

 try_again:
	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
	if (unlikely(data == NULL))
		goto fail_free_buf;

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->bt_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);
		malloc_len <<= 1;
		goto try_again;
	}

	error = xfs_buf_associate_memory(bp, data, len);
	if (error)
		goto fail_free_mem;
	bp->b_flags |= _XBF_KMEM_ALLOC;

	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", data);
	return bp;
 fail_free_mem:
	kmem_free(data, malloc_len);
 fail_free_buf:
	xfs_buf_free(bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t	*bp)
{
	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);
}

/*
 * Releases a hold on the specified buffer. If the
 * hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t	*bp)
{
	xfs_bufhash_t	*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&bp->b_hold) >= 0);
	}
}


/*
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
 */

/*
 * Locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t	*bp)
{
	int		locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
	}
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
	xfs_buf_t	*bp)
{
	return atomic_read(&bp->b_sema.count);
}
#endif

/*
 * Locks a buffer object.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t	*bp)
{
	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t	*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
	XB_TRACE(bp, "unlock", 0);
}


/*
 * Pinning Buffer Storage in Memory
 * Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t	*bp)
{
	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
	xfs_buf_t	*bp)
{
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
	xfs_buf_t	*bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t	*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	void			*v)
{
	xfs_buf_t		*bp = (xfs_buf_t *)v;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(bp);
		}
	} else {
		up(&bp->b_iodonesema);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);
}

/*
 * Initiate I/O on a buffer, based on the flags supplied.
 * The b_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.
 */
int
xfs_buf_iostart(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status = 0;

	XB_TRACE(bp, "iostart", (unsigned long)flags);

	if (flags & XBF_DELWRI) {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
		xfs_buf_delwri_queue(bp, 1);
		return status;
	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	 */
	status = (flags & XBF_WRITE) ?
		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	 */
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);

	return status;
}

STATIC __inline__ int
_xfs_buf_iolocked(
	xfs_buf_t		*bp)
{
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;
	return 0;
}

STATIC __inline__ void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_locked = 0;
		xfs_buf_ioend(bp, schedule);
	}
}

STATIC int
xfs_buf_bio_end_io(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		bp->b_error = EIO;

	do {
		struct page	*page = bvec->bv_page;

		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
			SetPageError(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
	return 0;
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			i, rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	int			locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & _XBF_RUN_QUEUES) {
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
	} else {
		rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
	}

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	}

	/* Special code path for reading a sub page size buffer in --
	 * we populate up the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    (bp->b_flags & XBF_READ) && locking &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		size = 0;

		atomic_inc(&bp->b_io_remaining);

		goto submit_io;
	}

	/* Lock down the pages which we need to for the request */
	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
		for (i = 0; size; i++) {
			int		nbytes = PAGE_CACHE_SIZE - offset;
			struct page	*page = bp->b_pages[i];

			if (nbytes > size)
				nbytes = size;

			lock_page(page);

			size -= nbytes;
			offset = 0;
		}
		offset = bp->b_offset;
		size = bp->b_count_desired;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
		xfs_buf_ioerror(bp, EIO);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "iorequest", 0);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "iowait", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_iodonesema);
	XB_TRACE(bp, "iowaited", (long)bp->b_error);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	caddr_t			data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

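/*
 * Example (added for illustration; 'valid' is an assumed caller value):
 * zero everything in a buffer beyond the first 'valid' bytes, without
 * needing the buffer to be mapped:
 *
 *	xfs_buf_iomove(bp, valid, bp->b_count_desired - valid,
 *		       NULL, XBRW_ZERO);
 */
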
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t	*btp)
{
	xfs_buf_t	*bp, *n;
	xfs_bufhash_t	*hash;
	uint		i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
				BUG_ON(bp->b_bn == 0);
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}

/*
 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t	*btp,
	int		external)
{
	unsigned int	i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
					sizeof(xfs_bufhash_t), KM_SLEEP);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t	*btp)
{
	kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
	btp->bt_hash = NULL;
}

/*
 * buftarg list for delwrite queue processing
 */
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t	*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t	*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
	xfs_buftarg_t	*btp,
	int		external)
{
	xfs_flush_buftarg(btp, 1);
	if (external)
		xfs_blkdev_put(btp->bt_bdev);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp, sizeof(*btp));
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t	*btp,
	unsigned int	blocksize,
	unsigned int	sectorsize,
	int		verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t	*btp,
	struct block_device *bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t	*btp,
	unsigned int	blocksize,
	unsigned int	sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t	*btp,
	struct block_device *bdev)
{
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;
	return 0;
}

STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t	*btp)
{
	int	error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device	*bdev,
	int			external)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
	kmem_free(btp, sizeof(*btp));
	return NULL;
}


/*
 * Delayed write buffer handling
 */
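/*
 * Lifecycle sketch (summary added for clarity, not from the original
 * source): a buffer marked XBF_DELWRI is parked on its target's
 * bt_delwrite_queue with an extra hold reference; xfsbufd() wakes
 * periodically, skips buffers younger than xfs_buf_age_centisecs unless
 * XBT_FORCE_FLUSH is set, and pushes the rest to disk through
 * xfs_buf_iostrategy().
 */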
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t	*bp,
	int		unlock)
{
	struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t	*dwlk = &bp->b_target->bt_delwrite_lock;

	XB_TRACE(bp, "delwri_q", (long)unlock);
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t	*bp)
{
	spinlock_t	*dwlk = &bp->b_target->bt_delwrite_lock;
	int		dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	XB_TRACE(bp, "delwri_dq", (long)dequeued);
}

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

STATIC int
xfsbufd_wakeup(
	int		priority,
	gfp_t		mask)
{
	xfs_buftarg_t	*btp;

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
			continue;
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
	return 0;
}

STATIC int
xfsbufd(
	void		*data)
{
	struct list_head tmp;
	unsigned long	age;
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;

	current->flags |= PF_MEMALLOC;

	INIT_LIST_HEAD(&tmp);
	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		spin_lock(dwlk);
		list_for_each_entry_safe(bp, n, dwq, b_list) {
			XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
			ASSERT(bp->b_flags & XBF_DELWRI);

			if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
				if (!test_bit(XBT_FORCE_FLUSH,
						&target->bt_flags) &&
				    time_before(jiffies,
						bp->b_queuetime + age)) {
					xfs_buf_unlock(bp);
					break;
				}

				bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
				bp->b_flags |= XBF_WRITE;
				list_move(&bp->b_list, &tmp);
			}
		}
		spin_unlock(dwlk);

		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);

			blk_run_address_space(target->bt_mapping);
		}

		if (as_list_len > 0)
			purge_addresses();

		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	} while (!kthread_should_stop());

	return 0;
}

/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device. This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	struct list_head tmp;
	xfs_buf_t	*bp, *n;
	int		pincount = 0;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	INIT_LIST_HEAD(&tmp);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_target == target);
		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
		if (xfs_buf_ispin(bp)) {
			pincount++;
			continue;
		}

		list_move(&bp->b_list, &tmp);
	}
	spin_unlock(dwlk);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		xfs_buf_lock(bp);
		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
		bp->b_flags |= XBF_WRITE;
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
			list_del_init(&bp->b_list);

		xfs_buf_iostrategy(bp);
	}

	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
	}

	if (wait)
		blk_run_address_space(target->bt_mapping);

	return pincount;
}

int __init
xfs_buf_init(void)
{
	int		error = -ENOMEM;

#ifdef XFS_BUF_TRACE
	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif

	xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
	if (!xfs_buf_zone)
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
	if (!xfs_buf_shake)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out_free_trace_buf:
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
	return error;
}

void
xfs_buf_terminate(void)
{
	kmem_shake_deregister(xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
}