/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}
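
/*
 * Worked example (illustrative, assuming 4k pages): a three page buffer
 * with b_offset = 512 has a usable mapped length of
 * 3 * 4096 - 512 = 11776 bytes, because the first 512 bytes of the first
 * page sit before the start of the buffer contents.
 */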

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru) &&
		    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				 KM_NOFS);
	if (!bp->b_maps)
		return ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}
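
/*
 * Illustrative sketch (not compiled): allocating a buffer descriptor over
 * two discontiguous extents with the map interface. The block numbers and
 * lengths are hypothetical; b_length becomes the sum of the map lengths
 * and no pages are allocated yet.
 */
#if 0
static struct xfs_buf *
example_alloc_two_extents(struct xfs_buftarg *target)
{
	struct xfs_buf_map map[2] = {
		{ .bm_bn = 64,  .bm_len = 8 },	/* made-up daddrs/lengths */
		{ .bm_bn = 256, .bm_len = 8 },
	};

	return _xfs_buf_alloc(target, map, 2, 0);
}
#endif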

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS low-level code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}
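
/*
 * Worked example (illustrative, assuming 4k pages) for the page count
 * computation above: a buffer of 8 basic blocks (4096 bytes) starting at
 * daddr 7 gives start = (7 * 512) >> 12 = 0 and
 * end = ((7 + 8) * 512 + 4095) >> 12 = 2, so page_count = 2 - the buffer
 * is not page aligned and therefore spans two pages.
 */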

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * Look up, and create if absent, a lockable buffer for
 * a given range of an inode. The buffer is returned
 * locked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	size_t			numbytes;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno >= eofs) {
		/*
		 * XXX (dgc): we should really be returning EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		return NULL;
	}

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				 "%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}
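
/*
 * Illustrative sketch (not compiled): the common single-extent lookup.
 * The daddr and length are hypothetical; a real caller derives them from
 * the filesystem geometry.
 */
#if 0
static void
example_get_one_extent(struct xfs_buftarg *target, xfs_daddr_t blkno,
		       int numblks)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	struct xfs_buf	*bp;

	bp = xfs_buf_get_map(target, &map, 1, XBF_TRYLOCK);
	if (!bp)
		return;		/* lock contention or allocation failure */
	/* ... modify the buffer contents via bp->b_addr ... */
	xfs_buf_relse(bp);	/* unlock and drop our reference */
}
#endif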

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
		return 0;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}
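
/*
 * Illustrative sketch (not compiled): reading a buffer with a verifier
 * attached. example_verify_read/_write and example_buf_ops are
 * hypothetical; real verifiers check magic numbers, CRCs etc. and flag
 * failures via xfs_buf_ioerror().
 */
#if 0
static void example_verify_read(struct xfs_buf *bp)  { /* check bp->b_addr */ }
static void example_verify_write(struct xfs_buf *bp) { /* check before IO */ }

static const struct xfs_buf_ops example_buf_ops = {
	.verify_read	= example_verify_read,
	.verify_write	= example_verify_write,
};

static struct xfs_buf *
example_read_verified(struct xfs_buftarg *target, xfs_daddr_t blkno,
		      int numblks)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	/* caller must check bp->b_error before trusting the contents */
	return xfs_buf_read_map(target, &map, 1, 0, &example_buf_ops);
}
#endif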

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
			 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = daddr;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfsbdstrat(target->bt_mount, bp);
	xfs_buf_iowait(bp);
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}
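
/*
 * Illustrative sketch (not compiled), loosely modelled on how log
 * recovery repoints a buffer at caller-owned memory: forget any existing
 * page list, then describe the external region. The function name is
 * hypothetical.
 */
#if 0
static int
example_point_buf_at_memory(struct xfs_buf *bp, void *mem, size_t len)
{
	xfs_buf_set_empty(bp, BTOBB(len));	/* drop the old page list */
	return xfs_buf_associate_memory(bp, mem, len);
}
#endif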

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			 "%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
		    atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}

/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}
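
/*
 * Illustrative sketch (not compiled): the try-lock pattern used by
 * lock-avoiding walkers. A real caller does work while holding the lock;
 * the body is elided here and the function name is hypothetical.
 */
#if 0
static bool
example_trylock_pattern(struct xfs_buf *bp)
{
	if (!xfs_buf_trylock(bp))
		return false;	/* busy - caller retries later */
	/* ... inspect or modify the locked buffer ... */
	xfs_buf_unlock(bp);
	return true;
}
#endif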

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
	bool			read = !!(bp->b_flags & XBF_READ);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (read && bp->b_ops)
		bp->b_ops->verify_read(bp);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else {
		ASSERT(read && bp->b_ops);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioend(
	struct xfs_buf	*bp,
	int		schedule)
{
	bool		read = !!(bp->b_flags & XBF_READ);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		  (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned; we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

STATIC int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);

	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining))
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (!bp->b_error)
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		rw)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_iorequest) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		rw;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * xfs_buf_ioapply_map() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

void
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, so that an I/O completion callout
	 * which happens before we have started all the I/O cannot call
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 1);

	xfs_buf_rele(bp);
}

/*
 * Waits for I/O to complete on the buffer supplied. It returns immediately if
 * no I/O is pending or there is already a pending error on the buffer. It
 * returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (!bp->b_error)
		wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
			      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
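
/*
 * Illustrative sketch (not compiled): zeroing a byte range of a buffer
 * with xfs_buf_iomove. XBRW_ZERO ignores the data pointer, so NULL is
 * passed; the function name is hypothetical.
 */
#if 0
static void
example_zero_range(struct xfs_buf *bp, size_t boff, size_t len)
{
	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
}
#endif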

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			 "Cannot set_blocksize to %u on device %s\n",
			 sectorsize, name);
		return EINVAL;
	}

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so we don't know what size sectors
 * are being used at this early stage. Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}
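
/*
 * Illustrative sketch (not compiled): the queue-then-submit pattern. The
 * list is local to the caller, as the comment above requires; the buffer
 * must be locked to queue it and is unlocked before submission. The
 * function name is hypothetical.
 */
#if 0
static int
example_delwri_flush_one(struct xfs_buf *bp)
{
	LIST_HEAD(buffer_list);

	xfs_buf_lock(bp);
	xfs_buf_delwri_queue(bp, &buffer_list);	/* takes a hold */
	xfs_buf_unlock(bp);

	/* writes everything on the list and waits for completion */
	return xfs_buf_delwri_submit(&buffer_list);
}
#endif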

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64-bit values.
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

static int
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	bool			wait)
{
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;
	int			pinned = 0;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime. In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
		bp->b_flags |= XBF_WRITE;

		if (!wait) {
			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD(io_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	return 0;

 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}