block: Abstract out bvec iterator
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] fs/xfs/xfs_buf.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 #include "xfs_log_format.h"
38 #include "xfs_trans_resv.h"
39 #include "xfs_sb.h"
40 #include "xfs_ag.h"
41 #include "xfs_mount.h"
42 #include "xfs_trace.h"
43 #include "xfs_log.h"
44
45 static kmem_zone_t *xfs_buf_zone;
46
47 static struct workqueue_struct *xfslogd_workqueue;
48
49 #ifdef XFS_BUF_LOCK_TRACKING
50 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
51 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
52 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
53 #else
54 # define XB_SET_OWNER(bp) do { } while (0)
55 # define XB_CLEAR_OWNER(bp) do { } while (0)
56 # define XB_GET_OWNER(bp) do { } while (0)
57 #endif
58
59 #define xb_to_gfp(flags) \
60 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
61
62
63 static inline int
64 xfs_buf_is_vmapped(
65 struct xfs_buf *bp)
66 {
67 /*
68 * Return true if the buffer is vmapped.
69 *
70 * b_addr is null if the buffer is not mapped, but the code is clever
71 * enough to know it doesn't have to map a single page, so the check has
72 * to be both for b_addr and bp->b_page_count > 1.
73 */
74 return bp->b_addr && bp->b_page_count > 1;
75 }
76
77 static inline int
78 xfs_buf_vmap_len(
79 struct xfs_buf *bp)
80 {
81 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
82 }
83
84 /*
85 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
86 * b_lru_ref count so that the buffer is freed immediately when the buffer
87 * reference count falls to zero. If the buffer is already on the LRU, we need
88 * to remove the reference that LRU holds on the buffer.
89 *
90 * This prevents build-up of stale buffers on the LRU.
91 */
92 void
93 xfs_buf_stale(
94 struct xfs_buf *bp)
95 {
96 ASSERT(xfs_buf_islocked(bp));
97
98 bp->b_flags |= XBF_STALE;
99
100 /*
101 * Clear the delwri status so that a delwri queue walker will not
102 * flush this buffer to disk now that it is stale. The delwri queue has
103 * a reference to the buffer, so this is safe to do.
104 */
105 bp->b_flags &= ~_XBF_DELWRI_Q;
106
107 spin_lock(&bp->b_lock);
108 atomic_set(&bp->b_lru_ref, 0);
109 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
110 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
111 atomic_dec(&bp->b_hold);
112
113 ASSERT(atomic_read(&bp->b_hold) >= 1);
114 spin_unlock(&bp->b_lock);
115 }
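/*
 * Illustrative sketch, not part of the original file: a caller that is
 * invalidating a buffer it already holds locked (e.g. for a freed extent)
 * would typically do
 *
 *	xfs_buf_stale(bp);
 *	xfs_buf_relse(bp);
 *
 * so that the buffer is freed as soon as the last hold is dropped instead
 * of lingering on the LRU.
 */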
116
117 static int
118 xfs_buf_get_maps(
119 struct xfs_buf *bp,
120 int map_count)
121 {
122 ASSERT(bp->b_maps == NULL);
123 bp->b_map_count = map_count;
124
125 if (map_count == 1) {
126 bp->b_maps = &bp->__b_map;
127 return 0;
128 }
129
130 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
131 KM_NOFS);
132 if (!bp->b_maps)
133 return ENOMEM;
134 return 0;
135 }
136
137 /*
138  * Frees b_maps if it was allocated.
139 */
140 static void
141 xfs_buf_free_maps(
142 struct xfs_buf *bp)
143 {
144 if (bp->b_maps != &bp->__b_map) {
145 kmem_free(bp->b_maps);
146 bp->b_maps = NULL;
147 }
148 }
149
150 struct xfs_buf *
151 _xfs_buf_alloc(
152 struct xfs_buftarg *target,
153 struct xfs_buf_map *map,
154 int nmaps,
155 xfs_buf_flags_t flags)
156 {
157 struct xfs_buf *bp;
158 int error;
159 int i;
160
161 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
162 if (unlikely(!bp))
163 return NULL;
164
165 /*
166 * We don't want certain flags to appear in b_flags unless they are
167 * specifically set by later operations on the buffer.
168 */
169 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
170
171 atomic_set(&bp->b_hold, 1);
172 atomic_set(&bp->b_lru_ref, 1);
173 init_completion(&bp->b_iowait);
174 INIT_LIST_HEAD(&bp->b_lru);
175 INIT_LIST_HEAD(&bp->b_list);
176 RB_CLEAR_NODE(&bp->b_rbnode);
177 sema_init(&bp->b_sema, 0); /* held, no waiters */
178 spin_lock_init(&bp->b_lock);
179 XB_SET_OWNER(bp);
180 bp->b_target = target;
181 bp->b_flags = flags;
182
183 /*
184 * Set length and io_length to the same value initially.
185 * I/O routines should use io_length, which will be the same in
186 * most cases but may be reset (e.g. XFS recovery).
187 */
188 error = xfs_buf_get_maps(bp, nmaps);
189 if (error) {
190 kmem_zone_free(xfs_buf_zone, bp);
191 return NULL;
192 }
193
194 bp->b_bn = map[0].bm_bn;
195 bp->b_length = 0;
196 for (i = 0; i < nmaps; i++) {
197 bp->b_maps[i].bm_bn = map[i].bm_bn;
198 bp->b_maps[i].bm_len = map[i].bm_len;
199 bp->b_length += map[i].bm_len;
200 }
201 bp->b_io_length = bp->b_length;
202
203 atomic_set(&bp->b_pin_count, 0);
204 init_waitqueue_head(&bp->b_waiters);
205
206 XFS_STATS_INC(xb_create);
207 trace_xfs_buf_init(bp, _RET_IP_);
208
209 return bp;
210 }
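/*
 * Illustrative sketch, not part of the original file (variable names are
 * placeholders): callers describe a possibly discontiguous disk range with
 * an array of xfs_buf_map entries, e.g. a two-extent buffer:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	bp = _xfs_buf_alloc(target, map, 2, 0);
 *
 * b_length then covers the sum of the map lengths, as computed above.
 */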
211
212 /*
213 * Allocate a page array capable of holding a specified number
214 * of pages, and point the page buf at it.
215 */
216 STATIC int
217 _xfs_buf_get_pages(
218 xfs_buf_t *bp,
219 int page_count,
220 xfs_buf_flags_t flags)
221 {
222 /* Make sure that we have a page list */
223 if (bp->b_pages == NULL) {
224 bp->b_page_count = page_count;
225 if (page_count <= XB_PAGES) {
226 bp->b_pages = bp->b_page_array;
227 } else {
228 bp->b_pages = kmem_alloc(sizeof(struct page *) *
229 page_count, KM_NOFS);
230 if (bp->b_pages == NULL)
231 return -ENOMEM;
232 }
233 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
234 }
235 return 0;
236 }
237
238 /*
239 * Frees b_pages if it was allocated.
240 */
241 STATIC void
242 _xfs_buf_free_pages(
243 xfs_buf_t *bp)
244 {
245 if (bp->b_pages != bp->b_page_array) {
246 kmem_free(bp->b_pages);
247 bp->b_pages = NULL;
248 }
249 }
250
251 /*
252 * Releases the specified buffer.
253 *
254 * The modification state of any associated pages is left unchanged.
255 * The buffer must not be on any hash - use xfs_buf_rele instead for
256 * hashed and refcounted buffers
257 */
258 void
259 xfs_buf_free(
260 xfs_buf_t *bp)
261 {
262 trace_xfs_buf_free(bp, _RET_IP_);
263
264 ASSERT(list_empty(&bp->b_lru));
265
266 if (bp->b_flags & _XBF_PAGES) {
267 uint i;
268
269 if (xfs_buf_is_vmapped(bp))
270 vm_unmap_ram(bp->b_addr - bp->b_offset,
271 bp->b_page_count);
272
273 for (i = 0; i < bp->b_page_count; i++) {
274 struct page *page = bp->b_pages[i];
275
276 __free_page(page);
277 }
278 } else if (bp->b_flags & _XBF_KMEM)
279 kmem_free(bp->b_addr);
280 _xfs_buf_free_pages(bp);
281 xfs_buf_free_maps(bp);
282 kmem_zone_free(xfs_buf_zone, bp);
283 }
284
285 /*
286  * Allocates all the pages for the buffer in question and builds its page list.
287 */
288 STATIC int
289 xfs_buf_allocate_memory(
290 xfs_buf_t *bp,
291 uint flags)
292 {
293 size_t size;
294 size_t nbytes, offset;
295 gfp_t gfp_mask = xb_to_gfp(flags);
296 unsigned short page_count, i;
297 xfs_off_t start, end;
298 int error;
299
300 /*
301 * for buffers that are contained within a single page, just allocate
302 * the memory from the heap - there's no need for the complexity of
303 * page arrays to keep allocation down to order 0.
304 */
305 size = BBTOB(bp->b_length);
306 if (size < PAGE_SIZE) {
307 bp->b_addr = kmem_alloc(size, KM_NOFS);
308 if (!bp->b_addr) {
309 /* low memory - use alloc_page loop instead */
310 goto use_alloc_page;
311 }
312
313 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
314 ((unsigned long)bp->b_addr & PAGE_MASK)) {
315 /* b_addr spans two pages - use alloc_page instead */
316 kmem_free(bp->b_addr);
317 bp->b_addr = NULL;
318 goto use_alloc_page;
319 }
320 bp->b_offset = offset_in_page(bp->b_addr);
321 bp->b_pages = bp->b_page_array;
322 bp->b_pages[0] = virt_to_page(bp->b_addr);
323 bp->b_page_count = 1;
324 bp->b_flags |= _XBF_KMEM;
325 return 0;
326 }
327
328 use_alloc_page:
329 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
330 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
331 >> PAGE_SHIFT;
332 page_count = end - start;
333 error = _xfs_buf_get_pages(bp, page_count, flags);
334 if (unlikely(error))
335 return error;
336
337 offset = bp->b_offset;
338 bp->b_flags |= _XBF_PAGES;
339
340 for (i = 0; i < bp->b_page_count; i++) {
341 struct page *page;
342 uint retries = 0;
343 retry:
344 page = alloc_page(gfp_mask);
345 if (unlikely(page == NULL)) {
346 if (flags & XBF_READ_AHEAD) {
347 bp->b_page_count = i;
348 error = ENOMEM;
349 goto out_free_pages;
350 }
351
352 /*
353 * This could deadlock.
354 *
355 * But until all the XFS lowlevel code is revamped to
356 * handle buffer allocation failures we can't do much.
357 */
358 if (!(++retries % 100))
359 xfs_err(NULL,
360 "possible memory allocation deadlock in %s (mode:0x%x)",
361 __func__, gfp_mask);
362
363 XFS_STATS_INC(xb_page_retries);
364 congestion_wait(BLK_RW_ASYNC, HZ/50);
365 goto retry;
366 }
367
368 XFS_STATS_INC(xb_page_found);
369
370 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
371 size -= nbytes;
372 bp->b_pages[i] = page;
373 offset = 0;
374 }
375 return 0;
376
377 out_free_pages:
378 for (i = 0; i < bp->b_page_count; i++)
379 __free_page(bp->b_pages[i]);
380 return error;
381 }
382
383 /*
384 * Map buffer into kernel address-space if necessary.
385 */
386 STATIC int
387 _xfs_buf_map_pages(
388 xfs_buf_t *bp,
389 uint flags)
390 {
391 ASSERT(bp->b_flags & _XBF_PAGES);
392 if (bp->b_page_count == 1) {
393 /* A single page buffer is always mappable */
394 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
395 } else if (flags & XBF_UNMAPPED) {
396 bp->b_addr = NULL;
397 } else {
398 int retried = 0;
399
400 do {
401 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
402 -1, PAGE_KERNEL);
403 if (bp->b_addr)
404 break;
405 vm_unmap_aliases();
406 } while (retried++ <= 1);
407
408 if (!bp->b_addr)
409 return -ENOMEM;
410 bp->b_addr += bp->b_offset;
411 }
412
413 return 0;
414 }
415
416 /*
417 * Finding and Reading Buffers
418 */
419
420 /*
421  * Looks up, and creates if absent, a lockable buffer for a given range
422  * of the block device. The buffer is returned locked. No I/O is
423  * implied by this call.
424 */
425 xfs_buf_t *
426 _xfs_buf_find(
427 struct xfs_buftarg *btp,
428 struct xfs_buf_map *map,
429 int nmaps,
430 xfs_buf_flags_t flags,
431 xfs_buf_t *new_bp)
432 {
433 size_t numbytes;
434 struct xfs_perag *pag;
435 struct rb_node **rbp;
436 struct rb_node *parent;
437 xfs_buf_t *bp;
438 xfs_daddr_t blkno = map[0].bm_bn;
439 xfs_daddr_t eofs;
440 int numblks = 0;
441 int i;
442
443 for (i = 0; i < nmaps; i++)
444 numblks += map[i].bm_len;
445 numbytes = BBTOB(numblks);
446
447 /* Check for IOs smaller than the sector size / not sector aligned */
448 ASSERT(!(numbytes < (1 << btp->bt_sshift)));
449 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
450
451 /*
452 * Corrupted block numbers can get through to here, unfortunately, so we
453 * have to check that the buffer falls within the filesystem bounds.
454 */
455 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
456 if (blkno >= eofs) {
457 /*
458 * XXX (dgc): we should really be returning EFSCORRUPTED here,
459 * but none of the higher level infrastructure supports
460 * returning a specific error on buffer lookup failures.
461 */
462 xfs_alert(btp->bt_mount,
463 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
464 __func__, blkno, eofs);
465 WARN_ON(1);
466 return NULL;
467 }
468
469 /* get tree root */
470 pag = xfs_perag_get(btp->bt_mount,
471 xfs_daddr_to_agno(btp->bt_mount, blkno));
472
473 /* walk tree */
474 spin_lock(&pag->pag_buf_lock);
475 rbp = &pag->pag_buf_tree.rb_node;
476 parent = NULL;
477 bp = NULL;
478 while (*rbp) {
479 parent = *rbp;
480 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
481
482 if (blkno < bp->b_bn)
483 rbp = &(*rbp)->rb_left;
484 else if (blkno > bp->b_bn)
485 rbp = &(*rbp)->rb_right;
486 else {
487 /*
488 * found a block number match. If the range doesn't
489 * match, the only way this is allowed is if the buffer
490 * in the cache is stale and the transaction that made
491 * it stale has not yet committed. i.e. we are
492 * reallocating a busy extent. Skip this buffer and
493 * continue searching to the right for an exact match.
494 */
495 if (bp->b_length != numblks) {
496 ASSERT(bp->b_flags & XBF_STALE);
497 rbp = &(*rbp)->rb_right;
498 continue;
499 }
500 atomic_inc(&bp->b_hold);
501 goto found;
502 }
503 }
504
505 /* No match found */
506 if (new_bp) {
507 rb_link_node(&new_bp->b_rbnode, parent, rbp);
508 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
509 /* the buffer keeps the perag reference until it is freed */
510 new_bp->b_pag = pag;
511 spin_unlock(&pag->pag_buf_lock);
512 } else {
513 XFS_STATS_INC(xb_miss_locked);
514 spin_unlock(&pag->pag_buf_lock);
515 xfs_perag_put(pag);
516 }
517 return new_bp;
518
519 found:
520 spin_unlock(&pag->pag_buf_lock);
521 xfs_perag_put(pag);
522
523 if (!xfs_buf_trylock(bp)) {
524 if (flags & XBF_TRYLOCK) {
525 xfs_buf_rele(bp);
526 XFS_STATS_INC(xb_busy_locked);
527 return NULL;
528 }
529 xfs_buf_lock(bp);
530 XFS_STATS_INC(xb_get_locked_waited);
531 }
532
533 /*
534 * if the buffer is stale, clear all the external state associated with
535 * it. We need to keep flags such as how we allocated the buffer memory
536 * intact here.
537 */
538 if (bp->b_flags & XBF_STALE) {
539 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
540 ASSERT(bp->b_iodone == NULL);
541 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
542 bp->b_ops = NULL;
543 }
544
545 trace_xfs_buf_find(bp, flags, _RET_IP_);
546 XFS_STATS_INC(xb_get_locked);
547 return bp;
548 }
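/*
 * Illustrative sketch, not part of the original file: a pure cache lookup
 * passes a NULL new_bp, e.g.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	bp = _xfs_buf_find(target, &map, 1, XBF_TRYLOCK, NULL);
 *
 * which returns NULL on a miss (or when XBF_TRYLOCK cannot get the lock)
 * and a locked, referenced buffer on a hit.
 */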
549
550 /*
551 * Assembles a buffer covering the specified range. The code is optimised for
552 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
553 * more hits than misses.
554 */
555 struct xfs_buf *
556 xfs_buf_get_map(
557 struct xfs_buftarg *target,
558 struct xfs_buf_map *map,
559 int nmaps,
560 xfs_buf_flags_t flags)
561 {
562 struct xfs_buf *bp;
563 struct xfs_buf *new_bp;
564 int error = 0;
565
566 bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
567 if (likely(bp))
568 goto found;
569
570 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
571 if (unlikely(!new_bp))
572 return NULL;
573
574 error = xfs_buf_allocate_memory(new_bp, flags);
575 if (error) {
576 xfs_buf_free(new_bp);
577 return NULL;
578 }
579
580 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
581 if (!bp) {
582 xfs_buf_free(new_bp);
583 return NULL;
584 }
585
586 if (bp != new_bp)
587 xfs_buf_free(new_bp);
588
589 found:
590 if (!bp->b_addr) {
591 error = _xfs_buf_map_pages(bp, flags);
592 if (unlikely(error)) {
593 xfs_warn(target->bt_mount,
594 "%s: failed to map pagesn", __func__);
595 xfs_buf_relse(bp);
596 return NULL;
597 }
598 }
599
600 XFS_STATS_INC(xb_get);
601 trace_xfs_buf_get(bp, flags, _RET_IP_);
602 return bp;
603 }
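/*
 * Illustrative sketch, not part of the original file (blkno/numblks are
 * placeholders): the common single-extent case is
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	bp = xfs_buf_get_map(target, &map, 1, 0);
 *	if (bp) {
 *		... use bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 *
 * The buffer comes back locked and held; no I/O is implied by the get.
 */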
604
605 STATIC int
606 _xfs_buf_read(
607 xfs_buf_t *bp,
608 xfs_buf_flags_t flags)
609 {
610 ASSERT(!(flags & XBF_WRITE));
611 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
612
613 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
614 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
615
616 xfs_buf_iorequest(bp);
617 if (flags & XBF_ASYNC)
618 return 0;
619 return xfs_buf_iowait(bp);
620 }
621
622 xfs_buf_t *
623 xfs_buf_read_map(
624 struct xfs_buftarg *target,
625 struct xfs_buf_map *map,
626 int nmaps,
627 xfs_buf_flags_t flags,
628 const struct xfs_buf_ops *ops)
629 {
630 struct xfs_buf *bp;
631
632 flags |= XBF_READ;
633
634 bp = xfs_buf_get_map(target, map, nmaps, flags);
635 if (bp) {
636 trace_xfs_buf_read(bp, flags, _RET_IP_);
637
638 if (!XFS_BUF_ISDONE(bp)) {
639 XFS_STATS_INC(xb_get_read);
640 bp->b_ops = ops;
641 _xfs_buf_read(bp, flags);
642 } else if (flags & XBF_ASYNC) {
643 /*
644 * Read ahead call which is already satisfied,
645 * drop the buffer
646 */
647 xfs_buf_relse(bp);
648 return NULL;
649 } else {
650 /* We do not want read in the flags */
651 bp->b_flags &= ~XBF_READ;
652 }
653 }
654
655 return bp;
656 }
657
658 /*
659 * If we are not low on memory then do the readahead in a deadlock
660 * safe manner.
661 */
662 void
663 xfs_buf_readahead_map(
664 struct xfs_buftarg *target,
665 struct xfs_buf_map *map,
666 int nmaps,
667 const struct xfs_buf_ops *ops)
668 {
669 if (bdi_read_congested(target->bt_bdi))
670 return;
671
672 xfs_buf_read_map(target, map, nmaps,
673 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
674 }
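/*
 * Illustrative sketch, not part of the original file: readahead is purely
 * advisory. A caller typically issues it early and later performs a normal
 * blocking read of the same range:
 *
 *	xfs_buf_readahead_map(target, &map, 1, ops);
 *	...
 *	bp = xfs_buf_read_map(target, &map, 1, 0, ops);
 *
 * If the readahead I/O has already completed, the second call is a plain
 * cache hit.
 */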
675
676 /*
677 * Read an uncached buffer from disk. Allocates and returns a locked
678 * buffer containing the disk contents or nothing.
679 */
680 struct xfs_buf *
681 xfs_buf_read_uncached(
682 struct xfs_buftarg *target,
683 xfs_daddr_t daddr,
684 size_t numblks,
685 int flags,
686 const struct xfs_buf_ops *ops)
687 {
688 struct xfs_buf *bp;
689
690 bp = xfs_buf_get_uncached(target, numblks, flags);
691 if (!bp)
692 return NULL;
693
694 /* set up the buffer for a read IO */
695 ASSERT(bp->b_map_count == 1);
696 bp->b_bn = daddr;
697 bp->b_maps[0].bm_bn = daddr;
698 bp->b_flags |= XBF_READ;
699 bp->b_ops = ops;
700
701 xfsbdstrat(target->bt_mount, bp);
702 xfs_buf_iowait(bp);
703 return bp;
704 }
705
706 /*
707 * Return a buffer allocated as an empty buffer and associated to external
708  * memory via xfs_buf_associate_memory() back to its empty state.
709 */
710 void
711 xfs_buf_set_empty(
712 struct xfs_buf *bp,
713 size_t numblks)
714 {
715 if (bp->b_pages)
716 _xfs_buf_free_pages(bp);
717
718 bp->b_pages = NULL;
719 bp->b_page_count = 0;
720 bp->b_addr = NULL;
721 bp->b_length = numblks;
722 bp->b_io_length = numblks;
723
724 ASSERT(bp->b_map_count == 1);
725 bp->b_bn = XFS_BUF_DADDR_NULL;
726 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
727 bp->b_maps[0].bm_len = bp->b_length;
728 }
729
730 static inline struct page *
731 mem_to_page(
732 void *addr)
733 {
734 if ((!is_vmalloc_addr(addr))) {
735 return virt_to_page(addr);
736 } else {
737 return vmalloc_to_page(addr);
738 }
739 }
740
741 int
742 xfs_buf_associate_memory(
743 xfs_buf_t *bp,
744 void *mem,
745 size_t len)
746 {
747 int rval;
748 int i = 0;
749 unsigned long pageaddr;
750 unsigned long offset;
751 size_t buflen;
752 int page_count;
753
754 pageaddr = (unsigned long)mem & PAGE_MASK;
755 offset = (unsigned long)mem - pageaddr;
756 buflen = PAGE_ALIGN(len + offset);
757 page_count = buflen >> PAGE_SHIFT;
758
759 /* Free any previous set of page pointers */
760 if (bp->b_pages)
761 _xfs_buf_free_pages(bp);
762
763 bp->b_pages = NULL;
764 bp->b_addr = mem;
765
766 rval = _xfs_buf_get_pages(bp, page_count, 0);
767 if (rval)
768 return rval;
769
770 bp->b_offset = offset;
771
772 for (i = 0; i < bp->b_page_count; i++) {
773 bp->b_pages[i] = mem_to_page((void *)pageaddr);
774 pageaddr += PAGE_SIZE;
775 }
776
777 bp->b_io_length = BTOBB(len);
778 bp->b_length = BTOBB(buflen);
779
780 return 0;
781 }
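/*
 * Illustrative sketch, not part of the original file (the surrounding setup
 * is hypothetical): a caller that owns a memory region can attach it to a
 * buffer so I/O is done directly to/from that memory:
 *
 *	error = xfs_buf_associate_memory(bp, mem, len);
 *	if (error)
 *		return error;
 *
 * b_io_length is then derived from len, b_length from the page-aligned
 * span, and mem_to_page() copes with both kmalloc'd and vmalloc'd memory.
 */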
782
783 xfs_buf_t *
784 xfs_buf_get_uncached(
785 struct xfs_buftarg *target,
786 size_t numblks,
787 int flags)
788 {
789 unsigned long page_count;
790 int error, i;
791 struct xfs_buf *bp;
792 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
793
794 bp = _xfs_buf_alloc(target, &map, 1, 0);
795 if (unlikely(bp == NULL))
796 goto fail;
797
798 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
799 error = _xfs_buf_get_pages(bp, page_count, 0);
800 if (error)
801 goto fail_free_buf;
802
803 for (i = 0; i < page_count; i++) {
804 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
805 if (!bp->b_pages[i])
806 goto fail_free_mem;
807 }
808 bp->b_flags |= _XBF_PAGES;
809
810 error = _xfs_buf_map_pages(bp, 0);
811 if (unlikely(error)) {
812 xfs_warn(target->bt_mount,
813 "%s: failed to map pages", __func__);
814 goto fail_free_mem;
815 }
816
817 trace_xfs_buf_get_uncached(bp, _RET_IP_);
818 return bp;
819
820 fail_free_mem:
821 while (--i >= 0)
822 __free_page(bp->b_pages[i]);
823 _xfs_buf_free_pages(bp);
824 fail_free_buf:
825 xfs_buf_free_maps(bp);
826 kmem_zone_free(xfs_buf_zone, bp);
827 fail:
828 return NULL;
829 }
830
831 /*
832 * Increment reference count on buffer, to hold the buffer concurrently
833 * with another thread which may release (free) the buffer asynchronously.
834 * Must hold the buffer already to call this function.
835 */
836 void
837 xfs_buf_hold(
838 xfs_buf_t *bp)
839 {
840 trace_xfs_buf_hold(bp, _RET_IP_);
841 atomic_inc(&bp->b_hold);
842 }
843
844 /*
845 * Releases a hold on the specified buffer. If the
846  * hold count is 1, calls xfs_buf_free.
847 */
848 void
849 xfs_buf_rele(
850 xfs_buf_t *bp)
851 {
852 struct xfs_perag *pag = bp->b_pag;
853
854 trace_xfs_buf_rele(bp, _RET_IP_);
855
856 if (!pag) {
857 ASSERT(list_empty(&bp->b_lru));
858 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
859 if (atomic_dec_and_test(&bp->b_hold))
860 xfs_buf_free(bp);
861 return;
862 }
863
864 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
865
866 ASSERT(atomic_read(&bp->b_hold) > 0);
867 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
868 spin_lock(&bp->b_lock);
869 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
870 /*
871 * If the buffer is added to the LRU take a new
872 * reference to the buffer for the LRU and clear the
873 * (now stale) dispose list state flag
874 */
875 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
876 bp->b_state &= ~XFS_BSTATE_DISPOSE;
877 atomic_inc(&bp->b_hold);
878 }
879 spin_unlock(&bp->b_lock);
880 spin_unlock(&pag->pag_buf_lock);
881 } else {
882 /*
883 * most of the time buffers will already be removed from
884			 * the LRU, so optimise that case by checking for the
885			 * XFS_BSTATE_DISPOSE flag, which indicates that the last
886			 * list the buffer was on was the disposal list.
887 */
888 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
889 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
890 } else {
891 ASSERT(list_empty(&bp->b_lru));
892 }
893 spin_unlock(&bp->b_lock);
894
895 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
896 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
897 spin_unlock(&pag->pag_buf_lock);
898 xfs_perag_put(pag);
899 xfs_buf_free(bp);
900 }
901 }
902 }
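/*
 * Illustrative sketch, not part of the original file: a thread that must
 * keep using a buffer after handing it to code that may release it takes
 * an extra hold around the window:
 *
 *	xfs_buf_hold(bp);
 *	... pass bp to code that may call xfs_buf_rele() ...
 *	xfs_buf_rele(bp);
 *
 * Only when the final hold drops does the buffer go back onto the LRU, or
 * get freed immediately if it is stale or b_lru_ref has reached zero.
 */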
903
904
905 /*
906 * Lock a buffer object, if it is not already locked.
907 *
908 * If we come across a stale, pinned, locked buffer, we know that we are
909 * being asked to lock a buffer that has been reallocated. Because it is
910 * pinned, we know that the log has not been pushed to disk and hence it
911 * will still be locked. Rather than continuing to have trylock attempts
912 * fail until someone else pushes the log, push it ourselves before
913 * returning. This means that the xfsaild will not get stuck trying
914 * to push on stale inode buffers.
915 */
916 int
917 xfs_buf_trylock(
918 struct xfs_buf *bp)
919 {
920 int locked;
921
922 locked = down_trylock(&bp->b_sema) == 0;
923 if (locked)
924 XB_SET_OWNER(bp);
925
926 trace_xfs_buf_trylock(bp, _RET_IP_);
927 return locked;
928 }
929
930 /*
931 * Lock a buffer object.
932 *
933 * If we come across a stale, pinned, locked buffer, we know that we
934 * are being asked to lock a buffer that has been reallocated. Because
935 * it is pinned, we know that the log has not been pushed to disk and
936 * hence it will still be locked. Rather than sleeping until someone
937 * else pushes the log, push it ourselves before trying to get the lock.
938 */
939 void
940 xfs_buf_lock(
941 struct xfs_buf *bp)
942 {
943 trace_xfs_buf_lock(bp, _RET_IP_);
944
945 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
946 xfs_log_force(bp->b_target->bt_mount, 0);
947 down(&bp->b_sema);
948 XB_SET_OWNER(bp);
949
950 trace_xfs_buf_lock_done(bp, _RET_IP_);
951 }
952
953 void
954 xfs_buf_unlock(
955 struct xfs_buf *bp)
956 {
957 XB_CLEAR_OWNER(bp);
958 up(&bp->b_sema);
959
960 trace_xfs_buf_unlock(bp, _RET_IP_);
961 }
962
963 STATIC void
964 xfs_buf_wait_unpin(
965 xfs_buf_t *bp)
966 {
967 DECLARE_WAITQUEUE (wait, current);
968
969 if (atomic_read(&bp->b_pin_count) == 0)
970 return;
971
972 add_wait_queue(&bp->b_waiters, &wait);
973 for (;;) {
974 set_current_state(TASK_UNINTERRUPTIBLE);
975 if (atomic_read(&bp->b_pin_count) == 0)
976 break;
977 io_schedule();
978 }
979 remove_wait_queue(&bp->b_waiters, &wait);
980 set_current_state(TASK_RUNNING);
981 }
982
983 /*
984 * Buffer Utility Routines
985 */
986
987 STATIC void
988 xfs_buf_iodone_work(
989 struct work_struct *work)
990 {
991 struct xfs_buf *bp =
992 container_of(work, xfs_buf_t, b_iodone_work);
993 bool read = !!(bp->b_flags & XBF_READ);
994
995 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
996
997 /* only validate buffers that were read without errors */
998 if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
999 bp->b_ops->verify_read(bp);
1000
1001 if (bp->b_iodone)
1002 (*(bp->b_iodone))(bp);
1003 else if (bp->b_flags & XBF_ASYNC)
1004 xfs_buf_relse(bp);
1005 else {
1006 ASSERT(read && bp->b_ops);
1007 complete(&bp->b_iowait);
1008 }
1009 }
1010
1011 void
1012 xfs_buf_ioend(
1013 struct xfs_buf *bp,
1014 int schedule)
1015 {
1016 bool read = !!(bp->b_flags & XBF_READ);
1017
1018 trace_xfs_buf_iodone(bp, _RET_IP_);
1019
1020 if (bp->b_error == 0)
1021 bp->b_flags |= XBF_DONE;
1022
1023 if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
1024 if (schedule) {
1025 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1026 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1027 } else {
1028 xfs_buf_iodone_work(&bp->b_iodone_work);
1029 }
1030 } else {
1031 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1032 complete(&bp->b_iowait);
1033 }
1034 }
1035
1036 void
1037 xfs_buf_ioerror(
1038 xfs_buf_t *bp,
1039 int error)
1040 {
1041 ASSERT(error >= 0 && error <= 0xffff);
1042 bp->b_error = (unsigned short)error;
1043 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1044 }
1045
1046 void
1047 xfs_buf_ioerror_alert(
1048 struct xfs_buf *bp,
1049 const char *func)
1050 {
1051 xfs_alert(bp->b_target->bt_mount,
1052 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1053 (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
1054 }
1055
1056 /*
1057 * Called when we want to stop a buffer from getting written or read.
1058 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1059 * so that the proper iodone callbacks get called.
1060 */
1061 STATIC int
1062 xfs_bioerror(
1063 xfs_buf_t *bp)
1064 {
1065 #ifdef XFSERRORDEBUG
1066 ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1067 #endif
1068
1069 /*
1070 * No need to wait until the buffer is unpinned, we aren't flushing it.
1071 */
1072 xfs_buf_ioerror(bp, EIO);
1073
1074 /*
1075 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1076 */
1077 XFS_BUF_UNREAD(bp);
1078 XFS_BUF_UNDONE(bp);
1079 xfs_buf_stale(bp);
1080
1081 xfs_buf_ioend(bp, 0);
1082
1083 return EIO;
1084 }
1085
1086 /*
1087 * Same as xfs_bioerror, except that we are releasing the buffer
1088 * here ourselves, and avoiding the xfs_buf_ioend call.
1089 * This is meant for userdata errors; metadata bufs come with
1090 * iodone functions attached, so that we can track down errors.
1091 */
1092 STATIC int
1093 xfs_bioerror_relse(
1094 struct xfs_buf *bp)
1095 {
1096 int64_t fl = bp->b_flags;
1097 /*
1098 * No need to wait until the buffer is unpinned.
1099 * We aren't flushing it.
1100 *
1101 * chunkhold expects B_DONE to be set, whether
1102 * we actually finish the I/O or not. We don't want to
1103 * change that interface.
1104 */
1105 XFS_BUF_UNREAD(bp);
1106 XFS_BUF_DONE(bp);
1107 xfs_buf_stale(bp);
1108 bp->b_iodone = NULL;
1109 if (!(fl & XBF_ASYNC)) {
1110 /*
1111 * Mark b_error and B_ERROR _both_.
1112		 * Lots of chunkcache code assumes that.
1113 * There's no reason to mark error for
1114 * ASYNC buffers.
1115 */
1116 xfs_buf_ioerror(bp, EIO);
1117 complete(&bp->b_iowait);
1118 } else {
1119 xfs_buf_relse(bp);
1120 }
1121
1122 return EIO;
1123 }
1124
1125 STATIC int
1126 xfs_bdstrat_cb(
1127 struct xfs_buf *bp)
1128 {
1129 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1130 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1131 /*
1132 * Metadata write that didn't get logged but
1133 * written delayed anyway. These aren't associated
1134 * with a transaction, and can be ignored.
1135 */
1136 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1137 return xfs_bioerror_relse(bp);
1138 else
1139 return xfs_bioerror(bp);
1140 }
1141
1142 xfs_buf_iorequest(bp);
1143 return 0;
1144 }
1145
1146 int
1147 xfs_bwrite(
1148 struct xfs_buf *bp)
1149 {
1150 int error;
1151
1152 ASSERT(xfs_buf_islocked(bp));
1153
1154 bp->b_flags |= XBF_WRITE;
1155 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
1156
1157 xfs_bdstrat_cb(bp);
1158
1159 error = xfs_buf_iowait(bp);
1160 if (error) {
1161 xfs_force_shutdown(bp->b_target->bt_mount,
1162 SHUTDOWN_META_IO_ERROR);
1163 }
1164 return error;
1165 }
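/*
 * Illustrative sketch, not part of the original file: xfs_bwrite() is the
 * simple synchronous write path for a buffer the caller has locked:
 *
 *	xfs_buf_lock(bp);
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 *
 * If the write fails, the error path above has already forced a filesystem
 * shutdown before xfs_bwrite() returns.
 */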
1166
1167 /*
1168 * Wrapper around bdstrat so that we can stop data from going to disk in case
1169  * we are shutting down the filesystem. Typically user data goes through this
1170 * path; one of the exceptions is the superblock.
1171 */
1172 void
1173 xfsbdstrat(
1174 struct xfs_mount *mp,
1175 struct xfs_buf *bp)
1176 {
1177 if (XFS_FORCED_SHUTDOWN(mp)) {
1178 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1179 xfs_bioerror_relse(bp);
1180 return;
1181 }
1182
1183 xfs_buf_iorequest(bp);
1184 }
1185
1186 STATIC void
1187 _xfs_buf_ioend(
1188 xfs_buf_t *bp,
1189 int schedule)
1190 {
1191 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1192 xfs_buf_ioend(bp, schedule);
1193 }
1194
1195 STATIC void
1196 xfs_buf_bio_end_io(
1197 struct bio *bio,
1198 int error)
1199 {
1200 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1201
1202 /*
1203 * don't overwrite existing errors - otherwise we can lose errors on
1204 * buffers that require multiple bios to complete.
1205 */
1206 if (!bp->b_error)
1207 xfs_buf_ioerror(bp, -error);
1208
1209 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1210 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1211
1212 _xfs_buf_ioend(bp, 1);
1213 bio_put(bio);
1214 }
1215
1216 static void
1217 xfs_buf_ioapply_map(
1218 struct xfs_buf *bp,
1219 int map,
1220 int *buf_offset,
1221 int *count,
1222 int rw)
1223 {
1224 int page_index;
1225 int total_nr_pages = bp->b_page_count;
1226 int nr_pages;
1227 struct bio *bio;
1228 sector_t sector = bp->b_maps[map].bm_bn;
1229 int size;
1230 int offset;
1231
1232 total_nr_pages = bp->b_page_count;
1233
1234 /* skip the pages in the buffer before the start offset */
1235 page_index = 0;
1236 offset = *buf_offset;
1237 while (offset >= PAGE_SIZE) {
1238 page_index++;
1239 offset -= PAGE_SIZE;
1240 }
1241
1242 /*
1243 * Limit the IO size to the length of the current vector, and update the
1244 * remaining IO count for the next time around.
1245 */
1246 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1247 *count -= size;
1248 *buf_offset += size;
1249
1250 next_chunk:
1251 atomic_inc(&bp->b_io_remaining);
1252 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1253 if (nr_pages > total_nr_pages)
1254 nr_pages = total_nr_pages;
1255
1256 bio = bio_alloc(GFP_NOIO, nr_pages);
1257 bio->bi_bdev = bp->b_target->bt_bdev;
1258 bio->bi_iter.bi_sector = sector;
1259 bio->bi_end_io = xfs_buf_bio_end_io;
1260 bio->bi_private = bp;
1261
1262
1263 for (; size && nr_pages; nr_pages--, page_index++) {
1264 int rbytes, nbytes = PAGE_SIZE - offset;
1265
1266 if (nbytes > size)
1267 nbytes = size;
1268
1269 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1270 offset);
1271 if (rbytes < nbytes)
1272 break;
1273
1274 offset = 0;
1275 sector += BTOBB(nbytes);
1276 size -= nbytes;
1277 total_nr_pages--;
1278 }
1279
1280 if (likely(bio->bi_iter.bi_size)) {
1281 if (xfs_buf_is_vmapped(bp)) {
1282 flush_kernel_vmap_range(bp->b_addr,
1283 xfs_buf_vmap_len(bp));
1284 }
1285 submit_bio(rw, bio);
1286 if (size)
1287 goto next_chunk;
1288 } else {
1289 /*
1290 * This is guaranteed not to be the last io reference count
1291 * because the caller (xfs_buf_iorequest) holds a count itself.
1292 */
1293 atomic_dec(&bp->b_io_remaining);
1294 xfs_buf_ioerror(bp, EIO);
1295 bio_put(bio);
1296 }
1297
1298 }
1299
1300 STATIC void
1301 _xfs_buf_ioapply(
1302 struct xfs_buf *bp)
1303 {
1304 struct blk_plug plug;
1305 int rw;
1306 int offset;
1307 int size;
1308 int i;
1309
1310 /*
1311 * Make sure we capture only current IO errors rather than stale errors
1312 * left over from previous use of the buffer (e.g. failed readahead).
1313 */
1314 bp->b_error = 0;
1315
1316 if (bp->b_flags & XBF_WRITE) {
1317 if (bp->b_flags & XBF_SYNCIO)
1318 rw = WRITE_SYNC;
1319 else
1320 rw = WRITE;
1321 if (bp->b_flags & XBF_FUA)
1322 rw |= REQ_FUA;
1323 if (bp->b_flags & XBF_FLUSH)
1324 rw |= REQ_FLUSH;
1325
1326 /*
1327 * Run the write verifier callback function if it exists. If
1328 * this function fails it will mark the buffer with an error and
1329 * the IO should not be dispatched.
1330 */
1331 if (bp->b_ops) {
1332 bp->b_ops->verify_write(bp);
1333 if (bp->b_error) {
1334 xfs_force_shutdown(bp->b_target->bt_mount,
1335 SHUTDOWN_CORRUPT_INCORE);
1336 return;
1337 }
1338 }
1339 } else if (bp->b_flags & XBF_READ_AHEAD) {
1340 rw = READA;
1341 } else {
1342 rw = READ;
1343 }
1344
1345 /* we only use the buffer cache for meta-data */
1346 rw |= REQ_META;
1347
1348 /*
1349 * Walk all the vectors issuing IO on them. Set up the initial offset
1350 * into the buffer and the desired IO size before we start -
1351 * _xfs_buf_ioapply_vec() will modify them appropriately for each
1352 * subsequent call.
1353 */
1354 offset = bp->b_offset;
1355 size = BBTOB(bp->b_io_length);
1356 blk_start_plug(&plug);
1357 for (i = 0; i < bp->b_map_count; i++) {
1358 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1359 if (bp->b_error)
1360 break;
1361 if (size <= 0)
1362 break; /* all done */
1363 }
1364 blk_finish_plug(&plug);
1365 }
1366
1367 void
1368 xfs_buf_iorequest(
1369 xfs_buf_t *bp)
1370 {
1371 trace_xfs_buf_iorequest(bp, _RET_IP_);
1372
1373 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1374
1375 if (bp->b_flags & XBF_WRITE)
1376 xfs_buf_wait_unpin(bp);
1377 xfs_buf_hold(bp);
1378
1379	/* Set the count to 1 initially; this will stop an I/O
1380 * completion callout which happens before we have started
1381 * all the I/O from calling xfs_buf_ioend too early.
1382 */
1383 atomic_set(&bp->b_io_remaining, 1);
1384 _xfs_buf_ioapply(bp);
1385 _xfs_buf_ioend(bp, 1);
1386
1387 xfs_buf_rele(bp);
1388 }
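/*
 * Illustrative note, not part of the original file: b_io_remaining is held
 * at 1 across _xfs_buf_ioapply() so that bios completing while later bios
 * are still being built cannot drive the count to zero prematurely:
 *
 *	initial count 1
 *	+1 for each bio submitted, -1 as each bio completes
 *	-1 by the final _xfs_buf_ioend(bp, 1) above
 *
 * xfs_buf_ioend() therefore runs exactly once, when the count reaches zero.
 */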
1389
1390 /*
1391 * Waits for I/O to complete on the buffer supplied. It returns immediately if
1392 * no I/O is pending or there is already a pending error on the buffer. It
1393 * returns the I/O error code, if any, or 0 if there was no error.
1394 */
1395 int
1396 xfs_buf_iowait(
1397 xfs_buf_t *bp)
1398 {
1399 trace_xfs_buf_iowait(bp, _RET_IP_);
1400
1401 if (!bp->b_error)
1402 wait_for_completion(&bp->b_iowait);
1403
1404 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1405 return bp->b_error;
1406 }
1407
1408 xfs_caddr_t
1409 xfs_buf_offset(
1410 xfs_buf_t *bp,
1411 size_t offset)
1412 {
1413 struct page *page;
1414
1415 if (bp->b_addr)
1416 return bp->b_addr + offset;
1417
1418 offset += bp->b_offset;
1419 page = bp->b_pages[offset >> PAGE_SHIFT];
1420 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1421 }
1422
1423 /*
1424 * Move data into or out of a buffer.
1425 */
1426 void
1427 xfs_buf_iomove(
1428 xfs_buf_t *bp, /* buffer to process */
1429 size_t boff, /* starting buffer offset */
1430 size_t bsize, /* length to copy */
1431 void *data, /* data address */
1432 xfs_buf_rw_t mode) /* read/write/zero flag */
1433 {
1434 size_t bend;
1435
1436 bend = boff + bsize;
1437 while (boff < bend) {
1438 struct page *page;
1439 int page_index, page_offset, csize;
1440
1441 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1442 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1443 page = bp->b_pages[page_index];
1444 csize = min_t(size_t, PAGE_SIZE - page_offset,
1445 BBTOB(bp->b_io_length) - boff);
1446
1447 ASSERT((csize + page_offset) <= PAGE_SIZE);
1448
1449 switch (mode) {
1450 case XBRW_ZERO:
1451 memset(page_address(page) + page_offset, 0, csize);
1452 break;
1453 case XBRW_READ:
1454 memcpy(data, page_address(page) + page_offset, csize);
1455 break;
1456 case XBRW_WRITE:
1457 memcpy(page_address(page) + page_offset, data, csize);
1458 }
1459
1460 boff += csize;
1461 data += csize;
1462 }
1463 }
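/*
 * Illustrative sketch, not part of the original file: xfs_buf_iomove() lets
 * callers touch buffer contents without caring whether the buffer is
 * mapped, e.g. zeroing a range and then copying data out of it:
 *
 *	xfs_buf_iomove(bp, boff, bsize, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, boff, bsize, data, XBRW_READ);
 *
 * XBRW_ZERO ignores the data pointer, so NULL is fine there.
 */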
1464
1465 /*
1466 * Handling of buffer targets (buftargs).
1467 */
1468
1469 /*
1470 * Wait for any bufs with callbacks that have been submitted but have not yet
1471 * returned. These buffers will have an elevated hold count, so wait on those
1472 * while freeing all the buffers only held by the LRU.
1473 */
1474 static enum lru_status
1475 xfs_buftarg_wait_rele(
1476 struct list_head *item,
1477 spinlock_t *lru_lock,
1478 void *arg)
1479
1480 {
1481 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1482 struct list_head *dispose = arg;
1483
1484 if (atomic_read(&bp->b_hold) > 1) {
1485 /* need to wait, so skip it this pass */
1486 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1487 return LRU_SKIP;
1488 }
1489 if (!spin_trylock(&bp->b_lock))
1490 return LRU_SKIP;
1491
1492 /*
1493 * clear the LRU reference count so the buffer doesn't get
1494 * ignored in xfs_buf_rele().
1495 */
1496 atomic_set(&bp->b_lru_ref, 0);
1497 bp->b_state |= XFS_BSTATE_DISPOSE;
1498 list_move(item, dispose);
1499 spin_unlock(&bp->b_lock);
1500 return LRU_REMOVED;
1501 }
1502
1503 void
1504 xfs_wait_buftarg(
1505 struct xfs_buftarg *btp)
1506 {
1507 LIST_HEAD(dispose);
1508 int loop = 0;
1509
1510 /* loop until there is nothing left on the lru list. */
1511 while (list_lru_count(&btp->bt_lru)) {
1512 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1513 &dispose, LONG_MAX);
1514
1515 while (!list_empty(&dispose)) {
1516 struct xfs_buf *bp;
1517 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1518 list_del_init(&bp->b_lru);
1519 xfs_buf_rele(bp);
1520 }
1521 if (loop++ != 0)
1522 delay(100);
1523 }
1524 }
1525
1526 static enum lru_status
1527 xfs_buftarg_isolate(
1528 struct list_head *item,
1529 spinlock_t *lru_lock,
1530 void *arg)
1531 {
1532 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1533 struct list_head *dispose = arg;
1534
1535 /*
1536 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1537 * If we fail to get the lock, just skip it.
1538 */
1539 if (!spin_trylock(&bp->b_lock))
1540 return LRU_SKIP;
1541 /*
1542 * Decrement the b_lru_ref count unless the value is already
1543 * zero. If the value is already zero, we need to reclaim the
1544 * buffer, otherwise it gets another trip through the LRU.
1545 */
1546 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1547 spin_unlock(&bp->b_lock);
1548 return LRU_ROTATE;
1549 }
1550
1551 bp->b_state |= XFS_BSTATE_DISPOSE;
1552 list_move(item, dispose);
1553 spin_unlock(&bp->b_lock);
1554 return LRU_REMOVED;
1555 }
1556
1557 static unsigned long
1558 xfs_buftarg_shrink_scan(
1559 struct shrinker *shrink,
1560 struct shrink_control *sc)
1561 {
1562 struct xfs_buftarg *btp = container_of(shrink,
1563 struct xfs_buftarg, bt_shrinker);
1564 LIST_HEAD(dispose);
1565 unsigned long freed;
1566 unsigned long nr_to_scan = sc->nr_to_scan;
1567
1568 freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
1569 &dispose, &nr_to_scan);
1570
1571 while (!list_empty(&dispose)) {
1572 struct xfs_buf *bp;
1573 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1574 list_del_init(&bp->b_lru);
1575 xfs_buf_rele(bp);
1576 }
1577
1578 return freed;
1579 }
1580
1581 static unsigned long
1582 xfs_buftarg_shrink_count(
1583 struct shrinker *shrink,
1584 struct shrink_control *sc)
1585 {
1586 struct xfs_buftarg *btp = container_of(shrink,
1587 struct xfs_buftarg, bt_shrinker);
1588 return list_lru_count_node(&btp->bt_lru, sc->nid);
1589 }
1590
1591 void
1592 xfs_free_buftarg(
1593 struct xfs_mount *mp,
1594 struct xfs_buftarg *btp)
1595 {
1596 unregister_shrinker(&btp->bt_shrinker);
1597 list_lru_destroy(&btp->bt_lru);
1598
1599 if (mp->m_flags & XFS_MOUNT_BARRIER)
1600 xfs_blkdev_issue_flush(btp);
1601
1602 kmem_free(btp);
1603 }
1604
1605 STATIC int
1606 xfs_setsize_buftarg_flags(
1607 xfs_buftarg_t *btp,
1608 unsigned int blocksize,
1609 unsigned int sectorsize,
1610 int verbose)
1611 {
1612 btp->bt_bsize = blocksize;
1613 btp->bt_sshift = ffs(sectorsize) - 1;
1614 btp->bt_smask = sectorsize - 1;
1615
1616 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1617 char name[BDEVNAME_SIZE];
1618
1619 bdevname(btp->bt_bdev, name);
1620
1621 xfs_warn(btp->bt_mount,
1622 "Cannot set_blocksize to %u on device %s",
1623 sectorsize, name);
1624 return EINVAL;
1625 }
1626
1627 return 0;
1628 }
1629
1630 /*
1631 * When allocating the initial buffer target we have not yet
1632  * read in the superblock, so we don't know what size sectors
1633 * are being used at this early stage. Play safe.
1634 */
1635 STATIC int
1636 xfs_setsize_buftarg_early(
1637 xfs_buftarg_t *btp,
1638 struct block_device *bdev)
1639 {
1640 return xfs_setsize_buftarg_flags(btp,
1641 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1642 }
1643
1644 int
1645 xfs_setsize_buftarg(
1646 xfs_buftarg_t *btp,
1647 unsigned int blocksize,
1648 unsigned int sectorsize)
1649 {
1650 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1651 }
1652
1653 xfs_buftarg_t *
1654 xfs_alloc_buftarg(
1655 struct xfs_mount *mp,
1656 struct block_device *bdev,
1657 int external,
1658 const char *fsname)
1659 {
1660 xfs_buftarg_t *btp;
1661
1662 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1663
1664 btp->bt_mount = mp;
1665 btp->bt_dev = bdev->bd_dev;
1666 btp->bt_bdev = bdev;
1667 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1668 if (!btp->bt_bdi)
1669 goto error;
1670
1671 if (xfs_setsize_buftarg_early(btp, bdev))
1672 goto error;
1673
1674 if (list_lru_init(&btp->bt_lru))
1675 goto error;
1676
1677 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1678 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1679 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1680 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1681 register_shrinker(&btp->bt_shrinker);
1682 return btp;
1683
1684 error:
1685 kmem_free(btp);
1686 return NULL;
1687 }
1688
1689 /*
1690 * Add a buffer to the delayed write list.
1691 *
1692 * This queues a buffer for writeout if it hasn't already been. Note that
1693 * neither this routine nor the buffer list submission functions perform
1694 * any internal synchronization. It is expected that the lists are thread-local
1695 * to the callers.
1696 *
1697 * Returns true if we queued up the buffer, or false if it already had
1698 * been on the buffer list.
1699 */
1700 bool
1701 xfs_buf_delwri_queue(
1702 struct xfs_buf *bp,
1703 struct list_head *list)
1704 {
1705 ASSERT(xfs_buf_islocked(bp));
1706 ASSERT(!(bp->b_flags & XBF_READ));
1707
1708 /*
1709 * If the buffer is already marked delwri it already is queued up
1710	 * by someone else for immediate writeout. Just ignore it in that
1711 * case.
1712 */
1713 if (bp->b_flags & _XBF_DELWRI_Q) {
1714 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1715 return false;
1716 }
1717
1718 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1719
1720 /*
1721 * If a buffer gets written out synchronously or marked stale while it
1722 * is on a delwri list we lazily remove it. To do this, the other party
1723 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1724 * It remains referenced and on the list. In a rare corner case it
1725	 * might get re-added to a delwri list after the synchronous writeout, in
1726	 * which case we just need to re-add the flag here.
1727 */
1728 bp->b_flags |= _XBF_DELWRI_Q;
1729 if (list_empty(&bp->b_list)) {
1730 atomic_inc(&bp->b_hold);
1731 list_add_tail(&bp->b_list, list);
1732 }
1733
1734 return true;
1735 }
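/*
 * Illustrative sketch, not part of the original file (bp1/bp2 are
 * placeholders): delwri lists are plain, caller-owned list heads, e.g.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp1, &buffer_list);
 *	xfs_buf_delwri_queue(bp2, &buffer_list);
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * All locking of the list itself is left to the caller, as noted above.
 */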
1736
1737 /*
1738 * Compare function is more complex than it needs to be because
1739 * the return value is only 32 bits and we are doing comparisons
1740 * on 64 bit values
1741 */
1742 static int
1743 xfs_buf_cmp(
1744 void *priv,
1745 struct list_head *a,
1746 struct list_head *b)
1747 {
1748 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1749 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1750 xfs_daddr_t diff;
1751
1752 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1753 if (diff < 0)
1754 return -1;
1755 if (diff > 0)
1756 return 1;
1757 return 0;
1758 }
1759
1760 static int
1761 __xfs_buf_delwri_submit(
1762 struct list_head *buffer_list,
1763 struct list_head *io_list,
1764 bool wait)
1765 {
1766 struct blk_plug plug;
1767 struct xfs_buf *bp, *n;
1768 int pinned = 0;
1769
1770 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1771 if (!wait) {
1772 if (xfs_buf_ispinned(bp)) {
1773 pinned++;
1774 continue;
1775 }
1776 if (!xfs_buf_trylock(bp))
1777 continue;
1778 } else {
1779 xfs_buf_lock(bp);
1780 }
1781
1782 /*
1783 * Someone else might have written the buffer synchronously or
1784 * marked it stale in the meantime. In that case only the
1785 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1786 * reference and remove it from the list here.
1787 */
1788 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1789 list_del_init(&bp->b_list);
1790 xfs_buf_relse(bp);
1791 continue;
1792 }
1793
1794 list_move_tail(&bp->b_list, io_list);
1795 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1796 }
1797
1798 list_sort(NULL, io_list, xfs_buf_cmp);
1799
1800 blk_start_plug(&plug);
1801 list_for_each_entry_safe(bp, n, io_list, b_list) {
1802 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
1803 bp->b_flags |= XBF_WRITE;
1804
1805 if (!wait) {
1806 bp->b_flags |= XBF_ASYNC;
1807 list_del_init(&bp->b_list);
1808 }
1809 xfs_bdstrat_cb(bp);
1810 }
1811 blk_finish_plug(&plug);
1812
1813 return pinned;
1814 }
1815
1816 /*
1817 * Write out a buffer list asynchronously.
1818 *
1819 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1820 * out and not wait for I/O completion on any of the buffers. This interface
1821  * is only safely usable for callers that can track I/O completion by higher
1822 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1823 * function.
1824 */
1825 int
1826 xfs_buf_delwri_submit_nowait(
1827 struct list_head *buffer_list)
1828 {
1829 LIST_HEAD (io_list);
1830 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1831 }
1832
1833 /*
1834 * Write out a buffer list synchronously.
1835 *
1836 * This will take the @buffer_list, write all buffers out and wait for I/O
1837 * completion on all of the buffers. @buffer_list is consumed by the function,
1838 * so callers must have some other way of tracking buffers if they require such
1839 * functionality.
1840 */
1841 int
1842 xfs_buf_delwri_submit(
1843 struct list_head *buffer_list)
1844 {
1845 LIST_HEAD (io_list);
1846 int error = 0, error2;
1847 struct xfs_buf *bp;
1848
1849 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1850
1851 /* Wait for IO to complete. */
1852 while (!list_empty(&io_list)) {
1853 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1854
1855 list_del_init(&bp->b_list);
1856 error2 = xfs_buf_iowait(bp);
1857 xfs_buf_relse(bp);
1858 if (!error)
1859 error = error2;
1860 }
1861
1862 return error;
1863 }
1864
1865 int __init
1866 xfs_buf_init(void)
1867 {
1868 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1869 KM_ZONE_HWALIGN, NULL);
1870 if (!xfs_buf_zone)
1871 goto out;
1872
1873 xfslogd_workqueue = alloc_workqueue("xfslogd",
1874 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1875 if (!xfslogd_workqueue)
1876 goto out_free_buf_zone;
1877
1878 return 0;
1879
1880 out_free_buf_zone:
1881 kmem_zone_destroy(xfs_buf_zone);
1882 out:
1883 return -ENOMEM;
1884 }
1885
1886 void
1887 xfs_buf_terminate(void)
1888 {
1889 destroy_workqueue(xfslogd_workqueue);
1890 kmem_zone_destroy(xfs_buf_zone);
1891 }