Btrfs: use the inode's mapping mask for allocating pages
[GitHub/mt8127/android_kernel_alcatel_ttab.git] fs/btrfs/free-space-cache.c
1 /*
2 * Copyright (C) 2008 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/pagemap.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/math64.h>
23 #include <linux/ratelimit.h>
24 #include "ctree.h"
25 #include "free-space-cache.h"
26 #include "transaction.h"
27 #include "disk-io.h"
28 #include "extent_io.h"
29 #include "inode-map.h"
30
31 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
32 #define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
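/*
 * For example, with 4K pages BITS_PER_BITMAP is 32768. Each bit covers one
 * ctl->unit (the block group's sectorsize), so with 4K sectors a single
 * bitmap page tracks 32768 * 4096 = 128MiB of space. MAX_CACHE_BYTES_PER_GIG
 * caps the in-memory footprint of free space tracking at roughly 32K per
 * 1GiB of block group; see recalculate_thresholds() below.
 */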
33
34 static int link_free_space(struct btrfs_free_space_ctl *ctl,
35 struct btrfs_free_space *info);
36
37 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
38 struct btrfs_path *path,
39 u64 offset)
40 {
41 struct btrfs_key key;
42 struct btrfs_key location;
43 struct btrfs_disk_key disk_key;
44 struct btrfs_free_space_header *header;
45 struct extent_buffer *leaf;
46 struct inode *inode = NULL;
47 int ret;
48
49 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
50 key.offset = offset;
51 key.type = 0;
52
53 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
54 if (ret < 0)
55 return ERR_PTR(ret);
56 if (ret > 0) {
57 btrfs_release_path(path);
58 return ERR_PTR(-ENOENT);
59 }
60
61 leaf = path->nodes[0];
62 header = btrfs_item_ptr(leaf, path->slots[0],
63 struct btrfs_free_space_header);
64 btrfs_free_space_key(leaf, header, &disk_key);
65 btrfs_disk_key_to_cpu(&location, &disk_key);
66 btrfs_release_path(path);
67
68 inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
69 if (!inode)
70 return ERR_PTR(-ENOENT);
71 if (IS_ERR(inode))
72 return inode;
73 if (is_bad_inode(inode)) {
74 iput(inode);
75 return ERR_PTR(-ENOENT);
76 }
77
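/*
 * Clearing __GFP_FS from the mapping's gfp mask means page cache
 * allocations for this inode must not recurse back into the filesystem
 * during reclaim; btrfs_alloc_write_mask() below presumably derives the
 * allocation mask passed to find_or_create_page() from this.
 */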
78 inode->i_mapping->flags &= ~__GFP_FS;
79
80 return inode;
81 }
82
83 struct inode *lookup_free_space_inode(struct btrfs_root *root,
84 struct btrfs_block_group_cache
85 *block_group, struct btrfs_path *path)
86 {
87 struct inode *inode = NULL;
88
89 spin_lock(&block_group->lock);
90 if (block_group->inode)
91 inode = igrab(block_group->inode);
92 spin_unlock(&block_group->lock);
93 if (inode)
94 return inode;
95
96 inode = __lookup_free_space_inode(root, path,
97 block_group->key.objectid);
98 if (IS_ERR(inode))
99 return inode;
100
101 spin_lock(&block_group->lock);
102 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
103 printk(KERN_INFO "Old style space inode found, converting.\n");
104 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
105 block_group->disk_cache_state = BTRFS_DC_CLEAR;
106 }
107
108 if (!block_group->iref) {
109 block_group->inode = igrab(inode);
110 block_group->iref = 1;
111 }
112 spin_unlock(&block_group->lock);
113
114 return inode;
115 }
116
117 int __create_free_space_inode(struct btrfs_root *root,
118 struct btrfs_trans_handle *trans,
119 struct btrfs_path *path, u64 ino, u64 offset)
120 {
121 struct btrfs_key key;
122 struct btrfs_disk_key disk_key;
123 struct btrfs_free_space_header *header;
124 struct btrfs_inode_item *inode_item;
125 struct extent_buffer *leaf;
126 int ret;
127
128 ret = btrfs_insert_empty_inode(trans, root, path, ino);
129 if (ret)
130 return ret;
131
132 leaf = path->nodes[0];
133 inode_item = btrfs_item_ptr(leaf, path->slots[0],
134 struct btrfs_inode_item);
135 btrfs_item_key(leaf, &disk_key, path->slots[0]);
136 memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
137 sizeof(*inode_item));
138 btrfs_set_inode_generation(leaf, inode_item, trans->transid);
139 btrfs_set_inode_size(leaf, inode_item, 0);
140 btrfs_set_inode_nbytes(leaf, inode_item, 0);
141 btrfs_set_inode_uid(leaf, inode_item, 0);
142 btrfs_set_inode_gid(leaf, inode_item, 0);
143 btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
144 btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
145 BTRFS_INODE_PREALLOC);
146 btrfs_set_inode_nlink(leaf, inode_item, 1);
147 btrfs_set_inode_transid(leaf, inode_item, trans->transid);
148 btrfs_set_inode_block_group(leaf, inode_item, offset);
149 btrfs_mark_buffer_dirty(leaf);
150 btrfs_release_path(path);
151
152 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
153 key.offset = offset;
154 key.type = 0;
155
156 ret = btrfs_insert_empty_item(trans, root, path, &key,
157 sizeof(struct btrfs_free_space_header));
158 if (ret < 0) {
159 btrfs_release_path(path);
160 return ret;
161 }
162 leaf = path->nodes[0];
163 header = btrfs_item_ptr(leaf, path->slots[0],
164 struct btrfs_free_space_header);
165 memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
166 btrfs_set_free_space_key(leaf, header, &disk_key);
167 btrfs_mark_buffer_dirty(leaf);
168 btrfs_release_path(path);
169
170 return 0;
171 }
172
173 int create_free_space_inode(struct btrfs_root *root,
174 struct btrfs_trans_handle *trans,
175 struct btrfs_block_group_cache *block_group,
176 struct btrfs_path *path)
177 {
178 int ret;
179 u64 ino;
180
181 ret = btrfs_find_free_objectid(root, &ino);
182 if (ret < 0)
183 return ret;
184
185 return __create_free_space_inode(root, trans, path, ino,
186 block_group->key.objectid);
187 }
188
189 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
190 struct btrfs_trans_handle *trans,
191 struct btrfs_path *path,
192 struct inode *inode)
193 {
194 struct btrfs_block_rsv *rsv;
195 loff_t oldsize;
196 int ret = 0;
197
198 rsv = trans->block_rsv;
199 trans->block_rsv = root->orphan_block_rsv;
200 ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 0, 5, 0);
201 if (ret)
202 return ret;
203
204 oldsize = i_size_read(inode);
205 btrfs_i_size_write(inode, 0);
206 truncate_pagecache(inode, oldsize, 0);
207
208 /*
209 * We don't need an orphan item because truncating the free space cache
210 * will never be split across transactions.
211 */
212 ret = btrfs_truncate_inode_items(trans, root, inode,
213 0, BTRFS_EXTENT_DATA_KEY);
214
215 trans->block_rsv = rsv;
216 if (ret) {
217 WARN_ON(1);
218 return ret;
219 }
220
221 ret = btrfs_update_inode(trans, root, inode);
222 return ret;
223 }
224
225 static int readahead_cache(struct inode *inode)
226 {
227 struct file_ra_state *ra;
228 unsigned long last_index;
229
230 ra = kzalloc(sizeof(*ra), GFP_NOFS);
231 if (!ra)
232 return -ENOMEM;
233
234 file_ra_state_init(ra, inode->i_mapping);
235 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
236
237 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
238
239 kfree(ra);
240
241 return 0;
242 }
243
244 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
245 struct btrfs_free_space_ctl *ctl,
246 struct btrfs_path *path, u64 offset)
247 {
248 struct btrfs_free_space_header *header;
249 struct extent_buffer *leaf;
250 struct page *page;
251 struct btrfs_key key;
252 struct list_head bitmaps;
253 u64 num_entries;
254 u64 num_bitmaps;
255 u64 generation;
256 pgoff_t index = 0;
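/*
 * Per the commit title, cache pages are allocated with the inode's own
 * mapping mask (which had __GFP_FS cleared at lookup time) rather than a
 * bare GFP flag, so reclaim can't re-enter btrfs while faulting in pages.
 */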
257 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
258 int ret = 0;
259
260 INIT_LIST_HEAD(&bitmaps);
261
262 /* Nothing in the space cache, goodbye */
263 if (!i_size_read(inode))
264 goto out;
265
266 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
267 key.offset = offset;
268 key.type = 0;
269
270 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
271 if (ret < 0)
272 goto out;
273 else if (ret > 0) {
274 btrfs_release_path(path);
275 ret = 0;
276 goto out;
277 }
278
279 ret = -1;
280
281 leaf = path->nodes[0];
282 header = btrfs_item_ptr(leaf, path->slots[0],
283 struct btrfs_free_space_header);
284 num_entries = btrfs_free_space_entries(leaf, header);
285 num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
286 generation = btrfs_free_space_generation(leaf, header);
287 btrfs_release_path(path);
288
289 if (BTRFS_I(inode)->generation != generation) {
290 printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
291 " not match free space cache generation (%llu)\n",
292 (unsigned long long)BTRFS_I(inode)->generation,
293 (unsigned long long)generation);
294 goto out;
295 }
296
297 if (!num_entries)
298 goto out;
299
300 ret = readahead_cache(inode);
301 if (ret)
302 goto out;
303
304 while (1) {
305 struct btrfs_free_space_entry *entry;
306 struct btrfs_free_space *e;
307 void *addr;
308 unsigned long offset = 0;
309 int need_loop = 0;
310
311 if (!num_entries && !num_bitmaps)
312 break;
313
314 page = find_or_create_page(inode->i_mapping, index, mask);
315 if (!page)
316 goto free_cache;
317
318 if (!PageUptodate(page)) {
319 btrfs_readpage(NULL, page);
320 lock_page(page);
321 if (!PageUptodate(page)) {
322 unlock_page(page);
323 page_cache_release(page);
324 printk(KERN_ERR "btrfs: error reading free "
325 "space cache\n");
326 goto free_cache;
327 }
328 }
329 addr = kmap(page);
330
331 if (index == 0) {
332 u64 *gen;
333
334 /*
335 * We put a bogus crc in the front of the first page in
336 * case old kernels try to mount a fs with the new
337 * format to make sure they discard the cache.
338 */
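/*
 * Rough layout of the first cache page, as reconstructed from the code
 * below (a sketch, not a formal spec):
 *
 *   [ u64 crc ][ u64 generation ][ free space entries ... ]
 *
 * The two u64 skips step over the crc and then the generation that has
 * just been validated.
 */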
339 addr += sizeof(u64);
340 offset += sizeof(u64);
341
342 gen = addr;
343 if (*gen != BTRFS_I(inode)->generation) {
344 printk_ratelimited(KERN_ERR "btrfs: space cache"
345 " generation (%llu) does not match "
346 "inode (%llu)\n",
347 (unsigned long long)*gen,
348 (unsigned long long)
349 BTRFS_I(inode)->generation);
350 kunmap(page);
351 unlock_page(page);
352 page_cache_release(page);
353 goto free_cache;
354 }
355 addr += sizeof(u64);
356 offset += sizeof(u64);
357 }
358 entry = addr;
359
360 while (1) {
361 if (!num_entries)
362 break;
363
364 need_loop = 1;
365 e = kmem_cache_zalloc(btrfs_free_space_cachep,
366 GFP_NOFS);
367 if (!e) {
368 kunmap(page);
369 unlock_page(page);
370 page_cache_release(page);
371 goto free_cache;
372 }
373
374 e->offset = le64_to_cpu(entry->offset);
375 e->bytes = le64_to_cpu(entry->bytes);
376 if (!e->bytes) {
377 kunmap(page);
378 kmem_cache_free(btrfs_free_space_cachep, e);
379 unlock_page(page);
380 page_cache_release(page);
381 goto free_cache;
382 }
383
384 if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
385 spin_lock(&ctl->tree_lock);
386 ret = link_free_space(ctl, e);
387 spin_unlock(&ctl->tree_lock);
388 if (ret) {
389 printk(KERN_ERR "Duplicate entries in "
390 "free space cache, dumping\n");
391 kunmap(page);
392 unlock_page(page);
393 page_cache_release(page);
394 goto free_cache;
395 }
396 } else {
397 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
398 if (!e->bitmap) {
399 kunmap(page);
400 kmem_cache_free(
401 btrfs_free_space_cachep, e);
402 unlock_page(page);
403 page_cache_release(page);
404 goto free_cache;
405 }
406 spin_lock(&ctl->tree_lock);
407 ret = link_free_space(ctl, e);
408 ctl->total_bitmaps++;
409 ctl->op->recalc_thresholds(ctl);
410 spin_unlock(&ctl->tree_lock);
411 if (ret) {
412 printk(KERN_ERR "Duplicate entries in "
413 "free space cache, dumping\n");
414 kunmap(page);
415 unlock_page(page);
416 page_cache_release(page);
417 goto free_cache;
418 }
419 list_add_tail(&e->list, &bitmaps);
420 }
421
422 num_entries--;
423 offset += sizeof(struct btrfs_free_space_entry);
424 if (offset + sizeof(struct btrfs_free_space_entry) >=
425 PAGE_CACHE_SIZE)
426 break;
427 entry++;
428 }
429
430 /*
431 * We read an entry out of this page, so we need to move on to the
432 * next page.
433 */
434 if (need_loop) {
435 kunmap(page);
436 goto next;
437 }
438
439 /*
440 * The on-disk bitmap pages follow in the same order the bitmap
441 * entries were read above, so consume them from the head of the list.
442 */
443 e = list_entry(bitmaps.next, struct btrfs_free_space, list);
444 list_del_init(&e->list);
445 memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
446 kunmap(page);
447 num_bitmaps--;
448 next:
449 unlock_page(page);
450 page_cache_release(page);
451 index++;
452 }
453
454 ret = 1;
455 out:
456 return ret;
457 free_cache:
458 __btrfs_remove_free_space_cache(ctl);
459 goto out;
460 }
461
462 int load_free_space_cache(struct btrfs_fs_info *fs_info,
463 struct btrfs_block_group_cache *block_group)
464 {
465 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
466 struct btrfs_root *root = fs_info->tree_root;
467 struct inode *inode;
468 struct btrfs_path *path;
469 int ret;
470 bool matched;
471 u64 used = btrfs_block_group_used(&block_group->item);
472
473 /*
474 * If we're unmounting then just return, since this does a search on the
475 * normal root and not the commit root and we could deadlock.
476 */
477 if (btrfs_fs_closing(fs_info))
478 return 0;
479
480 /*
481 * If this block group has been marked to be cleared for one reason or
482 * another then we can't trust the on disk cache, so just return.
483 */
484 spin_lock(&block_group->lock);
485 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
486 spin_unlock(&block_group->lock);
487 return 0;
488 }
489 spin_unlock(&block_group->lock);
490
491 path = btrfs_alloc_path();
492 if (!path)
493 return 0;
494
495 inode = lookup_free_space_inode(root, block_group, path);
496 if (IS_ERR(inode)) {
497 btrfs_free_path(path);
498 return 0;
499 }
500
501 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
502 path, block_group->key.objectid);
503 btrfs_free_path(path);
504 if (ret <= 0)
505 goto out;
506
507 spin_lock(&ctl->tree_lock);
508 matched = (ctl->free_space == (block_group->key.offset - used -
509 block_group->bytes_super));
510 spin_unlock(&ctl->tree_lock);
511
512 if (!matched) {
513 __btrfs_remove_free_space_cache(ctl);
514 printk(KERN_ERR "block group %llu has the wrong amount of free "
515 "space\n", block_group->key.objectid);
516 ret = -1;
517 }
518 out:
519 if (ret < 0) {
520 /* This cache is bogus, make sure it gets cleared */
521 spin_lock(&block_group->lock);
522 block_group->disk_cache_state = BTRFS_DC_CLEAR;
523 spin_unlock(&block_group->lock);
524 ret = 0;
525
526 printk(KERN_ERR "btrfs: failed to load free space cache "
527 "for block group %llu\n", block_group->key.objectid);
528 }
529
530 iput(inode);
531 return ret;
532 }
533
534 /**
535 * __btrfs_write_out_cache - write out cached info to an inode
536 * @root - the root the inode belongs to
537 * @ctl - the free space cache we are going to write out
538 * @block_group - the block_group for this cache if it belongs to a block_group
539 * @trans - the trans handle
540 * @path - the path to use
541 * @offset - the offset for the key we'll insert
542 *
543 * This function writes out a free space cache struct to disk for quick recovery
544 * on mount. This will return 0 if it was successful in writing the cache out,
545 * and -1 if it was not.
546 */
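/*
 * On-disk layout written below, as reconstructed from this function (a
 * sketch rather than an authoritative spec):
 *
 *   page 0:  [u64 crc][u64 generation][entries ...]
 *   page 1+: more entries, then one full page of raw bitmap data per
 *            bitmap entry, in the order the bitmap entries were emitted.
 */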
547 int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
548 struct btrfs_free_space_ctl *ctl,
549 struct btrfs_block_group_cache *block_group,
550 struct btrfs_trans_handle *trans,
551 struct btrfs_path *path, u64 offset)
552 {
553 struct btrfs_free_space_header *header;
554 struct extent_buffer *leaf;
555 struct rb_node *node;
556 struct list_head *pos, *n;
557 struct page **pages;
558 struct page *page;
559 struct extent_state *cached_state = NULL;
560 struct btrfs_free_cluster *cluster = NULL;
561 struct extent_io_tree *unpin = NULL;
562 struct list_head bitmap_list;
563 struct btrfs_key key;
564 u64 start, end, len;
565 u64 bytes = 0;
566 u32 crc = ~(u32)0;
567 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
568 int index = 0, num_pages = 0;
569 int entries = 0;
570 int bitmaps = 0;
571 int ret;
572 int err = -1;
573 bool next_page = false;
574 bool out_of_space = false;
575
576 INIT_LIST_HEAD(&bitmap_list);
577
578 node = rb_first(&ctl->free_space_offset);
579 if (!node)
580 return -1;
581
582 if (!i_size_read(inode))
583 return -1;
584
585 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
586 PAGE_CACHE_SHIFT;
587
588 filemap_write_and_wait(inode->i_mapping);
589 btrfs_wait_ordered_range(inode, inode->i_size &
590 ~(root->sectorsize - 1), (u64)-1);
591
592 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
593 if (!pages)
594 return -1;
595
596 /* Get the cluster for this block_group if it exists */
597 if (block_group && !list_empty(&block_group->cluster_list))
598 cluster = list_entry(block_group->cluster_list.next,
599 struct btrfs_free_cluster,
600 block_group_list);
601
602 /*
603 * We shouldn't have switched the pinned extents yet so this is the
604 * right one
605 */
606 unpin = root->fs_info->pinned_extents;
607
608 /*
609 * Lock all pages first so we can lock the extent safely.
610 *
611 * NOTE: Because we hold the ref the entire time we're going to write to
612 * the page, find_get_page should never fail, so we don't do a check
613 * after find_get_page at this point. Just putting this here so people
614 * know and don't freak out.
615 */
616 while (index < num_pages) {
617 page = find_or_create_page(inode->i_mapping, index, mask);
618 if (!page) {
619 int i;
620
621 for (i = 0; i < num_pages; i++) {
622 unlock_page(pages[i]);
623 page_cache_release(pages[i]);
624 }
625 goto out;
626 }
627 pages[index] = page;
628 index++;
629 }
630
631 index = 0;
632 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
633 0, &cached_state, GFP_NOFS);
634
635 /*
636 * When searching for pinned extents, we need to start at our start
637 * offset.
638 */
639 if (block_group)
640 start = block_group->key.objectid;
641
642 /* Write out the extent entries */
643 do {
644 struct btrfs_free_space_entry *entry;
645 void *addr, *orig;
646 unsigned long offset = 0;
647
648 next_page = false;
649
650 if (index >= num_pages) {
651 out_of_space = true;
652 break;
653 }
654
655 page = pages[index];
656
657 orig = addr = kmap(page);
658 if (index == 0) {
659 u64 *gen;
660
661 /*
662 * We're going to put in a bogus crc for this page to
663 * make sure that old kernels that aren't aware of this
664 * format will be sure to discard the cache.
665 */
666 addr += sizeof(u64);
667 offset += sizeof(u64);
668
669 gen = addr;
670 *gen = trans->transid;
671 addr += sizeof(u64);
672 offset += sizeof(u64);
673 }
674 entry = addr;
675
676 memset(addr, 0, PAGE_CACHE_SIZE - offset);
677 while (node && !next_page) {
678 struct btrfs_free_space *e;
679
680 e = rb_entry(node, struct btrfs_free_space, offset_index);
681 entries++;
682
683 entry->offset = cpu_to_le64(e->offset);
684 entry->bytes = cpu_to_le64(e->bytes);
685 if (e->bitmap) {
686 entry->type = BTRFS_FREE_SPACE_BITMAP;
687 list_add_tail(&e->list, &bitmap_list);
688 bitmaps++;
689 } else {
690 entry->type = BTRFS_FREE_SPACE_EXTENT;
691 }
692 node = rb_next(node);
693 if (!node && cluster) {
694 node = rb_first(&cluster->root);
695 cluster = NULL;
696 }
697 offset += sizeof(struct btrfs_free_space_entry);
698 if (offset + sizeof(struct btrfs_free_space_entry) >=
699 PAGE_CACHE_SIZE)
700 next_page = true;
701 entry++;
702 }
703
704 /*
705 * We want to add any pinned extents to our free space cache
706 * so we don't leak the space
707 */
708 while (block_group && !next_page &&
709 (start < block_group->key.objectid +
710 block_group->key.offset)) {
711 ret = find_first_extent_bit(unpin, start, &start, &end,
712 EXTENT_DIRTY);
713 if (ret) {
714 ret = 0;
715 break;
716 }
717
718 /* This pinned extent is out of our range */
719 if (start >= block_group->key.objectid +
720 block_group->key.offset)
721 break;
722
723 len = block_group->key.objectid +
724 block_group->key.offset - start;
725 len = min(len, end + 1 - start);
726
727 entries++;
728 entry->offset = cpu_to_le64(start);
729 entry->bytes = cpu_to_le64(len);
730 entry->type = BTRFS_FREE_SPACE_EXTENT;
731
732 start = end + 1;
733 offset += sizeof(struct btrfs_free_space_entry);
734 if (offset + sizeof(struct btrfs_free_space_entry) >=
735 PAGE_CACHE_SIZE)
736 next_page = true;
737 entry++;
738 }
739
740 /* Generate bogus crc value */
741 if (index == 0) {
742 u32 *tmp;
743 crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
744 PAGE_CACHE_SIZE - sizeof(u64));
745 btrfs_csum_final(crc, (char *)&crc);
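/*
 * Bump the checksum so the value stored at the front of the page can
 * never validate; old kernels reading it will always discard the cache
 * (this appears to be the intent of the "bogus crc" comment above).
 */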
746 crc++;
747 tmp = orig;
748 *tmp = crc;
749 }
750
751 kunmap(page);
752
753 bytes += PAGE_CACHE_SIZE;
754
755 index++;
756 } while (node || next_page);
757
758 /* Write out the bitmaps */
759 list_for_each_safe(pos, n, &bitmap_list) {
760 void *addr;
761 struct btrfs_free_space *entry =
762 list_entry(pos, struct btrfs_free_space, list);
763
764 if (index >= num_pages) {
765 out_of_space = true;
766 break;
767 }
768 page = pages[index];
769
770 addr = kmap(page);
771 memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
772 kunmap(page);
773 bytes += PAGE_CACHE_SIZE;
774
775 list_del_init(&entry->list);
776 index++;
777 }
778
779 if (out_of_space) {
780 btrfs_drop_pages(pages, num_pages);
781 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
782 i_size_read(inode) - 1, &cached_state,
783 GFP_NOFS);
784 goto out;
785 }
786
787 /* Zero out the rest of the pages just to make sure */
788 while (index < num_pages) {
789 void *addr;
790
791 page = pages[index];
792 addr = kmap(page);
793 memset(addr, 0, PAGE_CACHE_SIZE);
794 kunmap(page);
795 bytes += PAGE_CACHE_SIZE;
796 index++;
797 }
798
799 ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
800 bytes, &cached_state);
801 btrfs_drop_pages(pages, num_pages);
802 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
803 i_size_read(inode) - 1, &cached_state, GFP_NOFS);
804
805 if (ret)
806 goto out;
807
808 BTRFS_I(inode)->generation = trans->transid;
809
810 filemap_write_and_wait(inode->i_mapping);
811
812 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
813 key.offset = offset;
814 key.type = 0;
815
816 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
817 if (ret < 0) {
818 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
819 EXTENT_DIRTY | EXTENT_DELALLOC |
820 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
821 goto out;
822 }
823 leaf = path->nodes[0];
824 if (ret > 0) {
825 struct btrfs_key found_key;
826 BUG_ON(!path->slots[0]);
827 path->slots[0]--;
828 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
829 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
830 found_key.offset != offset) {
831 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
832 EXTENT_DIRTY | EXTENT_DELALLOC |
833 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
834 GFP_NOFS);
835 btrfs_release_path(path);
836 goto out;
837 }
838 }
839 header = btrfs_item_ptr(leaf, path->slots[0],
840 struct btrfs_free_space_header);
841 btrfs_set_free_space_entries(leaf, header, entries);
842 btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
843 btrfs_set_free_space_generation(leaf, header, trans->transid);
844 btrfs_mark_buffer_dirty(leaf);
845 btrfs_release_path(path);
846
847 err = 0;
848 out:
849 kfree(pages);
850 if (err) {
851 invalidate_inode_pages2_range(inode->i_mapping, 0, index);
852 BTRFS_I(inode)->generation = 0;
853 }
854 btrfs_update_inode(trans, root, inode);
855 return err;
856 }
857
858 int btrfs_write_out_cache(struct btrfs_root *root,
859 struct btrfs_trans_handle *trans,
860 struct btrfs_block_group_cache *block_group,
861 struct btrfs_path *path)
862 {
863 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
864 struct inode *inode;
865 int ret = 0;
866
867 root = root->fs_info->tree_root;
868
869 spin_lock(&block_group->lock);
870 if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
871 spin_unlock(&block_group->lock);
872 return 0;
873 }
874 spin_unlock(&block_group->lock);
875
876 inode = lookup_free_space_inode(root, block_group, path);
877 if (IS_ERR(inode))
878 return 0;
879
880 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
881 path, block_group->key.objectid);
882 if (ret) {
883 btrfs_delalloc_release_metadata(inode, inode->i_size);
884 spin_lock(&block_group->lock);
885 block_group->disk_cache_state = BTRFS_DC_ERROR;
886 spin_unlock(&block_group->lock);
887 ret = 0;
888 #ifdef DEBUG
889 printk(KERN_ERR "btrfs: failed to write free space cache "
890 "for block group %llu\n", block_group->key.objectid);
891 #endif
892 }
893
894 iput(inode);
895 return ret;
896 }
897
898 static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
899 u64 offset)
900 {
901 BUG_ON(offset < bitmap_start);
902 offset -= bitmap_start;
903 return (unsigned long)(div_u64(offset, unit));
904 }
905
906 static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
907 {
908 return (unsigned long)(div_u64(bytes, unit));
909 }
910
911 static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
912 u64 offset)
913 {
914 u64 bitmap_start;
915 u64 bytes_per_bitmap;
916
917 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
918 bitmap_start = offset - ctl->start;
919 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
920 bitmap_start *= bytes_per_bitmap;
921 bitmap_start += ctl->start;
922
923 return bitmap_start;
924 }
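/*
 * Example (assuming 4K pages and 4K sectors): bytes_per_bitmap is
 * 32768 * 4096 = 128MiB, so an offset of ctl->start + 200MiB rounds
 * down to ctl->start + 128MiB, the start of the second bitmap.
 */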
925
926 static int tree_insert_offset(struct rb_root *root, u64 offset,
927 struct rb_node *node, int bitmap)
928 {
929 struct rb_node **p = &root->rb_node;
930 struct rb_node *parent = NULL;
931 struct btrfs_free_space *info;
932
933 while (*p) {
934 parent = *p;
935 info = rb_entry(parent, struct btrfs_free_space, offset_index);
936
937 if (offset < info->offset) {
938 p = &(*p)->rb_left;
939 } else if (offset > info->offset) {
940 p = &(*p)->rb_right;
941 } else {
942 /*
943 * we could have a bitmap entry and an extent entry
944 * share the same offset. If this is the case, we want
945 * the extent entry to always be found first if we do a
946 * linear search through the tree, since we want to have
947 * the quickest allocation time, and allocating from an
948 * extent is faster than allocating from a bitmap. So
949 * if we're inserting a bitmap and we find an entry at
950 * this offset, we want to go right, or after this entry
951 * logically. If we are inserting an extent and we've
952 * found a bitmap, we want to go left, or before
953 * logically.
954 */
955 if (bitmap) {
956 if (info->bitmap) {
957 WARN_ON_ONCE(1);
958 return -EEXIST;
959 }
960 p = &(*p)->rb_right;
961 } else {
962 if (!info->bitmap) {
963 WARN_ON_ONCE(1);
964 return -EEXIST;
965 }
966 p = &(*p)->rb_left;
967 }
968 }
969 }
970
971 rb_link_node(node, parent, p);
972 rb_insert_color(node, root);
973
974 return 0;
975 }
976
977 /*
978 * searches the tree for the given offset.
979 *
980 * bitmap_only - If set, only return a bitmap entry at exactly this offset.
981 * fuzzy - If set, we are making an allocation and just want a section
982 * that has at least bytes size and comes at or after the given offset.
983 */
984 static struct btrfs_free_space *
985 tree_search_offset(struct btrfs_free_space_ctl *ctl,
986 u64 offset, int bitmap_only, int fuzzy)
987 {
988 struct rb_node *n = ctl->free_space_offset.rb_node;
989 struct btrfs_free_space *entry, *prev = NULL;
990
991 /* find entry that is closest to the 'offset' */
992 while (1) {
993 if (!n) {
994 entry = NULL;
995 break;
996 }
997
998 entry = rb_entry(n, struct btrfs_free_space, offset_index);
999 prev = entry;
1000
1001 if (offset < entry->offset)
1002 n = n->rb_left;
1003 else if (offset > entry->offset)
1004 n = n->rb_right;
1005 else
1006 break;
1007 }
1008
1009 if (bitmap_only) {
1010 if (!entry)
1011 return NULL;
1012 if (entry->bitmap)
1013 return entry;
1014
1015 /*
1016 * bitmap entry and extent entry may share same offset,
1017 * in that case, bitmap entry comes after extent entry.
1018 */
1019 n = rb_next(n);
1020 if (!n)
1021 return NULL;
1022 entry = rb_entry(n, struct btrfs_free_space, offset_index);
1023 if (entry->offset != offset)
1024 return NULL;
1025
1026 WARN_ON(!entry->bitmap);
1027 return entry;
1028 } else if (entry) {
1029 if (entry->bitmap) {
1030 /*
1031 * if previous extent entry covers the offset,
1032 * we should return it instead of the bitmap entry
1033 */
1034 n = &entry->offset_index;
1035 while (1) {
1036 n = rb_prev(n);
1037 if (!n)
1038 break;
1039 prev = rb_entry(n, struct btrfs_free_space,
1040 offset_index);
1041 if (!prev->bitmap) {
1042 if (prev->offset + prev->bytes > offset)
1043 entry = prev;
1044 break;
1045 }
1046 }
1047 }
1048 return entry;
1049 }
1050
1051 if (!prev)
1052 return NULL;
1053
1054 /* find last entry before the 'offset' */
1055 entry = prev;
1056 if (entry->offset > offset) {
1057 n = rb_prev(&entry->offset_index);
1058 if (n) {
1059 entry = rb_entry(n, struct btrfs_free_space,
1060 offset_index);
1061 BUG_ON(entry->offset > offset);
1062 } else {
1063 if (fuzzy)
1064 return entry;
1065 else
1066 return NULL;
1067 }
1068 }
1069
1070 if (entry->bitmap) {
1071 n = &entry->offset_index;
1072 while (1) {
1073 n = rb_prev(n);
1074 if (!n)
1075 break;
1076 prev = rb_entry(n, struct btrfs_free_space,
1077 offset_index);
1078 if (!prev->bitmap) {
1079 if (prev->offset + prev->bytes > offset)
1080 return prev;
1081 break;
1082 }
1083 }
1084 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1085 return entry;
1086 } else if (entry->offset + entry->bytes > offset)
1087 return entry;
1088
1089 if (!fuzzy)
1090 return NULL;
1091
1092 while (1) {
1093 if (entry->bitmap) {
1094 if (entry->offset + BITS_PER_BITMAP *
1095 ctl->unit > offset)
1096 break;
1097 } else {
1098 if (entry->offset + entry->bytes > offset)
1099 break;
1100 }
1101
1102 n = rb_next(&entry->offset_index);
1103 if (!n)
1104 return NULL;
1105 entry = rb_entry(n, struct btrfs_free_space, offset_index);
1106 }
1107 return entry;
1108 }
1109
1110 static inline void
1111 __unlink_free_space(struct btrfs_free_space_ctl *ctl,
1112 struct btrfs_free_space *info)
1113 {
1114 rb_erase(&info->offset_index, &ctl->free_space_offset);
1115 ctl->free_extents--;
1116 }
1117
1118 static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1119 struct btrfs_free_space *info)
1120 {
1121 __unlink_free_space(ctl, info);
1122 ctl->free_space -= info->bytes;
1123 }
1124
1125 static int link_free_space(struct btrfs_free_space_ctl *ctl,
1126 struct btrfs_free_space *info)
1127 {
1128 int ret = 0;
1129
1130 BUG_ON(!info->bitmap && !info->bytes);
1131 ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1132 &info->offset_index, (info->bitmap != NULL));
1133 if (ret)
1134 return ret;
1135
1136 ctl->free_space += info->bytes;
1137 ctl->free_extents++;
1138 return ret;
1139 }
1140
1141 static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1142 {
1143 struct btrfs_block_group_cache *block_group = ctl->private;
1144 u64 max_bytes;
1145 u64 bitmap_bytes;
1146 u64 extent_bytes;
1147 u64 size = block_group->key.offset;
1148 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
1149 int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
1150
1151 BUG_ON(ctl->total_bitmaps > max_bitmaps);
1152
1153 /*
1154 * The goal is to keep the total amount of memory used per 1gb of space
1155 * at or below 32k, so we need to adjust how much memory we allow to be
1156 * used by extent based free space tracking
1157 */
1158 if (size < 1024 * 1024 * 1024)
1159 max_bytes = MAX_CACHE_BYTES_PER_GIG;
1160 else
1161 max_bytes = MAX_CACHE_BYTES_PER_GIG *
1162 div64_u64(size, 1024 * 1024 * 1024);
1163
1164 /*
1165 * we want to account for 1 more bitmap than what we have so we can make
1166 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1167 * we add more bitmaps.
1168 */
1169 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
1170
1171 if (bitmap_bytes >= max_bytes) {
1172 ctl->extents_thresh = 0;
1173 return;
1174 }
1175
1176 /*
1177 * the extent entry threshold gets whatever is left after the bitmaps,
1178 * capped at 1/2 of the max bytes we can have.
1179 */
1180 extent_bytes = max_bytes - bitmap_bytes;
1181 extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
1182
1183 ctl->extents_thresh =
1184 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
1185 }
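/*
 * Worked example (4K pages, no bitmaps yet, 1GiB block group):
 * max_bytes = 32K, bitmap_bytes = (0 + 1) * 4K = 4K, so extent_bytes =
 * min(32K - 4K, 32K / 2) = 16K, and extents_thresh is 16K divided by
 * sizeof(struct btrfs_free_space).
 */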
1186
1187 static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1188 struct btrfs_free_space *info,
1189 u64 offset, u64 bytes)
1190 {
1191 unsigned long start, count;
1192
1193 start = offset_to_bit(info->offset, ctl->unit, offset);
1194 count = bytes_to_bits(bytes, ctl->unit);
1195 BUG_ON(start + count > BITS_PER_BITMAP);
1196
1197 bitmap_clear(info->bitmap, start, count);
1198
1199 info->bytes -= bytes;
1200 }
1201
1202 static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1203 struct btrfs_free_space *info, u64 offset,
1204 u64 bytes)
1205 {
1206 __bitmap_clear_bits(ctl, info, offset, bytes);
1207 ctl->free_space -= bytes;
1208 }
1209
1210 static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1211 struct btrfs_free_space *info, u64 offset,
1212 u64 bytes)
1213 {
1214 unsigned long start, count;
1215
1216 start = offset_to_bit(info->offset, ctl->unit, offset);
1217 count = bytes_to_bits(bytes, ctl->unit);
1218 BUG_ON(start + count > BITS_PER_BITMAP);
1219
1220 bitmap_set(info->bitmap, start, count);
1221
1222 info->bytes += bytes;
1223 ctl->free_space += bytes;
1224 }
1225
1226 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1227 struct btrfs_free_space *bitmap_info, u64 *offset,
1228 u64 *bytes)
1229 {
1230 unsigned long found_bits = 0;
1231 unsigned long bits, i;
1232 unsigned long next_zero;
1233
1234 i = offset_to_bit(bitmap_info->offset, ctl->unit,
1235 max_t(u64, *offset, bitmap_info->offset));
1236 bits = bytes_to_bits(*bytes, ctl->unit);
1237
1238 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
1239 i < BITS_PER_BITMAP;
1240 i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
1241 next_zero = find_next_zero_bit(bitmap_info->bitmap,
1242 BITS_PER_BITMAP, i);
1243 if ((next_zero - i) >= bits) {
1244 found_bits = next_zero - i;
1245 break;
1246 }
1247 i = next_zero;
1248 }
1249
1250 if (found_bits) {
1251 *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1252 *bytes = (u64)(found_bits) * ctl->unit;
1253 return 0;
1254 }
1255
1256 return -1;
1257 }
1258
1259 static struct btrfs_free_space *
1260 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
1261 {
1262 struct btrfs_free_space *entry;
1263 struct rb_node *node;
1264 int ret;
1265
1266 if (!ctl->free_space_offset.rb_node)
1267 return NULL;
1268
1269 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1270 if (!entry)
1271 return NULL;
1272
1273 for (node = &entry->offset_index; node; node = rb_next(node)) {
1274 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1275 if (entry->bytes < *bytes)
1276 continue;
1277
1278 if (entry->bitmap) {
1279 ret = search_bitmap(ctl, entry, offset, bytes);
1280 if (!ret)
1281 return entry;
1282 continue;
1283 }
1284
1285 *offset = entry->offset;
1286 *bytes = entry->bytes;
1287 return entry;
1288 }
1289
1290 return NULL;
1291 }
1292
1293 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1294 struct btrfs_free_space *info, u64 offset)
1295 {
1296 info->offset = offset_to_bitmap(ctl, offset);
1297 info->bytes = 0;
1298 link_free_space(ctl, info);
1299 ctl->total_bitmaps++;
1300
1301 ctl->op->recalc_thresholds(ctl);
1302 }
1303
1304 static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1305 struct btrfs_free_space *bitmap_info)
1306 {
1307 unlink_free_space(ctl, bitmap_info);
1308 kfree(bitmap_info->bitmap);
1309 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1310 ctl->total_bitmaps--;
1311 ctl->op->recalc_thresholds(ctl);
1312 }
1313
1314 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1315 struct btrfs_free_space *bitmap_info,
1316 u64 *offset, u64 *bytes)
1317 {
1318 u64 end;
1319 u64 search_start, search_bytes;
1320 int ret;
1321
1322 again:
1323 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1324
1325 /*
1326 * XXX - this can go away after a few releases.
1327 *
1328 * since the only user of btrfs_remove_free_space is the tree logging
1329 * stuff, and the only way to test that is under crash conditions, we
1330 * want to have this debug stuff here just in case something's not
1331 * working. Search the bitmap for the space we are trying to use to
1332 * make sure it's actually there. If it's not there then we need to stop
1333 * because something has gone wrong.
1334 */
1335 search_start = *offset;
1336 search_bytes = *bytes;
1337 search_bytes = min(search_bytes, end - search_start + 1);
1338 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1339 BUG_ON(ret < 0 || search_start != *offset);
1340
1341 if (*offset > bitmap_info->offset && *offset + *bytes > end) {
1342 bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
1343 *bytes -= end - *offset + 1;
1344 *offset = end + 1;
1345 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
1346 bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
1347 *bytes = 0;
1348 }
1349
1350 if (*bytes) {
1351 struct rb_node *next = rb_next(&bitmap_info->offset_index);
1352 if (!bitmap_info->bytes)
1353 free_bitmap(ctl, bitmap_info);
1354
1355 /*
1356 * no entry after this bitmap, but we still have bytes to
1357 * remove, so something has gone wrong.
1358 */
1359 if (!next)
1360 return -EINVAL;
1361
1362 bitmap_info = rb_entry(next, struct btrfs_free_space,
1363 offset_index);
1364
1365 /*
1366 * if the next entry isn't a bitmap we need to return to let the
1367 * extent stuff do its work.
1368 */
1369 if (!bitmap_info->bitmap)
1370 return -EAGAIN;
1371
1372 /*
1373 * Ok the next item is a bitmap, but it may not actually hold
1374 * the information for the rest of this free space stuff, so
1375 * look for it, and if we don't find it return so we can try
1376 * everything over again.
1377 */
1378 search_start = *offset;
1379 search_bytes = *bytes;
1380 ret = search_bitmap(ctl, bitmap_info, &search_start,
1381 &search_bytes);
1382 if (ret < 0 || search_start != *offset)
1383 return -EAGAIN;
1384
1385 goto again;
1386 } else if (!bitmap_info->bytes)
1387 free_bitmap(ctl, bitmap_info);
1388
1389 return 0;
1390 }
1391
1392 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1393 struct btrfs_free_space *info, u64 offset,
1394 u64 bytes)
1395 {
1396 u64 bytes_to_set = 0;
1397 u64 end;
1398
1399 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1400
1401 bytes_to_set = min(end - offset, bytes);
1402
1403 bitmap_set_bits(ctl, info, offset, bytes_to_set);
1404
1405 return bytes_to_set;
1406
1407 }
1408
1409 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1410 struct btrfs_free_space *info)
1411 {
1412 struct btrfs_block_group_cache *block_group = ctl->private;
1413
1414 /*
1415 * If we are below the extents threshold then we can add this as an
1416 * extent, and don't have to deal with the bitmap
1417 */
1418 if (ctl->free_extents < ctl->extents_thresh) {
1419 /*
1420 * If this block group has some small extents we don't want to
1421 * use up all of our free slots in the cache with them; we want
1422 * to reserve those slots for larger extents. However, if we have
1423 * plenty of cache left then go ahead and add them, no sense in
1424 * adding the overhead of a bitmap if we don't have to.
1425 */
1426 if (info->bytes <= block_group->sectorsize * 4) {
1427 if (ctl->free_extents * 2 <= ctl->extents_thresh)
1428 return false;
1429 } else {
1430 return false;
1431 }
1432 }
1433
1434 /*
1435 * some block groups are so tiny they can't be enveloped by a bitmap, so
1436 * don't even bother to create a bitmap for this
1437 */
1438 if (BITS_PER_BITMAP * block_group->sectorsize >
1439 block_group->key.offset)
1440 return false;
1441
1442 return true;
1443 }
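/*
 * With 4K sectors the "small extent" cutoff above is 16K, and the
 * coverage check rejects block groups smaller than
 * BITS_PER_BITMAP * 4K = 128MiB (assuming 4K pages), since a single
 * bitmap would cover more than the whole group.
 */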
1444
1445 static struct btrfs_free_space_op free_space_op = {
1446 .recalc_thresholds = recalculate_thresholds,
1447 .use_bitmap = use_bitmap,
1448 };
1449
1450 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1451 struct btrfs_free_space *info)
1452 {
1453 struct btrfs_free_space *bitmap_info;
1454 struct btrfs_block_group_cache *block_group = NULL;
1455 int added = 0;
1456 u64 bytes, offset, bytes_added;
1457 int ret;
1458
1459 bytes = info->bytes;
1460 offset = info->offset;
1461
1462 if (!ctl->op->use_bitmap(ctl, info))
1463 return 0;
1464
1465 if (ctl->op == &free_space_op)
1466 block_group = ctl->private;
1467 again:
1468 /*
1469 * Since we link bitmaps right into the cluster we need to see if we
1470 * have a cluster here, and if so and it has our bitmap we need to add
1471 * the free space to that bitmap.
1472 */
1473 if (block_group && !list_empty(&block_group->cluster_list)) {
1474 struct btrfs_free_cluster *cluster;
1475 struct rb_node *node;
1476 struct btrfs_free_space *entry;
1477
1478 cluster = list_entry(block_group->cluster_list.next,
1479 struct btrfs_free_cluster,
1480 block_group_list);
1481 spin_lock(&cluster->lock);
1482 node = rb_first(&cluster->root);
1483 if (!node) {
1484 spin_unlock(&cluster->lock);
1485 goto no_cluster_bitmap;
1486 }
1487
1488 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1489 if (!entry->bitmap) {
1490 spin_unlock(&cluster->lock);
1491 goto no_cluster_bitmap;
1492 }
1493
1494 if (entry->offset == offset_to_bitmap(ctl, offset)) {
1495 bytes_added = add_bytes_to_bitmap(ctl, entry,
1496 offset, bytes);
1497 bytes -= bytes_added;
1498 offset += bytes_added;
1499 }
1500 spin_unlock(&cluster->lock);
1501 if (!bytes) {
1502 ret = 1;
1503 goto out;
1504 }
1505 }
1506
1507 no_cluster_bitmap:
1508 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1509 1, 0);
1510 if (!bitmap_info) {
1511 BUG_ON(added);
1512 goto new_bitmap;
1513 }
1514
1515 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1516 bytes -= bytes_added;
1517 offset += bytes_added;
1518 added = 0;
1519
1520 if (!bytes) {
1521 ret = 1;
1522 goto out;
1523 } else
1524 goto again;
1525
1526 new_bitmap:
1527 if (info && info->bitmap) {
1528 add_new_bitmap(ctl, info, offset);
1529 added = 1;
1530 info = NULL;
1531 goto again;
1532 } else {
1533 spin_unlock(&ctl->tree_lock);
1534
1535 /* no pre-allocated info, allocate a new one */
1536 if (!info) {
1537 info = kmem_cache_zalloc(btrfs_free_space_cachep,
1538 GFP_NOFS);
1539 if (!info) {
1540 spin_lock(&ctl->tree_lock);
1541 ret = -ENOMEM;
1542 goto out;
1543 }
1544 }
1545
1546 /* allocate the bitmap */
1547 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1548 spin_lock(&ctl->tree_lock);
1549 if (!info->bitmap) {
1550 ret = -ENOMEM;
1551 goto out;
1552 }
1553 goto again;
1554 }
1555
1556 out:
1557 if (info) {
1558 if (info->bitmap)
1559 kfree(info->bitmap);
1560 kmem_cache_free(btrfs_free_space_cachep, info);
1561 }
1562
1563 return ret;
1564 }
1565
1566 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1567 struct btrfs_free_space *info, bool update_stat)
1568 {
1569 struct btrfs_free_space *left_info;
1570 struct btrfs_free_space *right_info;
1571 bool merged = false;
1572 u64 offset = info->offset;
1573 u64 bytes = info->bytes;
1574
1575 /*
1576 * first we want to see if there is free space adjacent to the range we
1577 * are adding; if there is, remove that struct and add a new one to
1578 * cover the entire range
1579 */
1580 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1581 if (right_info && rb_prev(&right_info->offset_index))
1582 left_info = rb_entry(rb_prev(&right_info->offset_index),
1583 struct btrfs_free_space, offset_index);
1584 else
1585 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1586
1587 if (right_info && !right_info->bitmap) {
1588 if (update_stat)
1589 unlink_free_space(ctl, right_info);
1590 else
1591 __unlink_free_space(ctl, right_info);
1592 info->bytes += right_info->bytes;
1593 kmem_cache_free(btrfs_free_space_cachep, right_info);
1594 merged = true;
1595 }
1596
1597 if (left_info && !left_info->bitmap &&
1598 left_info->offset + left_info->bytes == offset) {
1599 if (update_stat)
1600 unlink_free_space(ctl, left_info);
1601 else
1602 __unlink_free_space(ctl, left_info);
1603 info->offset = left_info->offset;
1604 info->bytes += left_info->bytes;
1605 kmem_cache_free(btrfs_free_space_cachep, left_info);
1606 merged = true;
1607 }
1608
1609 return merged;
1610 }
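/*
 * For example, adding [100M, +50M) when the tree already holds extents
 * [50M, +50M) and [150M, +50M) unlinks both neighbours and leaves one
 * merged entry [50M, +150M) for the caller to link.
 */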
1611
1612 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1613 u64 offset, u64 bytes)
1614 {
1615 struct btrfs_free_space *info;
1616 int ret = 0;
1617
1618 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
1619 if (!info)
1620 return -ENOMEM;
1621
1622 info->offset = offset;
1623 info->bytes = bytes;
1624
1625 spin_lock(&ctl->tree_lock);
1626
1627 if (try_merge_free_space(ctl, info, true))
1628 goto link;
1629
1630 /*
1631 * If there was no extent directly to the left or right of this new
1632 * extent then we know we're going to have to allocate a new extent, so
1633 * before we do that see if we need to drop this into a bitmap
1634 */
1635 ret = insert_into_bitmap(ctl, info);
1636 if (ret < 0) {
1637 goto out;
1638 } else if (ret) {
1639 ret = 0;
1640 goto out;
1641 }
1642 link:
1643 ret = link_free_space(ctl, info);
1644 if (ret)
1645 kmem_cache_free(btrfs_free_space_cachep, info);
1646 out:
1647 spin_unlock(&ctl->tree_lock);
1648
1649 if (ret) {
1650 printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
1651 BUG_ON(ret == -EEXIST);
1652 }
1653
1654 return ret;
1655 }
1656
1657 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1658 u64 offset, u64 bytes)
1659 {
1660 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1661 struct btrfs_free_space *info;
1662 struct btrfs_free_space *next_info = NULL;
1663 int ret = 0;
1664
1665 spin_lock(&ctl->tree_lock);
1666
1667 again:
1668 info = tree_search_offset(ctl, offset, 0, 0);
1669 if (!info) {
1670 /*
1671 * oops didn't find an extent that matched the space we wanted
1672 * to remove, look for a bitmap instead
1673 */
1674 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1675 1, 0);
1676 if (!info) {
1677 WARN_ON(1);
1678 goto out_lock;
1679 }
1680 }
1681
1682 if (info->bytes < bytes && rb_next(&info->offset_index)) {
1683 u64 end;
1684 next_info = rb_entry(rb_next(&info->offset_index),
1685 struct btrfs_free_space,
1686 offset_index);
1687
1688 if (next_info->bitmap)
1689 end = next_info->offset +
1690 BITS_PER_BITMAP * ctl->unit - 1;
1691 else
1692 end = next_info->offset + next_info->bytes;
1693
1694 if (next_info->bytes < bytes ||
1695 next_info->offset > offset || offset > end) {
1696 printk(KERN_CRIT "Found free space at %llu, size %llu,"
1697 " trying to use %llu\n",
1698 (unsigned long long)info->offset,
1699 (unsigned long long)info->bytes,
1700 (unsigned long long)bytes);
1701 WARN_ON(1);
1702 ret = -EINVAL;
1703 goto out_lock;
1704 }
1705
1706 info = next_info;
1707 }
1708
1709 if (info->bytes == bytes) {
1710 unlink_free_space(ctl, info);
1711 if (info->bitmap) {
1712 kfree(info->bitmap);
1713 ctl->total_bitmaps--;
1714 }
1715 kmem_cache_free(btrfs_free_space_cachep, info);
1716 goto out_lock;
1717 }
1718
1719 if (!info->bitmap && info->offset == offset) {
1720 unlink_free_space(ctl, info);
1721 info->offset += bytes;
1722 info->bytes -= bytes;
1723 link_free_space(ctl, info);
1724 goto out_lock;
1725 }
1726
1727 if (!info->bitmap && info->offset <= offset &&
1728 info->offset + info->bytes >= offset + bytes) {
1729 u64 old_start = info->offset;
1730 /*
1731 * we're freeing space in the middle of the info,
1732 * this can happen during tree log replay
1733 *
1734 * first unlink the old info and then
1735 * insert it again after the hole we're creating
1736 */
1737 unlink_free_space(ctl, info);
1738 if (offset + bytes < info->offset + info->bytes) {
1739 u64 old_end = info->offset + info->bytes;
1740
1741 info->offset = offset + bytes;
1742 info->bytes = old_end - info->offset;
1743 ret = link_free_space(ctl, info);
1744 WARN_ON(ret);
1745 if (ret)
1746 goto out_lock;
1747 } else {
1748 /* the hole we're creating ends at the end
1749 * of the info struct, just free the info
1750 */
1751 kmem_cache_free(btrfs_free_space_cachep, info);
1752 }
1753 spin_unlock(&ctl->tree_lock);
1754
1755 /* step two, insert a new info struct to cover
1756 * anything before the hole
1757 */
1758 ret = btrfs_add_free_space(block_group, old_start,
1759 offset - old_start);
1760 WARN_ON(ret);
1761 goto out;
1762 }
1763
1764 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1765 if (ret == -EAGAIN)
1766 goto again;
1767 BUG_ON(ret);
1768 out_lock:
1769 spin_unlock(&ctl->tree_lock);
1770 out:
1771 return ret;
1772 }
1773
1774 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1775 u64 bytes)
1776 {
1777 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1778 struct btrfs_free_space *info;
1779 struct rb_node *n;
1780 int count = 0;
1781
1782 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1783 info = rb_entry(n, struct btrfs_free_space, offset_index);
1784 if (info->bytes >= bytes)
1785 count++;
1786 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
1787 (unsigned long long)info->offset,
1788 (unsigned long long)info->bytes,
1789 (info->bitmap) ? "yes" : "no");
1790 }
1791 printk(KERN_INFO "block group has cluster?: %s\n",
1792 list_empty(&block_group->cluster_list) ? "no" : "yes");
1793 printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
1794 count, (unsigned long long)bytes);
1795 }
1796
1797 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1798 {
1799 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1800
1801 spin_lock_init(&ctl->tree_lock);
1802 ctl->unit = block_group->sectorsize;
1803 ctl->start = block_group->key.objectid;
1804 ctl->private = block_group;
1805 ctl->op = &free_space_op;
1806
1807 /*
1808 * we only want to have 32k of ram per block group for keeping
1809 * track of free space, and if we pass 1/2 of that we want to
1810 * start converting things over to using bitmaps
1811 */
1812 ctl->extents_thresh = ((1024 * 32) / 2) /
1813 sizeof(struct btrfs_free_space);
1814 }
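/*
 * A rough sanity check on the initial threshold: 16K of entry space
 * divided by sizeof(struct btrfs_free_space) (a few tens of bytes)
 * allows on the order of a few hundred extent entries per block group
 * before bitmaps kick in.
 */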
1815
1816 /*
1817 * for a given cluster, put all of its extents back into the free
1818 * space cache. If the block group passed doesn't match the block group
1819 * pointed to by the cluster, someone else raced in and freed the
1820 * cluster already. In that case, we just return without changing anything
1821 */
1822 static int
1823 __btrfs_return_cluster_to_free_space(
1824 struct btrfs_block_group_cache *block_group,
1825 struct btrfs_free_cluster *cluster)
1826 {
1827 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1828 struct btrfs_free_space *entry;
1829 struct rb_node *node;
1830
1831 spin_lock(&cluster->lock);
1832 if (cluster->block_group != block_group)
1833 goto out;
1834
1835 cluster->block_group = NULL;
1836 cluster->window_start = 0;
1837 list_del_init(&cluster->block_group_list);
1838
1839 node = rb_first(&cluster->root);
1840 while (node) {
1841 bool bitmap;
1842
1843 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1844 node = rb_next(&entry->offset_index);
1845 rb_erase(&entry->offset_index, &cluster->root);
1846
1847 bitmap = (entry->bitmap != NULL);
1848 if (!bitmap)
1849 try_merge_free_space(ctl, entry, false);
1850 tree_insert_offset(&ctl->free_space_offset,
1851 entry->offset, &entry->offset_index, bitmap);
1852 }
1853 cluster->root = RB_ROOT;
1854
1855 out:
1856 spin_unlock(&cluster->lock);
1857 btrfs_put_block_group(block_group);
1858 return 0;
1859 }
1860
1861 void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
1862 {
1863 struct btrfs_free_space *info;
1864 struct rb_node *node;
1865
1866 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
1867 info = rb_entry(node, struct btrfs_free_space, offset_index);
1868 if (!info->bitmap) {
1869 unlink_free_space(ctl, info);
1870 kmem_cache_free(btrfs_free_space_cachep, info);
1871 } else {
1872 free_bitmap(ctl, info);
1873 }
1874 if (need_resched()) {
1875 spin_unlock(&ctl->tree_lock);
1876 cond_resched();
1877 spin_lock(&ctl->tree_lock);
1878 }
1879 }
1880 }
1881
1882 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
1883 {
1884 spin_lock(&ctl->tree_lock);
1885 __btrfs_remove_free_space_cache_locked(ctl);
1886 spin_unlock(&ctl->tree_lock);
1887 }
1888
1889 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1890 {
1891 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1892 struct btrfs_free_cluster *cluster;
1893 struct list_head *head;
1894
1895 spin_lock(&ctl->tree_lock);
1896 while ((head = block_group->cluster_list.next) !=
1897 &block_group->cluster_list) {
1898 cluster = list_entry(head, struct btrfs_free_cluster,
1899 block_group_list);
1900
1901 WARN_ON(cluster->block_group != block_group);
1902 __btrfs_return_cluster_to_free_space(block_group, cluster);
1903 if (need_resched()) {
1904 spin_unlock(&ctl->tree_lock);
1905 cond_resched();
1906 spin_lock(&ctl->tree_lock);
1907 }
1908 }
1909 __btrfs_remove_free_space_cache_locked(ctl);
1910 spin_unlock(&ctl->tree_lock);
1911
1912 }
1913
1914 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
1915 u64 offset, u64 bytes, u64 empty_size)
1916 {
1917 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1918 struct btrfs_free_space *entry = NULL;
1919 u64 bytes_search = bytes + empty_size;
1920 u64 ret = 0;
1921
1922 spin_lock(&ctl->tree_lock);
1923 entry = find_free_space(ctl, &offset, &bytes_search);
1924 if (!entry)
1925 goto out;
1926
1927 ret = offset;
1928 if (entry->bitmap) {
1929 bitmap_clear_bits(ctl, entry, offset, bytes);
1930 if (!entry->bytes)
1931 free_bitmap(ctl, entry);
1932 } else {
1933 unlink_free_space(ctl, entry);
1934 entry->offset += bytes;
1935 entry->bytes -= bytes;
1936 if (!entry->bytes)
1937 kmem_cache_free(btrfs_free_space_cachep, entry);
1938 else
1939 link_free_space(ctl, entry);
1940 }
1941
1942 out:
1943 spin_unlock(&ctl->tree_lock);
1944
1945 return ret;
1946 }
1947
1948 /*
1949 * given a cluster, put all of its extents back into the free space
1950 * cache. If a block group is passed, this function will only free
1951 * a cluster that belongs to the passed block group.
1952 *
1953 * Otherwise, it'll get a reference on the block group pointed to by the
1954 * cluster and remove the cluster from it.
1955 */
1956 int btrfs_return_cluster_to_free_space(
1957 struct btrfs_block_group_cache *block_group,
1958 struct btrfs_free_cluster *cluster)
1959 {
1960 struct btrfs_free_space_ctl *ctl;
1961 int ret;
1962
1963 /* first, get a safe pointer to the block group */
1964 spin_lock(&cluster->lock);
1965 if (!block_group) {
1966 block_group = cluster->block_group;
1967 if (!block_group) {
1968 spin_unlock(&cluster->lock);
1969 return 0;
1970 }
1971 } else if (cluster->block_group != block_group) {
1972 /* someone else has already freed it, don't redo their work */
1973 spin_unlock(&cluster->lock);
1974 return 0;
1975 }
1976 atomic_inc(&block_group->count);
1977 spin_unlock(&cluster->lock);
1978
1979 ctl = block_group->free_space_ctl;
1980
1981 /* now return any extents the cluster had on it */
1982 spin_lock(&ctl->tree_lock);
1983 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
1984 spin_unlock(&ctl->tree_lock);
1985
1986 /* finally drop our ref */
1987 btrfs_put_block_group(block_group);
1988 return ret;
1989 }
1990
1991 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1992 struct btrfs_free_cluster *cluster,
1993 struct btrfs_free_space *entry,
1994 u64 bytes, u64 min_start)
1995 {
1996 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1997 int err;
1998 u64 search_start = cluster->window_start;
1999 u64 search_bytes = bytes;
2000 u64 ret = 0;
2001
2002 search_start = min_start;
2003 search_bytes = bytes;
2004
2005 err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2006 if (err)
2007 return 0;
2008
2009 ret = search_start;
2010 __bitmap_clear_bits(ctl, entry, ret, bytes);
2011
2012 return ret;
2013 }
2014
2015 /*
2016 * given a cluster, try to allocate 'bytes' from it, returns 0
2017 * if it couldn't find anything suitably large, or a logical disk offset
2018 * if things worked out
2019 */
2020 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2021 struct btrfs_free_cluster *cluster, u64 bytes,
2022 u64 min_start)
2023 {
2024 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2025 struct btrfs_free_space *entry = NULL;
2026 struct rb_node *node;
2027 u64 ret = 0;
2028
2029 spin_lock(&cluster->lock);
2030 if (bytes > cluster->max_size)
2031 goto out;
2032
2033 if (cluster->block_group != block_group)
2034 goto out;
2035
2036 node = rb_first(&cluster->root);
2037 if (!node)
2038 goto out;
2039
2040 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2041 while (1) {
2042 if (entry->bytes < bytes ||
2043 (!entry->bitmap && entry->offset < min_start)) {
2044 node = rb_next(&entry->offset_index);
2045 if (!node)
2046 break;
2047 entry = rb_entry(node, struct btrfs_free_space,
2048 offset_index);
2049 continue;
2050 }
2051
2052 if (entry->bitmap) {
2053 ret = btrfs_alloc_from_bitmap(block_group,
2054 cluster, entry, bytes,
2055 min_start);
2056 if (ret == 0) {
2057 node = rb_next(&entry->offset_index);
2058 if (!node)
2059 break;
2060 entry = rb_entry(node, struct btrfs_free_space,
2061 offset_index);
2062 continue;
2063 }
2064 } else {
2065 ret = entry->offset;
2066
2067 entry->offset += bytes;
2068 entry->bytes -= bytes;
2069 }
2070
2071 if (entry->bytes == 0)
2072 rb_erase(&entry->offset_index, &cluster->root);
2073 break;
2074 }
2075 out:
2076 spin_unlock(&cluster->lock);
2077
2078 if (!ret)
2079 return 0;
2080
2081 spin_lock(&ctl->tree_lock);
2082
2083 ctl->free_space -= bytes;
2084 if (entry->bytes == 0) {
2085 ctl->free_extents--;
2086 if (entry->bitmap) {
2087 kfree(entry->bitmap);
2088 ctl->total_bitmaps--;
2089 ctl->op->recalc_thresholds(ctl);
2090 }
2091 kmem_cache_free(btrfs_free_space_cachep, entry);
2092 }
2093
2094 spin_unlock(&ctl->tree_lock);
2095
2096 return ret;
2097 }
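
/*
 * A hedged usage sketch (hypothetical caller, not the allocator's actual
 * code): take the cluster's refill_lock around the allocation, and if
 * the cluster cannot satisfy the request, hand its extents back to the
 * free space cache so a later refill starts clean. A return of 0 means
 * "fall back to the non-cluster allocation path".
 */
static u64 alloc_via_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 bytes, u64 min_start)
{
	u64 offset;

	spin_lock(&cluster->refill_lock);
	offset = btrfs_alloc_from_cluster(block_group, cluster, bytes,
					  min_start);
	spin_unlock(&cluster->refill_lock);
	if (!offset)
		btrfs_return_cluster_to_free_space(block_group, cluster);
	return offset;
}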
2098
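/*
 * Try to build the cluster out of a single bitmap entry: look for a run
 * of at least 'bytes' worth of set bits, keep accumulating runs until
 * min_bytes worth have been found inside a reasonably tight window, then
 * move the entry from the free space tree into the cluster's rbtree.
 */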
2099 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2100 struct btrfs_free_space *entry,
2101 struct btrfs_free_cluster *cluster,
2102 u64 offset, u64 bytes, u64 min_bytes)
2103 {
2104 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2105 unsigned long next_zero;
2106 unsigned long i;
2107 unsigned long search_bits;
2108 unsigned long total_bits;
2109 unsigned long found_bits;
2110 unsigned long start = 0;
2111 unsigned long total_found = 0;
2112 int ret;
2113 bool found = false;
2114
2115 i = offset_to_bit(entry->offset, block_group->sectorsize,
2116 max_t(u64, offset, entry->offset));
2117 search_bits = bytes_to_bits(bytes, block_group->sectorsize);
2118 total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
2119
2120 again:
2121 found_bits = 0;
2122 for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
2123 i < BITS_PER_BITMAP;
2124 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
2125 next_zero = find_next_zero_bit(entry->bitmap,
2126 BITS_PER_BITMAP, i);
2127 if (next_zero - i >= search_bits) {
2128 found_bits = next_zero - i;
2129 break;
2130 }
2131 i = next_zero;
2132 }
2133
2134 if (!found_bits)
2135 return -ENOSPC;
2136
2137 if (!found) {
2138 start = i;
2139 found = true;
2140 }
2141
2142 total_found += found_bits;
2143
2144 if (cluster->max_size < found_bits * block_group->sectorsize)
2145 cluster->max_size = found_bits * block_group->sectorsize;
2146
2147 if (total_found < total_bits) {
2148 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
2149 if (i - start > total_bits * 2) {
2150 total_found = 0;
2151 cluster->max_size = 0;
2152 found = false;
2153 }
2154 goto again;
2155 }
2156
2157 cluster->window_start = start * block_group->sectorsize +
2158 entry->offset;
2159 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2160 ret = tree_insert_offset(&cluster->root, entry->offset,
2161 &entry->offset_index, 1);
2162 BUG_ON(ret);
2163
2164 return 0;
2165 }
2166
2167 /*
2168 * This searches the block group for just extents to fill the cluster with.
2169 */
2170 static noinline int
2171 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2172 struct btrfs_free_cluster *cluster,
2173 struct list_head *bitmaps, u64 offset, u64 bytes,
2174 u64 min_bytes)
2175 {
2176 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2177 struct btrfs_free_space *first = NULL;
2178 struct btrfs_free_space *entry = NULL;
2179 struct btrfs_free_space *prev = NULL;
2180 struct btrfs_free_space *last;
2181 struct rb_node *node;
2182 u64 window_start;
2183 u64 window_free;
2184 u64 max_extent;
2185 u64 max_gap = 128 * 1024;
2186
2187 entry = tree_search_offset(ctl, offset, 0, 1);
2188 if (!entry)
2189 return -ENOSPC;
2190
2191 /*
2192 * We don't want bitmaps, so just move along until we find a normal
2193 * extent entry.
2194 */
2195 while (entry->bitmap) {
2196 if (list_empty(&entry->list))
2197 list_add_tail(&entry->list, bitmaps);
2198 node = rb_next(&entry->offset_index);
2199 if (!node)
2200 return -ENOSPC;
2201 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2202 }
2203
2204 window_start = entry->offset;
2205 window_free = entry->bytes;
2206 max_extent = entry->bytes;
2207 first = entry;
2208 last = entry;
2209 prev = entry;
2210
2211 while (window_free <= min_bytes) {
2212 node = rb_next(&entry->offset_index);
2213 if (!node)
2214 return -ENOSPC;
2215 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2216
2217 if (entry->bitmap) {
2218 if (list_empty(&entry->list))
2219 list_add_tail(&entry->list, bitmaps);
2220 continue;
2221 }
2222
2223 /*
2224 * We haven't filled the empty size, and either there is a large
2225 * gap or the window has grown very large; reset and try again.
2226 */
2227 if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
2228 entry->offset - window_start > (min_bytes * 2)) {
2229 first = entry;
2230 window_start = entry->offset;
2231 window_free = entry->bytes;
2232 last = entry;
2233 max_extent = entry->bytes;
2234 } else {
2235 last = entry;
2236 window_free += entry->bytes;
2237 if (entry->bytes > max_extent)
2238 max_extent = entry->bytes;
2239 }
2240 prev = entry;
2241 }
2242
2243 cluster->window_start = first->offset;
2244
2245 node = &first->offset_index;
2246
2247 /*
2248 * now we've found our entries, pull them out of the free space
2249 * cache and put them into the cluster rbtree
2250 */
2251 do {
2252 int ret;
2253
2254 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2255 node = rb_next(&entry->offset_index);
2256 if (entry->bitmap)
2257 continue;
2258
2259 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2260 ret = tree_insert_offset(&cluster->root, entry->offset,
2261 &entry->offset_index, 0);
2262 BUG_ON(ret);
2263 } while (node && entry != last);
2264
2265 cluster->max_size = max_extent;
2266
2267 return 0;
2268 }
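
/*
 * Worked example with illustrative numbers: for a min_bytes of 256K, the
 * window above is reset whenever two neighbouring extents sit more than
 * max_gap (128K) apart or the window spans more than 512K, so the
 * cluster is built from extents that are both large enough in total and
 * reasonably close together on disk.
 */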
2269
2270 /*
2271 * This specifically looks for bitmaps that may work in the cluster; we assume
2272 * that we have already failed to find extents that will work.
2273 */
2274 static noinline int
2275 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2276 struct btrfs_free_cluster *cluster,
2277 struct list_head *bitmaps, u64 offset, u64 bytes,
2278 u64 min_bytes)
2279 {
2280 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2281 struct btrfs_free_space *entry;
2282 struct rb_node *node;
2283 int ret = -ENOSPC;
2284
2285 if (ctl->total_bitmaps == 0)
2286 return -ENOSPC;
2287
2288 /*
2289 * First check our cached list of bitmaps and see if there is an entry
2290 * here that will work.
2291 */
2292 list_for_each_entry(entry, bitmaps, list) {
2293 if (entry->bytes < min_bytes)
2294 continue;
2295 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2296 bytes, min_bytes);
2297 if (!ret)
2298 return 0;
2299 }
2300
2301 /*
2302 * If we have entries on our list and we're still here, then none of
2303 * them worked, so get the next entry after the last entry in this
2304 * list and continue the search from there.
2305 */
2306 if (!list_empty(bitmaps)) {
2307 entry = list_entry(bitmaps->prev, struct btrfs_free_space,
2308 list);
2309 node = rb_next(&entry->offset_index);
2310 if (!node)
2311 return -ENOSPC;
2312 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2313 goto search;
2314 }
2315
2316 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2317 if (!entry)
2318 return -ENOSPC;
2319
2320 search:
2321 node = &entry->offset_index;
2322 do {
2323 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2324 node = rb_next(&entry->offset_index);
2325 if (!entry->bitmap)
2326 continue;
2327 if (entry->bytes < min_bytes)
2328 continue;
2329 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2330 bytes, min_bytes);
2331 } while (ret && node);
2332
2333 return ret;
2334 }
2335
2336 /*
2337 * here we try to find a cluster of blocks in a block group. The goal
2338 * is to find at least bytes free and up to empty_size + bytes free.
2339 * We might not find them all in one contiguous area.
2340 *
2341 * returns zero and sets up cluster if things worked out, otherwise
2342 * it returns -ENOSPC
2343 */
2344 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2345 struct btrfs_root *root,
2346 struct btrfs_block_group_cache *block_group,
2347 struct btrfs_free_cluster *cluster,
2348 u64 offset, u64 bytes, u64 empty_size)
2349 {
2350 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2351 struct list_head bitmaps;
2352 struct btrfs_free_space *entry, *tmp;
2353 u64 min_bytes;
2354 int ret;
2355
2356 /* for metadata, allow allocations with more holes */
2357 if (btrfs_test_opt(root, SSD_SPREAD)) {
2358 min_bytes = bytes + empty_size;
2359 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2360 /*
2361 * we want to do larger allocations when we are
2362 * flushing out the delayed refs; it helps prevent
2363 * making more work as we go along.
2364 */
2365 if (trans->transaction->delayed_refs.flushing)
2366 min_bytes = max(bytes, (bytes + empty_size) >> 1);
2367 else
2368 min_bytes = max(bytes, (bytes + empty_size) >> 4);
2369 } else
2370 min_bytes = max(bytes, (bytes + empty_size) >> 2);
2371
2372 spin_lock(&ctl->tree_lock);
2373
2374 /*
2375 * If we know we don't have enough space to make a cluster, don't even
2376 * bother doing all the work to try and find one.
2377 */
2378 if (ctl->free_space < min_bytes) {
2379 spin_unlock(&ctl->tree_lock);
2380 return -ENOSPC;
2381 }
2382
2383 spin_lock(&cluster->lock);
2384
2385 /* someone already found a cluster, hooray */
2386 if (cluster->block_group) {
2387 ret = 0;
2388 goto out;
2389 }
2390
2391 INIT_LIST_HEAD(&bitmaps);
2392 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2393 bytes, min_bytes);
2394 if (ret)
2395 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2396 offset, bytes, min_bytes);
2397
2398 /* Clear our temporary list */
2399 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2400 list_del_init(&entry->list);
2401
2402 if (!ret) {
2403 atomic_inc(&block_group->count);
2404 list_add_tail(&cluster->block_group_list,
2405 &block_group->cluster_list);
2406 cluster->block_group = block_group;
2407 }
2408 out:
2409 spin_unlock(&cluster->lock);
2410 spin_unlock(&ctl->tree_lock);
2411
2412 return ret;
2413 }
2414
2415 /*
2416 * simple code to zero out a cluster
2417 */
2418 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2419 {
2420 spin_lock_init(&cluster->lock);
2421 spin_lock_init(&cluster->refill_lock);
2422 cluster->root = RB_ROOT;
2423 cluster->max_size = 0;
2424 INIT_LIST_HEAD(&cluster->block_group_list);
2425 cluster->block_group = NULL;
2426 }
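
/*
 * A hedged lifecycle sketch (hypothetical caller; cluster->refill_lock
 * handling and error paths are elided): a cluster is zeroed once, filled
 * against a block group, allocated from, and eventually returned.
 */
static void cluster_lifecycle_sketch(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_block_group_cache *block_group,
				     struct btrfs_free_cluster *cluster,
				     u64 offset, u64 bytes)
{
	u64 start;

	btrfs_init_free_cluster(cluster);
	if (!btrfs_find_space_cluster(trans, root, block_group, cluster,
				      offset, bytes, 0)) {
		start = btrfs_alloc_from_cluster(block_group, cluster,
						 bytes, offset);
		/* ... hand 'start' to the caller if it is non-zero ... */
		btrfs_return_cluster_to_free_space(block_group, cluster);
	}
}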
2427
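/*
 * Walk [start, end) of the block group's free space and discard any run
 * of at least minlen bytes, temporarily reserving each run so the
 * allocator cannot hand it out while the discard is in flight. *trimmed
 * returns the number of bytes actually discarded.
 */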
2428 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2429 u64 *trimmed, u64 start, u64 end, u64 minlen)
2430 {
2431 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2432 struct btrfs_free_space *entry = NULL;
2433 struct btrfs_fs_info *fs_info = block_group->fs_info;
2434 u64 bytes = 0;
2435 u64 actually_trimmed;
2436 int ret = 0;
2437
2438 *trimmed = 0;
2439
2440 while (start < end) {
2441 spin_lock(&ctl->tree_lock);
2442
2443 if (ctl->free_space < minlen) {
2444 spin_unlock(&ctl->tree_lock);
2445 break;
2446 }
2447
2448 entry = tree_search_offset(ctl, start, 0, 1);
2449 if (!entry)
2450 entry = tree_search_offset(ctl,
2451 offset_to_bitmap(ctl, start),
2452 1, 1);
2453
2454 if (!entry || entry->offset >= end) {
2455 spin_unlock(&ctl->tree_lock);
2456 break;
2457 }
2458
2459 if (entry->bitmap) {
2460 ret = search_bitmap(ctl, entry, &start, &bytes);
2461 if (!ret) {
2462 if (start >= end) {
2463 spin_unlock(&ctl->tree_lock);
2464 break;
2465 }
2466 bytes = min(bytes, end - start);
2467 bitmap_clear_bits(ctl, entry, start, bytes);
2468 if (entry->bytes == 0)
2469 free_bitmap(ctl, entry);
2470 } else {
2471 start = entry->offset + BITS_PER_BITMAP *
2472 block_group->sectorsize;
2473 spin_unlock(&ctl->tree_lock);
2474 ret = 0;
2475 continue;
2476 }
2477 } else {
2478 start = entry->offset;
2479 bytes = min(entry->bytes, end - start);
2480 unlink_free_space(ctl, entry);
2481 kmem_cache_free(btrfs_free_space_cachep, entry);
2482 }
2483
2484 spin_unlock(&ctl->tree_lock);
2485
2486 if (bytes >= minlen) {
2487 struct btrfs_space_info *space_info;
2488 int update = 0;
2489
2490 space_info = block_group->space_info;
2491 spin_lock(&space_info->lock);
2492 spin_lock(&block_group->lock);
2493 if (!block_group->ro) {
2494 block_group->reserved += bytes;
2495 space_info->bytes_reserved += bytes;
2496 update = 1;
2497 }
2498 spin_unlock(&block_group->lock);
2499 spin_unlock(&space_info->lock);
2500
2501 ret = btrfs_error_discard_extent(fs_info->extent_root,
2502 start,
2503 bytes,
2504 &actually_trimmed);
2505
2506 btrfs_add_free_space(block_group, start, bytes);
2507 if (update) {
2508 spin_lock(&space_info->lock);
2509 spin_lock(&block_group->lock);
2510 if (block_group->ro)
2511 space_info->bytes_readonly += bytes;
2512 block_group->reserved -= bytes;
2513 space_info->bytes_reserved -= bytes;
2514 spin_unlock(&space_info->lock);
2515 spin_unlock(&block_group->lock);
2516 }
2517
2518 if (ret)
2519 break;
2520 *trimmed += actually_trimmed;
2521 }
2522 start += bytes;
2523 bytes = 0;
2524
2525 if (fatal_signal_pending(current)) {
2526 ret = -ERESTARTSYS;
2527 break;
2528 }
2529
2530 cond_resched();
2531 }
2532
2533 return ret;
2534 }
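
/*
 * A hedged sketch of a trim caller (hypothetical; modeled on how an
 * fstrim-style ioctl would walk block groups): trim [start, end) of one
 * group and accumulate the bytes actually discarded. *total stays valid
 * even when btrfs_trim_block_group() fails part way through.
 */
static int trim_one_block_group(struct btrfs_block_group_cache *block_group,
				u64 start, u64 end, u64 minlen, u64 *total)
{
	u64 trimmed = 0;
	int ret;

	ret = btrfs_trim_block_group(block_group, &trimmed, start, end,
				     minlen);
	*total += trimmed;
	return ret;
}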
2535
2536 /*
2537 * Find the left-most item in the cache tree, and then return the
2538 * smallest inode number in the item.
2539 *
2540 * Note: the returned inode number may not be the smallest one in
2541 * the tree, if the left-most item is a bitmap.
2542 */
2543 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2544 {
2545 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2546 struct btrfs_free_space *entry = NULL;
2547 u64 ino = 0;
2548
2549 spin_lock(&ctl->tree_lock);
2550
2551 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2552 goto out;
2553
2554 entry = rb_entry(rb_first(&ctl->free_space_offset),
2555 struct btrfs_free_space, offset_index);
2556
2557 if (!entry->bitmap) {
2558 ino = entry->offset;
2559
2560 unlink_free_space(ctl, entry);
2561 entry->offset++;
2562 entry->bytes--;
2563 if (!entry->bytes)
2564 kmem_cache_free(btrfs_free_space_cachep, entry);
2565 else
2566 link_free_space(ctl, entry);
2567 } else {
2568 u64 offset = 0;
2569 u64 count = 1;
2570 int ret;
2571
2572 ret = search_bitmap(ctl, entry, &offset, &count);
2573 BUG_ON(ret);
2574
2575 ino = offset;
2576 bitmap_clear_bits(ctl, entry, offset, 1);
2577 if (entry->bytes == 0)
2578 free_bitmap(ctl, entry);
2579 }
2580 out:
2581 spin_unlock(&ctl->tree_lock);
2582
2583 return ino;
2584 }
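
/*
 * A hedged sketch (hypothetical caller): a return of 0 from
 * btrfs_find_ino_for_alloc() means the free-ino cache had nothing to
 * offer, so fall back to whatever objectid the caller would otherwise
 * have used.
 */
static u64 pick_ino_sketch(struct btrfs_root *root, u64 fallback_ino)
{
	u64 ino = btrfs_find_ino_for_alloc(root);

	return ino ? ino : fallback_ino;
}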
2585
2586 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2587 struct btrfs_path *path)
2588 {
2589 struct inode *inode = NULL;
2590
2591 spin_lock(&root->cache_lock);
2592 if (root->cache_inode)
2593 inode = igrab(root->cache_inode);
2594 spin_unlock(&root->cache_lock);
2595 if (inode)
2596 return inode;
2597
2598 inode = __lookup_free_space_inode(root, path, 0);
2599 if (IS_ERR(inode))
2600 return inode;
2601
2602 spin_lock(&root->cache_lock);
2603 if (!btrfs_fs_closing(root->fs_info))
2604 root->cache_inode = igrab(inode);
2605 spin_unlock(&root->cache_lock);
2606
2607 return inode;
2608 }
2609
2610 int create_free_ino_inode(struct btrfs_root *root,
2611 struct btrfs_trans_handle *trans,
2612 struct btrfs_path *path)
2613 {
2614 return __create_free_space_inode(root, trans, path,
2615 BTRFS_FREE_INO_OBJECTID, 0);
2616 }
2617
2618 int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2619 {
2620 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2621 struct btrfs_path *path;
2622 struct inode *inode;
2623 int ret = 0;
2624 u64 root_gen = btrfs_root_generation(&root->root_item);
2625
2626 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2627 return 0;
2628
2629 /*
2630 * If we're unmounting, just return: this searches the normal root
2631 * rather than the commit root, and we could deadlock.
2632 */
2633 if (btrfs_fs_closing(fs_info))
2634 return 0;
2635
2636 path = btrfs_alloc_path();
2637 if (!path)
2638 return 0;
2639
2640 inode = lookup_free_ino_inode(root, path);
2641 if (IS_ERR(inode))
2642 goto out;
2643
2644 if (root_gen != BTRFS_I(inode)->generation)
2645 goto out_put;
2646
2647 ret = __load_free_space_cache(root, inode, ctl, path, 0);
2648
2649 if (ret < 0)
2650 printk(KERN_ERR "btrfs: failed to load free ino cache for "
2651 "root %llu\n", root->root_key.objectid);
2652 out_put:
2653 iput(inode);
2654 out:
2655 btrfs_free_path(path);
2656 return ret;
2657 }
2658
2659 int btrfs_write_out_ino_cache(struct btrfs_root *root,
2660 struct btrfs_trans_handle *trans,
2661 struct btrfs_path *path)
2662 {
2663 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2664 struct inode *inode;
2665 int ret;
2666
2667 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2668 return 0;
2669
2670 inode = lookup_free_ino_inode(root, path);
2671 if (IS_ERR(inode))
2672 return 0;
2673
2674 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2675 if (ret) {
2676 btrfs_delalloc_release_metadata(inode, inode->i_size);
2677 #ifdef DEBUG
2678 printk(KERN_ERR "btrfs: failed to write free ino cache "
2679 "for root %llu\n", root->root_key.objectid);
2680 #endif
2681 }
2682
2683 iput(inode);
2684 return ret;
2685 }