fs_info->thread_pool_size),
&fs_info->generic_worker);
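+ /*
+  * dedicated worker pool for block group caching, capped at two
+  * threads, replacing the per-block-group caching kthreads
+  */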
+ btrfs_init_workers(&fs_info->caching_workers, "cache",
+ 2, &fs_info->generic_worker);
+
/* a higher idle thresh on the submit workers makes it much more
* likely that bios will be sent down in a sane order to the
* devices
btrfs_start_workers(&fs_info->endio_write_workers, 1);
btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
btrfs_start_workers(&fs_info->delayed_workers, 1);
+ btrfs_start_workers(&fs_info->caching_workers, 1);
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
btrfs_stop_workers(&fs_info->delayed_workers);
+ btrfs_stop_workers(&fs_info->caching_workers);
fail_alloc:
kfree(fs_info->delayed_root);
fail_iput:
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
btrfs_stop_workers(&fs_info->delayed_workers);
+ btrfs_stop_workers(&fs_info->caching_workers);
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
return total_added;
}
-static int caching_kthread(void *data)
+static noinline void caching_thread(struct btrfs_work *work)
{
- struct btrfs_block_group_cache *block_group = data;
- struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
- struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_block_group_cache *block_group;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_caching_control *caching_ctl;
+ struct btrfs_root *extent_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key;
u32 nritems;
int ret = 0;
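+ /* the btrfs_work is embedded in the caching control; recover it */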
+ caching_ctl = container_of(work, struct btrfs_caching_control, work);
+ block_group = caching_ctl->block_group;
+ fs_info = block_group->fs_info;
+ extent_root = fs_info->extent_root;
+
path = btrfs_alloc_path();
if (!path)
- return -ENOMEM;
+ goto out;
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
free_excluded_extents(extent_root, block_group);
mutex_unlock(&caching_ctl->mutex);
+out:
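+ /* reached on both success and allocation failure: wake waiters, drop refs */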
wake_up(&caching_ctl->wait);
put_caching_control(caching_ctl);
- atomic_dec(&block_group->space_info->caching_threads);
btrfs_put_block_group(block_group);
-
- return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_caching_control *caching_ctl;
- struct task_struct *tsk;
int ret = 0;
smp_mb();
caching_ctl->progress = cache->key.objectid;
- /* one for caching kthread, one for caching block group list */
+ /* one for caching thread, one for caching block group list */
atomic_set(&caching_ctl->count, 2);
+ caching_ctl->work.func = caching_thread;
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
up_write(&fs_info->extent_commit_sem);
- atomic_inc(&cache->space_info->caching_threads);
btrfs_get_block_group(cache);
- tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
- cache->key.objectid);
- if (IS_ERR(tsk)) {
- ret = PTR_ERR(tsk);
- printk(KERN_ERR "error running thread %d\n", ret);
- BUG();
- }
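+ /* queue the caching work on the shared pool instead of running a kthread */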
+ btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
return ret;
}
init_waitqueue_head(&found->wait);
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
- atomic_set(&found->caching_threads, 0);
return 0;
}
}
/*
- * We only want to start kthread caching if we are at
- * the point where we will wait for caching to make
- * progress, or if our ideal search is over and we've
- * found somebody to start caching.
+ * The caching workers are limited to 2 threads, so we
+ * can queue as much work as we care to.
*/
- if (loop > LOOP_CACHING_NOWAIT ||
- (loop > LOOP_FIND_IDEAL &&
- atomic_read(&space_info->caching_threads) < 2)) {
+ if (loop > LOOP_FIND_IDEAL) {
ret = cache_block_group(block_group, trans,
orig_root, 0);
BUG_ON(ret);
if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
found_uncached_bg = false;
loop++;
- if (!ideal_cache_percent &&
- atomic_read(&space_info->caching_threads))
+ if (!ideal_cache_percent)
goto search;
/*