*/
struct btrfs_workers generic_worker;
struct btrfs_workqueue_struct *workers;
- struct btrfs_workers delalloc_workers;
+ struct btrfs_workqueue_struct *delalloc_workers;
struct btrfs_workers flush_workers;
struct btrfs_workers endio_workers;
struct btrfs_workers endio_meta_workers;
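For reference, this conversion targets the btrfs_workqueue API introduced earlier in this series. A minimal sketch of the calls the converted code relies on, with signatures inferred from the call sites in this patch (simplified, not the authoritative declarations):

struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name, int flags,
						     int max_active,
						     int thresh);
void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *));
void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work);
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);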
{
btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->fixup_workers);
- btrfs_stop_workers(&fs_info->delalloc_workers);
+ btrfs_destroy_workqueue(fs_info->delalloc_workers);
btrfs_destroy_workqueue(fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_meta_workers);
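The teardown side swaps btrfs_stop_workers(), which only stops threads on an embedded pool, for btrfs_destroy_workqueue(), which must also free the separately allocated wrapper. A hypothetical sketch of the semantics the caller depends on, assuming the wrapper keeps its kernel workqueue in a member named normal_wq (illustrative only, not the series' actual body):

void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
{
	/* destroy_workqueue() drains all pending work items first,
	 * so in-flight delalloc work finishes before the free. */
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}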
btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
max_active, 16);
- btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
- fs_info->thread_pool_size, NULL);
+ fs_info->delalloc_workers =
+ btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
fs_info->thread_pool_size, NULL);
*/
fs_info->submit_workers.idle_thresh = 64;
- fs_info->delalloc_workers.idle_thresh = 2;
- fs_info->delalloc_workers.ordered = 1;
-
btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
&fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_workers, "endio",
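The two removed knobs are absorbed by the new API: the old idle_thresh of 2 becomes the thresh argument passed to btrfs_alloc_workqueue() above, and ordered execution appears to be requested per work item (via the ordered_func/ordered_free callbacks handed to btrfs_init_work() below) rather than through a pool-wide ordered flag. Before/after, using only calls visible in this patch:

/* Old framework: pool-wide tuning after init. */
btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
		   fs_info->thread_pool_size, NULL);
fs_info->delalloc_workers.idle_thresh = 2;
fs_info->delalloc_workers.ordered = 1;

/* New framework: thresh is a constructor argument; ordering is set
 * per work item when the work is initialized. */
fs_info->delalloc_workers =
	btrfs_alloc_workqueue("delalloc", flags, max_active, 2);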
*/
ret = btrfs_start_workers(&fs_info->generic_worker);
ret |= btrfs_start_workers(&fs_info->submit_workers);
- ret |= btrfs_start_workers(&fs_info->delalloc_workers);
ret |= btrfs_start_workers(&fs_info->fixup_workers);
ret |= btrfs_start_workers(&fs_info->endio_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
err = -ENOMEM;
goto fail_sb_buffer;
}
- if (!(fs_info->workers)) {
+ if (!(fs_info->workers && fs_info->delalloc_workers)) {
err = -ENOMEM;
goto fail_sb_buffer;
}
u64 start;
u64 end;
struct list_head extents;
- struct btrfs_work work;
+ struct btrfs_work_struct work;
};
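The work item stays embedded in struct async_cow so the callbacks can recover their context with container_of(); only its type changes. The struct as visible in this patch (members not shown in the diff are elided; inode is assumed from the async_cow->inode use below):

struct async_cow {
	struct inode *inode;		/* used by the submit path below */
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work_struct work;	/* was: struct btrfs_work */
};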
static noinline int add_async_extent(struct async_cow *cow,
/*
* work queue callback to start compression on a file and pages
*/
-static noinline void async_cow_start(struct btrfs_work *work)
+static noinline void async_cow_start(struct btrfs_work_struct *work)
{
struct async_cow *async_cow;
int num_added = 0;
/*
* work queue callback to submit previously compressed pages
*/
-static noinline void async_cow_submit(struct btrfs_work *work)
+static noinline void async_cow_submit(struct btrfs_work_struct *work)
{
struct async_cow *async_cow;
struct btrfs_root *root;
submit_compressed_extents(async_cow->inode, async_cow);
}
-static noinline void async_cow_free(struct btrfs_work *work)
+static noinline void async_cow_free(struct btrfs_work_struct *work)
{
struct async_cow *async_cow;
async_cow = container_of(work, struct async_cow, work);
async_cow->end = cur_end;
INIT_LIST_HEAD(&async_cow->extents);
- async_cow->work.func = async_cow_start;
- async_cow->work.ordered_func = async_cow_submit;
- async_cow->work.ordered_free = async_cow_free;
- async_cow->work.flags = 0;
+ btrfs_init_work(&async_cow->work, async_cow_start,
+ async_cow_submit, async_cow_free);
nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
- btrfs_queue_worker(&root->fs_info->delalloc_workers,
- &async_cow->work);
+ btrfs_queue_work(root->fs_info->delalloc_workers,
+ &async_cow->work);
if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
wait_event(root->fs_info->async_submit_wait,
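Taken together, the submission path now follows the new framework's three-phase lifecycle: func runs first (compression), ordered_func runs in queueing order once earlier items complete (submitting the compressed extents), and ordered_free releases the item. Condensed, using only calls visible in this patch:

btrfs_init_work(&async_cow->work,
		async_cow_start,	/* func: compress the range */
		async_cow_submit,	/* ordered_func: submit in order */
		async_cow_free);	/* ordered_free: release the item */
btrfs_queue_work(root->fs_info->delalloc_workers, &async_cow->work);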
btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
- btrfs_set_max_workers(&fs_info->delalloc_workers, new_pool_size);
+ btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
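Resizing on remount maps one-to-one: converted pools go through btrfs_workqueue_set_max() while unconverted ones keep btrfs_set_max_workers(), so thread_pool_size tuning keeps working. A plausible, purely illustrative implementation of the setter, again assuming a hypothetical normal_wq member:

void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
{
	/* Delegate to the kernel workqueue's own concurrency limit. */
	workqueue_set_max_active(wq->normal_wq, max);
}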