struct list_head *head);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void drbd_flush_workqueue(struct drbd_conf *mdev);
+extern void drbd_flush_workqueue(struct drbd_tconn *tconn);
/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
* mess with get_fs/set_fs, we know we are KERNEL_DS always. */
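/* For illustration only: a hedged sketch of the kind of wrapper the comment
 * above presumably sits over (the helper name drbd_setsockopt and its body
 * are assumptions; the real definition may differ). Since the caller is a
 * kernel thread whose address limit is already KERNEL_DS, the kernel-space
 * buffer can be handed to the sockopt calls with a __force __user cast
 * instead of a get_fs()/set_fs() dance. Needs <net/sock.h> and <net/tcp.h>. */
static int drbd_setsockopt(struct socket *sock, int level, int optname,
			   char *optval, int optlen)
{
	char __user *uoptval = (char __user __force *)optval;

	/* sock_setsockopt() only handles SOL_SOCKET options; dispatch TCP
	 * options to the protocol handler, as kernel_setsockopt() would. */
	if (level == SOL_TCP)
		return tcp_setsockopt(sock->sk, level, optname, uoptval, optlen);
	return sock_setsockopt(sock, level, optname, uoptval, optlen);
}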
/* serialize (re)configuration: CONFIG_PENDING is used as a bit lock,
 * and DEVICE_DYING means a previous instance is still being torn down */
wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
drbd_thread_start(&mdev->tconn->worker);
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(mdev->tconn);
}
/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
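/* Hedged sketch of that "reconfig done" counterpart: it either stops the
 * worker again (device still unconfigured) or clears CONFIG_PENDING, and
 * wakes the state_wait waiters seen above. The function name and the exact
 * "still unconfigured" condition are assumptions; the real code in
 * drbd_nl.c may differ. */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		/* still unconfigured: mark dying and stop the worker again */
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->tconn->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	wake_up(&mdev->state_wait);
}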
/* also wait for the last barrier ack. */
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
/* and for any other previously queued work */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(mdev->tconn);
rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
retcode = rv; /* FIXME: Type mismatch. */
}
}
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(mdev->tconn);
spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->tconn->net_conf != NULL) {
retcode = ERR_NET_CONFIGURED;
drbd_md_sync(mdev);
}
-void drbd_flush_workqueue(struct drbd_conf *mdev)
+void drbd_flush_workqueue(struct drbd_tconn *tconn)
{
struct drbd_wq_barrier barr;

barr.w.cb = w_prev_work_done;	/* completes barr.done when the worker runs it */
init_completion(&barr.done);
- drbd_queue_work(&mdev->tconn->data.work, &barr.w);
+ drbd_queue_work(&tconn->data.work, &barr.w);
wait_for_completion(&barr.done);
}
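/* Hedged sketch of the barrier callback queued above; the real
 * w_prev_work_done lives in drbd_worker.c, and its exact signature may
 * differ at this point of the mdev -> tconn conversion. The flush works by
 * queuing a no-op work item behind everything already on the queue and
 * sleeping until the worker has executed it. */
int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);

	complete(&b->done);	/* unblocks the drbd_flush_workqueue() caller */
	return 1;
}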
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
* w_make_resync_request etc. which may still be on the worker queue
* to be "canceled" */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(mdev->tconn);
/* This also does reclaim_net_ee(). If we do this too early, we might
 * miss some resync ee and pages. */