struct xen_blkif *blkif = arg;
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
+ int ret;
xen_blkif_get(blkif);
blkif->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
- if (do_block_io_op(blkif))
+ ret = do_block_io_op(blkif);
+ if (ret > 0)
blkif->waiting_reqs = 1;
+ /* -EACCES from do_block_io_op() means the frontend handed us a
+  * bogus ring index: do no further work, just park the thread
+  * until the backend is torn down. */
+ if (ret == -EACCES)
+ wait_event_interruptible(blkif->shutdown_wq,
+ kthread_should_stop());
purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent &&
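
The hunk above changes xen_blkif_schedule(), the per-device service
thread in drivers/block/xen-blkback/blkback.c: instead of treating
do_block_io_op()'s result as a boolean, it keeps the "more work
pending" meaning for positive returns and adds a new -EACCES case in
which the thread stops touching the ring entirely and sleeps until the
backend is torn down. A minimal sketch of that parking pattern, with
demo_* names standing in for the real blkback state:

    #include <linux/errno.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>

    /* Hypothetical per-device state; stands in for struct xen_blkif. */
    struct demo_backend {
            wait_queue_head_t shutdown_wq;
    };

    /* Stand-in for do_block_io_op(); returns -EACCES on a bad ring. */
    static int demo_consume_ring(struct demo_backend *be);

    static int demo_thread(void *arg)
    {
            struct demo_backend *be = arg;

            while (!kthread_should_stop()) {
                    if (demo_consume_ring(be) == -EACCES) {
                            /* Ring poisoned: do no further work, wait
                             * for the toolstack to tear us down. */
                            wait_event_interruptible(be->shutdown_wq,
                                                     kthread_should_stop());
                            break;
                    }
            }
            return 0;
    }
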
rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
+ /* A well-behaved frontend can never have more than RING_SIZE()
+  * requests outstanding, so a producer index further ahead than
+  * that is bogus and the ring is abandoned for good. */
+ if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
+ rc = blk_rings->common.rsp_prod_pvt;
+ pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+ rp, rc, rp - rc, blkif->vbd.pdevice);
+ return -EACCES;
+ }
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
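
This is the producer-side sanity check in __do_block_io_op(): rp is
the request producer index the frontend advertises, and rsp_prod_pvt
is the backend's private count of responses produced. Since the
frontend can never legitimately have more than a ring's worth of
requests in flight, a gap larger than the ring size proves the index
is bogus, and the function bails out with -EACCES before the consumer
loop ever runs. Paraphrasing the macro (the real definition lives in
the Xen ring header, include/xen/interface/io/ring.h; treat the exact
spelling here as an assumption):

    /* Indices are free-running counters, so the unsigned subtraction
     * also behaves correctly across wraparound. */
    #define DEMO_RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
            (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

Note that rc is reloaded from rsp_prod_pvt purely for the diagnostic:
the pr_warn() prints how far ahead of the backend the frontend claims
to be.
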
unsigned long long st_wr_sect;
wait_queue_head_t waiting_to_free;
+ /* Thread shutdown wait queue. */
+ wait_queue_head_t shutdown_wq;
};
struct seg_buf {
}
spin_lock_init(&blkif->pending_free_lock);
init_waitqueue_head(&blkif->pending_free_wq);
+ init_waitqueue_head(&blkif->shutdown_wq);
return blkif;
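
The two declaration hunks are straightforward: shutdown_wq joins
struct xen_blkif in common.h next to the existing wait queues, and
xen_blkif_alloc() initializes it alongside pending_free_wq. A wait
queue head embedded in a long-lived structure only needs
init_waitqueue_head() once before first use and has nothing to tear
down afterwards, roughly (illustrative demo_ names again):

    #include <linux/sched.h>
    #include <linux/wait.h>

    struct demo_backend {
            struct task_struct *xenblkd;    /* service thread */
            wait_queue_head_t shutdown_wq;  /* parked on after -EACCES */
    };

    static void demo_backend_init(struct demo_backend *be)
    {
            /* Must happen before the thread can ever sleep on the
             * queue; no matching teardown call is required. */
            init_waitqueue_head(&be->shutdown_wq);
    }
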
{
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
+ wake_up(&blkif->shutdown_wq);
blkif->xenblkd = NULL;
}
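
Finally, the disconnect path. One ordering detail worth noting:
kthread_stop() both sets the should-stop flag and wakes the target
task, so a thread blocked in wait_event_interruptible(shutdown_wq,
kthread_should_stop()) re-evaluates its condition, sees it true, and
exits; kthread_stop() then waits for that exit. By the time the
explicit wake_up() here runs, the thread is already gone, so it reads
as a harmless safety net rather than the thing that unparks the
thread. Condensed sketch of the teardown (demo_ names illustrative):

    static void demo_disconnect(struct demo_backend *be)
    {
            if (be->xenblkd) {
                    /* Wakes the parked thread and blocks until it
                     * has returned from its thread function. */
                    kthread_stop(be->xenblkd);
                    wake_up(&be->shutdown_wq); /* redundant, but harmless */
                    be->xenblkd = NULL;
            }
    }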