goto fail;
drbd_clear_interval(&e->i);
+ e->i.size = data_size;
+ e->i.sector = sector;
+ e->i.waiting = false;
+
e->epoch = NULL;
e->mdev = mdev;
e->pages = page;
atomic_set(&e->pending_bios, 0);
- e->i.size = data_size;
e->flags = 0;
- e->i.sector = sector;
/*
* The block_id is opaque to the receiver. It is not endianness
* converted, and sent back to the sender unchanged.
return err;
}
+/*
+ * Remove an epoch entry's interval from the write_requests tree and clear it.
+ *
+ * All visible call sites take mdev->tconn->req_lock around this call, so the
+ * tree manipulation is assumed to require that lock held — TODO confirm at
+ * any non-visible callers.
+ *
+ * If another context flagged i->waiting (set when a conflicting local request
+ * is blocked on this epoch entry), wake the sleepers on mdev->misc_wait so
+ * they can re-check for the conflict having gone away.
+ */
+static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+ struct drbd_epoch_entry *e)
+{
+ struct drbd_interval *i = &e->i;
+
+ drbd_remove_interval(&mdev->write_requests, i);
+ /* NOTE(review): i->waiting is read after drbd_clear_interval(); this
+ * assumes drbd_clear_interval() only resets tree linkage and does not
+ * touch the waiting flag — verify against its definition. */
+ drbd_clear_interval(i);
+
+ /* Wake up any processes waiting for this epoch entry to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packet cmd,
unsigned int data_size)
{
if (mdev->tconn->net_conf->two_primaries) {
spin_lock_irq(&mdev->tconn->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
- drbd_remove_interval(&mdev->epoch_entries, &e->i);
- drbd_clear_interval(&e->i);
+ drbd_remove_epoch_entry_interval(mdev, e);
spin_unlock_irq(&mdev->tconn->req_lock);
} else
D_ASSERT(drbd_interval_empty(&e->i));
spin_lock_irq(&mdev->tconn->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
- drbd_remove_interval(&mdev->epoch_entries, &e->i);
- drbd_clear_interval(&e->i);
+ drbd_remove_epoch_entry_interval(mdev, e);
spin_unlock_irq(&mdev->tconn->req_lock);
dec_unacked(mdev);
}
if (signal_pending(current)) {
- drbd_remove_interval(&mdev->epoch_entries, &e->i);
- drbd_clear_interval(&e->i);
-
+ drbd_remove_epoch_entry_interval(mdev, e);
spin_unlock_irq(&mdev->tconn->req_lock);
-
finish_wait(&mdev->misc_wait, &wait);
goto out_interrupted;
}
/* Indicate to wake up mdev->misc_wait upon completion. */
- req2->rq_state |= RQ_COLLISION;
+ i->waiting = true;
spin_unlock_irq(&mdev->tconn->req_lock);
if (first) {
dev_err(DEV, "submit failed, triggering re-connect\n");
spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
- drbd_remove_interval(&mdev->epoch_entries, &e->i);
- drbd_clear_interval(&e->i);
+ drbd_remove_epoch_entry_interval(mdev, e);
spin_unlock_irq(&mdev->tconn->req_lock);
if (e->flags & EE_CALL_AL_COMPLETE_IO)
drbd_al_complete_io(mdev, e->i.sector);
req->mdev = mdev;
req->master_bio = bio_src;
req->epoch = 0;
+
drbd_clear_interval(&req->i);
req->i.sector = bio_src->bi_sector;
req->i.size = bio_src->bi_size;
+ req->i.waiting = false;
+
INIT_LIST_HEAD(&req->tl_requests);
INIT_LIST_HEAD(&req->w.list);
(s & RQ_NET_SENT) != 0 &&
req->epoch == mdev->tconn->newest_tle->br_number)
queue_barrier(mdev);
-
- /* Wake up any processes waiting for this request to complete. */
- if ((s & RQ_NET_DONE) && (s & RQ_COLLISION))
- wake_up(&mdev->misc_wait);
}
void complete_master_bio(struct drbd_conf *mdev,
dec_ap_bio(mdev);
}
+
+/*
+ * Remove a request's interval from the given tree (either
+ * mdev->write_requests or mdev->read_requests, per the caller at the
+ * root-selection site) and wake any waiters.
+ *
+ * Unlike the epoch-entry variant, the interval is not cleared here —
+ * presumably the request object is torn down shortly after; verify at the
+ * caller in __req_mod's completion path.
+ *
+ * i->waiting is set by contexts that block on this request (they then sleep
+ * on mdev->misc_wait), so removal from the tree is the point at which they
+ * must be re-woken to re-evaluate the conflict.
+ */
+static void drbd_remove_request_interval(struct rb_root *root,
+ struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->mdev;
+ struct drbd_interval *i = &req->i;
+
+ drbd_remove_interval(root, i);
+
+ /* Wake up any processes waiting for this request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
/* Helper for __req_mod().
* Set m->bio to the master bio, if it is fit to be completed,
* or leave it alone (it is initialized to NULL in __req_mod),
root = &mdev->write_requests;
else
root = &mdev->read_requests;
- drbd_remove_interval(root, &req->i);
+ drbd_remove_request_interval(root, req);
} else
D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);