spin_lock_init(&mdev->al_lock);
spin_lock_init(&mdev->peer_seq_lock);
- spin_lock_init(&mdev->epoch_lock);
INIT_LIST_HEAD(&mdev->active_ee);
INIT_LIST_HEAD(&mdev->sync_ee);
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
mdev->tconn->receiver.t_state);
- /* no need to lock it, I'm the only thread alive */
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
mdev->al_writ_cnt =
mdev->bm_writ_cnt =
mdev->read_cnt =
kfree(mdev->p_uuid);
/* mdev->p_uuid = NULL; */
- kfree(mdev->current_epoch);
if (mdev->bitmap) /* should no longer be there. */
drbd_bm_cleanup(mdev);
__free_page(mdev->md_io_page);
if (!tl_init(tconn))
goto fail;
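+ /* Epochs are tracked per connection now, not per device: allocate
+  * the initial, empty epoch together with the connection object. */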
+ tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!tconn->current_epoch)
+ goto fail;
+ INIT_LIST_HEAD(&tconn->current_epoch->list);
+ tconn->epochs = 1;
+ spin_lock_init(&tconn->epoch_lock);
tconn->write_ordering = WO_bdev_flush;
tconn->cstate = C_STANDALONE;
return tconn;
fail:
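+ /* kfree(NULL) is a no-op, so this is safe even when we get here
+  * before current_epoch was allocated. */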
+ kfree(tconn->current_epoch);
tl_cleanup(tconn);
free_cpumask_var(tconn->cpu_mask);
drbd_free_socket(&tconn->meta);
{
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
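+ /* The final epoch must have drained by the time the last reference
+  * is dropped; it is freed together with the connection. */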
+ if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ kfree(tconn->current_epoch);
+
idr_destroy(&tconn->volumes);
free_cpumask_var(tconn->cpu_mask);
mdev->read_requests = RB_ROOT;
mdev->write_requests = RB_ROOT;
- mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!mdev->current_epoch)
- goto out_no_epoch;
-
- INIT_LIST_HEAD(&mdev->current_epoch->list);
- mdev->epochs = 1;
-
if (!idr_pre_get(&minors, GFP_KERNEL))
goto out_no_minor_idr;
if (idr_get_new_above(&minors, mdev, minor, &minor_got))
idr_remove(&minors, minor_got);
synchronize_rcu();
out_no_minor_idr:
- kfree(mdev->current_epoch);
-out_no_epoch:
drbd_bm_cleanup(mdev);
out_no_bitmap:
__free_page(mdev->md_io_page);
int epoch_size;
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
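+ /* The epoch list, the epoch count and their lock now live on the
+  * connection, so take them from the tconn. */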
+ struct drbd_tconn *tconn = mdev->tconn;
- spin_lock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
do {
next_epoch = NULL;
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
drbd_send_b_ack(epoch->mdev, epoch->barrier_nr, epoch_size);
- spin_lock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
}
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
dec_unacked(epoch->mdev);
- if (mdev->current_epoch != epoch) {
+ if (tconn->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- mdev->epochs--;
+ tconn->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
epoch = next_epoch;
} while (1);
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
return rv;
}
inc_unacked(mdev);
- mdev->current_epoch->barrier_nr = p->barrier;
- mdev->current_epoch->mdev = mdev;
- rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
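+ /* Remember the device this barrier arrived on: epoch->mdev is what
+  * drbd_may_finish_epoch() uses for drbd_send_b_ack() and
+  * dec_unacked() once the epoch drains. */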
+ tconn->current_epoch->barrier_nr = p->barrier;
+ tconn->current_epoch->mdev = mdev;
+ rv = drbd_may_finish_epoch(mdev, tconn->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
 * the activity log, which means it would not be resynced in case the
 * R_PRIMARY crashes now.
 * Therefore we must send the barrier_ack after the barrier request was
 * completed. */
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
drbd_flush(tconn);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
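+ /* GFP_NOIO: we are in receiver context, i.e. in the writeout path
+  * of the peer; recursing into our own I/O could deadlock. */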
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
}
- epoch = mdev->current_epoch;
+ epoch = tconn->current_epoch;
wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
D_ASSERT(atomic_read(&epoch->active) == 0);
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&mdev->epoch_lock);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
- list_add(&epoch->list, &mdev->current_epoch->list);
- mdev->current_epoch = epoch;
- mdev->epochs++;
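+ /* Take the connection-wide lock; link the new epoch only if the
+  * current one is still non-empty, it may have drained while we
+  * allocated. */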
+ spin_lock(&tconn->epoch_lock);
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &tconn->current_epoch->list);
+ tconn->current_epoch = epoch;
+ tconn->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
return 0;
}
err = wait_for_and_update_peer_seq(mdev, peer_seq);
drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
- atomic_inc(&mdev->current_epoch->epoch_size);
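+ /* Even a request we merely drain must be counted in the current
+  * epoch, so that the barrier accounting stays consistent with what
+  * the peer sent. */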
+ atomic_inc(&tconn->current_epoch->epoch_size);
err2 = drbd_drain_block(mdev, pi->size);
if (!err)
err = err2;
if (dp_flags & DP_MAY_SET_IN_SYNC)
peer_req->flags |= EE_MAY_SET_IN_SYNC;
- spin_lock(&mdev->epoch_lock);
- peer_req->epoch = mdev->current_epoch;
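+ /* Account the request in the connection-wide current epoch; the
+  * epoch_size and active counters keep the epoch alive until
+  * drbd_may_finish_epoch() sees it drain. */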
+ spin_lock(&tconn->epoch_lock);
+ peer_req->epoch = tconn->current_epoch;
atomic_inc(&peer_req->epoch->epoch_size);
atomic_inc(&peer_req->epoch->active);
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
rcu_read_lock();
tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
}
rcu_read_unlock();
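+ /* All volumes are disconnected at this point; only the current
+  * epoch may remain on the connection, and its size can be reset. */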
+ if (!list_empty(&tconn->current_epoch->list))
+ conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ /* No more EEs are in flight; it is safe to reset the epoch_size */
+ atomic_set(&tconn->current_epoch->epoch_size, 0);
+
conn_info(tconn, "Connection closed\n");
if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
- /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&mdev->current_epoch->epoch_size, 0);
- D_ASSERT(list_empty(&mdev->current_epoch->list));
-
return 0;
}