struct drbd_tconn { /* is a resource from the config file */
char *name; /* Resource name */
struct list_head all_tconn; /* List of all drbd_tconn, protected by global_state_lock */
struct idr volumes; /* <tconn, vnr> to mdev mapping */
enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+ struct mutex cstate_mutex; /* Protects graceful disconnects */
unsigned long flags;
struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
unsigned long comm_bm_set; /* communicated number of set bits. */
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
- struct mutex state_mutex;
+ struct mutex own_state_mutex;
+ struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
char congestion_reason; /* Why we were congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
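
Taken together, these fields implement a small indirection pattern: every device embeds its own mutex, but all locking goes through a pointer that may instead alias a connection-wide mutex. A minimal, self-contained sketch of the same pattern in userspace C (pthreads instead of kernel mutexes; struct and function names here are illustrative, not DRBD's; only the version threshold 100 is taken from the patch):

#include <pthread.h>
#include <stdio.h>

struct conn {				/* stands in for drbd_tconn */
	pthread_mutex_t cstate_mutex;	/* connection-wide lock */
};

struct device {				/* stands in for drbd_conf */
	struct conn *conn;
	pthread_mutex_t own_state_mutex;
	pthread_mutex_t *state_mutex;	/* own_state_mutex or conn->cstate_mutex */
};

static void device_init(struct device *dev, struct conn *conn)
{
	dev->conn = conn;
	pthread_mutex_init(&dev->own_state_mutex, NULL);
	dev->state_mutex = &dev->own_state_mutex;	/* default: per device */
}

/* After the handshake: old peers get connection-wide serialization. */
static void device_pick_state_mutex(struct device *dev, int agreed_version)
{
	dev->state_mutex = agreed_version < 100 ?
		&dev->conn->cstate_mutex : &dev->own_state_mutex;
}

int main(void)
{
	struct conn conn;
	struct device dev;

	pthread_mutex_init(&conn.cstate_mutex, NULL);
	device_init(&dev, &conn);
	device_pick_state_mutex(&dev, 96);	/* pretend pre-100 peer */

	pthread_mutex_lock(dev.state_mutex);	/* takes conn.cstate_mutex */
	printf("state change serialized connection-wide\n");
	pthread_mutex_unlock(dev.state_mutex);
	return 0;
}
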
atomic_set(&mdev->ap_in_flight, 0);
mutex_init(&mdev->md_io_mutex);
- mutex_init(&mdev->state_mutex);
+ mutex_init(&mdev->own_state_mutex);
+ mdev->state_mutex = &mdev->own_state_mutex;
spin_lock_init(&mdev->al_lock);
spin_lock_init(&mdev->peer_seq_lock);
goto fail;
tconn->cstate = C_STANDALONE;
+ mutex_init(&tconn->cstate_mutex);
spin_lock_init(&tconn->req_lock);
atomic_set(&tconn->net_cnt, 0);
init_waitqueue_head(&tconn->net_cnt_wait);
if (new_role == R_PRIMARY)
request_ping(mdev->tconn); /* Detect a dead peer ASAP */
- mutex_lock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
- mutex_unlock(&mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
return rv;
}
return 0;
}
- mutex_lock(&mdev->state_mutex); /* Protects us against concurrent state changes. */
+ mutex_lock(mdev->state_mutex); /* Protects us against concurrent state changes. */
if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK;
out_dec:
put_ldev(mdev);
out:
- mutex_unlock(&mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
reply->ret_code = retcode;
return 0;
atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0;
+ mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+ &mdev->tconn->cstate_mutex :
+ &mdev->own_state_mutex;
+
ok &= drbd_send_sync_param(mdev, &mdev->sync_conf);
ok &= drbd_send_sizes(mdev, 0, 0);
ok &= drbd_send_uuids(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+
return !ok;
}
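
This is where the pointer gets retargeted: once the handshake has fixed agreed_pro_version, every device on a pre-100 connection shares tconn->cstate_mutex, so a state change on any volume serializes against all volumes of that connection, while newer peers keep the cheaper per-device own_state_mutex. The hypothetical device_pick_state_mutex() in the sketch above mirrors this ternary; the rationale (peers older than protocol 100 presumably cannot cope with concurrent per-volume state changes) is an inference from the version check, not something the code states.
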
/* Before we test the disk state we wait until a possibly ongoing
cluster wide state change is finished. That is important if we are
primary and are detaching from our disk. We need to see the
new disk state... */
- mutex_lock(&mdev->state_mutex);
- mutex_unlock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
val.i = be32_to_cpu(p->val);
if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
- mutex_is_locked(&mdev->state_mutex)) {
+ mutex_is_locked(mdev->state_mutex)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
return true;
}
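
Here the receiver peeks at the (possibly shared) mutex with mutex_is_locked() to detect a local state change in flight and reject the peer's concurrent request. Userspace pthreads has no such predicate; a hedged approximation for experimenting with the pattern outside the kernel:

#include <pthread.h>
#include <errno.h>

/* Rough stand-in for the kernel's mutex_is_locked(): EBUSY from
 * trylock means someone holds the lock; on success we acquired it
 * ourselves and must release it again. Inherently racy, so like the
 * kernel check it can only serve as a hint. */
static int mutex_is_locked_hint(pthread_mutex_t *m)
{
	if (pthread_mutex_trylock(m) == EBUSY)
		return 1;
	pthread_mutex_unlock(m);
	return 0;
}
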
init_completion(&done);
if (f & CS_SERIALIZE)
- mutex_lock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
spin_lock_irqsave(&mdev->tconn->req_lock, flags);
os = mdev->state;
abort:
if (f & CS_SERIALIZE)
- mutex_unlock(&mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
return rv;
}
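
A compilable fragment of the conditional-serialization shape used here, again in userspace C with an illustrative CS_SERIALIZE value (the transition body is elided):

#include <pthread.h>

enum chg_flags { CS_SERIALIZE = 1 };	/* illustrative value */

/* Lock through whichever mutex state_mutex points at, but only when
 * the caller asked for serialization, as in the hunk above. */
static int change_state(pthread_mutex_t *state_mutex, int flags)
{
	int rv = 0;

	if (flags & CS_SERIALIZE)
		pthread_mutex_lock(state_mutex);
	/* ... evaluate and commit the state transition here ... */
	if (flags & CS_SERIALIZE)
		pthread_mutex_unlock(state_mutex);
	return rv;
}
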
if (current == mdev->tconn->worker.task) {
/* The worker should not sleep waiting for state_mutex,
which can take a long time */
- if (!mutex_trylock(&mdev->state_mutex)) {
+ if (!mutex_trylock(mdev->state_mutex)) {
set_bit(B_RS_H_DONE, &mdev->flags);
mdev->start_resync_timer.expires = jiffies + HZ/5;
add_timer(&mdev->start_resync_timer);
return;
}
} else {
- mutex_lock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
}
clear_bit(B_RS_H_DONE, &mdev->flags);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
- mutex_unlock(&mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
return;
}
drbd_md_sync(mdev);
}
put_ldev(mdev);
- mutex_unlock(&mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
}
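
The worker must not block on a mutex that may now be contended connection-wide, hence the trylock-and-retry dance above. A small pthread sketch of the same shape (names are illustrative; the timer re-arm is only hinted at in a comment):

#include <pthread.h>
#include <stdbool.h>

/* Worker threads use trylock and back off on contention; everyone
 * else may simply sleep on the (possibly shared) state mutex. */
static bool start_resync(pthread_mutex_t *state_mutex, bool am_worker)
{
	if (am_worker) {
		if (pthread_mutex_trylock(state_mutex) != 0)
			return false;	/* caller re-arms a retry timer */
	} else {
		pthread_mutex_lock(state_mutex);
	}
	/* ... resync setup would run here ... */
	pthread_mutex_unlock(state_mutex);
	return true;
}
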
static int _worker_dying(int vnr, void *p, void *data)