dt = MAX_SCHEDULE_TIMEOUT;
dt = wait_event_timeout(mdev->misc_wait,
- *done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+ *done || drbd_test_flag(mdev, FORCE_DETACH), dt);
if (dt == 0) {
dev_err(DEV, "meta-data IO operation timed out\n");
drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
mdev->md_io.done = 0;
mdev->md_io.error = -ENODEV;
- if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+ if ((rw & WRITE) && !drbd_test_flag(mdev, MD_NO_FUA))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
/* global flag bits */
-enum {
+enum drbd_flag {
CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
SIGNAL_ASENDER, /* whether asender wants to be interrupted */
SEND_PING, /* whether asender should send a ping asap */
* and potentially deadlock on, this drbd worker.
*/
DISCONNECT_SENT, /* Currently the last bit in this 32bit word */
+
+ /* keep last */
+ DRBD_N_FLAGS,
};
struct drbd_bitmap; /* opaque for drbd_conf */
};
struct drbd_conf {
- /* things that are stored as / read from meta data on disk */
- unsigned long flags;
+ unsigned long drbd_flags[(DRBD_N_FLAGS + BITS_PER_LONG - 1) / BITS_PER_LONG];
/* configured by drbdsetup */
struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
unsigned int local_max_bio_size;
};
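For reference, the replacement turns the old single flags word into an array sized from the enum's "keep last" sentinel, so the generic bitops can address more than BITS_PER_LONG flags. A minimal sketch of the same pattern, with made-up EXAMPLE_* names that are not part of this patch:

	/* Illustrative only: an enum sentinel sizing a bitops-compatible flag array.
	 * The kernel's DECLARE_BITMAP() macro performs the same round-up. */
	#include <linux/bitops.h>	/* BITS_PER_LONG, set_bit(), test_bit() */

	enum example_flag {
		EXAMPLE_FLAG_A,
		EXAMPLE_FLAG_B,
		/* keep last */
		EXAMPLE_N_FLAGS,
	};

	struct example_dev {
		/* round the flag count up to whole longs, as drbd_flags[] does above */
		unsigned long flags[(EXAMPLE_N_FLAGS + BITS_PER_LONG - 1) / BITS_PER_LONG];
	};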
+static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ clear_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ return test_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ return test_and_set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ return test_and_clear_bit(f, &mdev->drbd_flags[0]);
+}
+
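These accessors are thin wrappers around the atomic bitops, passing &mdev->drbd_flags[0] as the bitmap base; set_bit() and friends index into later array words automatically once a flag number reaches BITS_PER_LONG, so atomicity and return values are unchanged for callers. A conversion then looks like the BITMAP_IO_QUEUED guard further down, sketched here side by side:

	/* before: open-coded bitop on the old flags word */
	if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
		drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);

	/* after: same behaviour through the wrapper */
	if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
		drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);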
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
struct drbd_conf *mdev;
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
- !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ !drbd_test_and_set_flag(mdev, CLUSTER_ST_CHANGE));
}
static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
- clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
+ drbd_clear_flag(mdev, CLUSTER_ST_CHANGE);
wake_up(&mdev->misc_wait);
}
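Taken together, drbd_state_lock()/drbd_state_unlock() build a simple sleeping lock out of one flag bit: sleep until the test-and-set wins, then clear the bit and wake the other waiters. A hedged usage sketch follows; the bracketed work is illustrative, not quoted from the patch:

	drbd_state_lock(mdev);		/* blocks until CLUSTER_ST_CHANGE was clear and is now set by us */
	/* ... perform the cluster-wide state change ... */
	drbd_state_unlock(mdev);	/* clears CLUSTER_ST_CHANGE and wakes misc_wait */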
/* NOTE fall through to detach case if forcedetach set */
case EP_DETACH:
case EP_CALL_HELPER:
- set_bit(WAS_IO_ERROR, &mdev->flags);
+ drbd_set_flag(mdev, WAS_IO_ERROR);
if (forcedetach == DRBD_FORCE_DETACH)
- set_bit(FORCE_DETACH, &mdev->flags);
+ drbd_set_flag(mdev, FORCE_DETACH);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV,
static inline void wake_asender(struct drbd_conf *mdev)
{
- if (test_bit(SIGNAL_ASENDER, &mdev->flags))
+ if (drbd_test_flag(mdev, SIGNAL_ASENDER))
force_sig(DRBD_SIG, mdev->asender.task);
}
static inline void request_ping(struct drbd_conf *mdev)
{
- set_bit(SEND_PING, &mdev->flags);
+ drbd_set_flag(mdev, SEND_PING);
wake_asender(mdev);
}
if (is_susp(mdev->state))
return false;
- if (test_bit(SUSPEND_IO, &mdev->flags))
+ if (drbd_test_flag(mdev, SUSPEND_IO))
return false;
/* to avoid potential deadlock or bitmap corruption,
* and we are within the spinlock anyways, we have this workaround. */
if (atomic_read(&mdev->ap_bio_cnt) > mxb)
return false;
- if (test_bit(BITMAP_IO, &mdev->flags))
+ if (drbd_test_flag(mdev, BITMAP_IO))
return false;
return true;
}
D_ASSERT(ap_bio >= 0);
- if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+ if (ap_bio == 0 && drbd_test_flag(mdev, BITMAP_IO)) {
+ if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}
{
struct sock *sk = mdev->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &mdev->flags);
+ drbd_set_flag(mdev, NET_CONGESTED);
}
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
int r;
- if (test_bit(MD_NO_FUA, &mdev->flags))
+ if (drbd_test_flag(mdev, MD_NO_FUA))
return;
r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
- set_bit(MD_NO_FUA, &mdev->flags);
+ drbd_set_flag(mdev, MD_NO_FUA);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
+
#endif
list_splice_init(&b->requests, &mdev->barrier_acked_requests);
nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ if (drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
_tl_add_barrier(mdev, b);
if (nob)
mdev->oldest_tle = nob;
if (b->w.cb == NULL) {
b->w.cb = w_send_barrier;
inc_ap_pending(mdev);
- set_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_set_flag(mdev, CREATE_BARRIER);
}
drbd_queue_work(&mdev->data.work, &b->w);
}
/* ensure bit indicating barrier is required is clear */
- clear_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_clear_flag(mdev, CREATE_BARRIER);
memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
unsigned long flags;
enum drbd_state_rv rv;
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_SUCCESS))
return SS_CW_SUCCESS;
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_FAIL))
return SS_CW_FAILED_BY_PEER;
rv = 0;
}
if (mask.conn == C_MASK && val.conn == C_DISCONNECTING)
- set_bit(DISCONNECT_SENT, &mdev->flags);
+ drbd_set_flag(mdev, DISCONNECT_SENT);
wait_event(mdev->state_wait,
(rv = _req_st_cond(mdev, mask, val)));
/* While establishing a connection only allow cstate to change.
Delay/refuse role changes, detach attach etc... */
- if (test_bit(STATE_SENT, &mdev->flags) &&
+ if (drbd_test_flag(mdev, STATE_SENT) &&
!(os.conn == C_WF_REPORT_PARAMS ||
(ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
rv = SS_IN_TRANSIENT_STATE;
static void drbd_resume_al(struct drbd_conf *mdev)
{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, AL_SUSPENDED))
dev_info(DEV, "Resumed AL updates\n");
}
if (ns.disk == D_DISKLESS &&
ns.conn == C_STANDALONE &&
ns.role == R_SECONDARY &&
- !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
- set_bit(DEVICE_DYING, &mdev->flags);
+ !drbd_test_and_set_flag(mdev, CONFIG_PENDING))
+ drbd_set_flag(mdev, DEVICE_DYING);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
* on the ldev here, to be sure the transition -> D_DISKLESS resp.
MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ if (drbd_test_flag(mdev, CRASHED_PRIMARY))
mdf |= MDF_CRASHED_PRIMARY;
if (mdev->state.role == R_PRIMARY ||
(mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
+ drbd_set_flag(mdev, CONSIDER_RESYNC);
/* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
D_ASSERT(current == mdev->worker.task);
/* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
+ drbd_set_flag(mdev, SUSPEND_IO);
drbd_bm_lock(mdev, why, flags);
rv = io_fn(mdev);
union drbd_state nsm = (union drbd_state){ .i = -1 };
if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_clear_flag(mdev, CRASHED_PRIMARY);
if (mdev->p_uuid)
mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
}
if (ns.susp_fen) {
/* case1: The outdate peer handler is successful: */
if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
}
spin_lock_irq(&mdev->req_lock);
_tl_clear(mdev);
}
/* case2: The connection was established again: */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
what = resend;
nsm.susp_fen = 0;
}
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
if (is_susp(mdev->state)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_set_flag(mdev, NEW_CUR_UUID);
} else {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
* we might come from a failed Attach before ldev was set. */
if (mdev->ldev) {
eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+ was_io_error = drbd_test_and_clear_flag(mdev, WAS_IO_ERROR);
if (was_io_error && eh == EP_CALL_HELPER)
drbd_khelper(mdev, "local-io-error");
* So aborting local requests may cause crashes,
* or even worse, silent data corruption.
*/
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, FORCE_DETACH))
tl_abort_disk_io(mdev);
/* current state still has to be D_FAILED,
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ drbd_test_and_clear_flag(mdev, RESYNC_AFTER_NEG)) {
if (ns.conn == C_CONNECTED)
resync_after_online_grow(mdev);
}
/* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &mdev->flags);
+ drbd_clear_flag(mdev, STATE_SENT);
wake_up(&mdev->state_wait);
}
if (os.aftr_isp != ns.aftr_isp)
resume_next_sg(mdev);
/* set in __drbd_set_state, unless CONFIG_PENDING was set */
- if (test_bit(DEVICE_DYING, &mdev->flags))
+ if (drbd_test_flag(mdev, DEVICE_DYING))
drbd_thread_stop_nowait(&mdev->worker);
}
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
- uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
+ uuid_flags |= drbd_test_flag(mdev, CRASHED_PRIMARY) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
offset += sent;
} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->flags);
+ drbd_clear_flag(mdev, NET_CONGESTED);
ok = (len == 0);
if (likely(ok))
dp_flags |= DP_MAY_SET_IN_SYNC;
p.dp_flags = cpu_to_be32(dp_flags);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ drbd_set_flag(mdev, UNPLUG_REMOTE);
ok = (sizeof(p) ==
drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
if (ok && dgs) {
} while (sent < size);
if (sock == mdev->data.socket)
- clear_bit(NET_CONGESTED, &mdev->flags);
+ drbd_clear_flag(mdev, NET_CONGESTED);
if (rv <= 0) {
if (rv != -EAGAIN) {
}
drbd_free_resources(mdev);
- clear_bit(AL_SUSPENDED, &mdev->flags);
+ drbd_clear_flag(mdev, AL_SUSPENDED);
/*
* currently we drbd_init_ee only on module load, so
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+ if (drbd_test_flag(mdev, CALLBACK_PENDING)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) && drbd_test_flag(mdev, NET_CONGESTED)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
del_timer(&mdev->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
- if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+ if (!drbd_test_and_clear_flag(mdev, MD_DIRTY))
return;
/* We use here D_FAILED and not D_ATTACHING because we try to write
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
+ if (!drbd_test_and_set_flag(mdev, MD_DIRTY)) {
mod_timer(&mdev->md_sync_timer, jiffies + HZ);
mdev->last_md_mark_dirty.line = line;
mdev->last_md_mark_dirty.func = func;
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, MD_DIRTY))
mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
put_ldev(mdev);
}
- clear_bit(BITMAP_IO, &mdev->flags);
+ drbd_clear_flag(mdev, BITMAP_IO);
smp_mb__after_clear_bit();
wake_up(&mdev->misc_wait);
if (work->done)
work->done(mdev, rv);
- clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+ drbd_clear_flag(mdev, BITMAP_IO_QUEUED);
work->why = NULL;
work->flags = 0;
__free_page(mdev->md_io_tmpp);
mdev->md_io_tmpp = NULL;
}
- clear_bit(GO_DISKLESS, &mdev->flags);
+ drbd_clear_flag(mdev, GO_DISKLESS);
}
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
void drbd_go_diskless(struct drbd_conf *mdev)
{
D_ASSERT(mdev->state.disk == D_FAILED);
- if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, GO_DISKLESS))
drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
}
{
D_ASSERT(current == mdev->worker.task);
- D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
- D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
+ D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO_QUEUED));
+ D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO));
D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
if (mdev->bm_io_work.why)
dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
mdev->bm_io_work.flags = flags;
spin_lock_irq(&mdev->req_lock);
- set_bit(BITMAP_IO, &mdev->flags);
+ drbd_set_flag(mdev, BITMAP_IO);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}
spin_unlock_irq(&mdev->req_lock);
int ret;
if (current == mdev->worker.task)
- set_bit(CALLBACK_PENDING, &mdev->flags);
+ drbd_set_flag(mdev, CALLBACK_PENDING);
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
(ret >> 8) & 0xff, ret);
if (current == mdev->worker.task)
- clear_bit(CALLBACK_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, CALLBACK_PENDING);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
+ if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
*/
void drbd_suspend_io(struct drbd_conf *mdev)
{
- set_bit(SUSPEND_IO, &mdev->flags);
+ drbd_set_flag(mdev, SUSPEND_IO);
if (is_susp(mdev->state))
return;
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
void drbd_resume_io(struct drbd_conf *mdev)
{
- clear_bit(SUSPEND_IO, &mdev->flags);
+ drbd_clear_flag(mdev, SUSPEND_IO);
wake_up(&mdev->misc_wait);
}
*/
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
- wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
- wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
+ wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
+ wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
drbd_thread_start(&mdev->worker);
drbd_flush_workqueue(mdev);
}
if (mdev->state.disk == D_DISKLESS &&
mdev->state.conn == C_STANDALONE &&
mdev->state.role == R_SECONDARY) {
- set_bit(DEVICE_DYING, &mdev->flags);
+ drbd_set_flag(mdev, DEVICE_DYING);
drbd_thread_stop_nowait(&mdev->worker);
} else
- clear_bit(CONFIG_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, CONFIG_PENDING);
spin_unlock_irq(&mdev->req_lock);
wake_up(&mdev->state_wait);
}
spin_lock_irq(&mdev->req_lock);
if (mdev->state.conn < C_CONNECTED)
- s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
+ s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);
spin_unlock_irq(&mdev->req_lock);
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
/* make sure there is no leftover from previous force-detach attempts */
- clear_bit(FORCE_DETACH, &mdev->flags);
+ drbd_clear_flag(mdev, FORCE_DETACH);
/* and no leftover from previously aborted resync or verify, either */
mdev->rs_total = 0;
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (nbc->dc.no_md_flush)
- set_bit(MD_NO_FUA, &mdev->flags);
+ drbd_set_flag(mdev, MD_NO_FUA);
else
- clear_bit(MD_NO_FUA, &mdev->flags);
+ drbd_clear_flag(mdev, MD_NO_FUA);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
drbd_bump_write_ordering(mdev, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_set_flag(mdev, CRASHED_PRIMARY);
else
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_clear_flag(mdev, CRASHED_PRIMARY);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
!(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_set_flag(mdev, CRASHED_PRIMARY);
cp_discovered = 1;
}
* so we can automatically recover from a crash of a
* degraded but active "cluster" after a certain timeout.
*/
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+ drbd_clear_flag(mdev, USE_DEGR_WFC_T);
if (mdev->state.role != R_PRIMARY &&
drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
!drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
- set_bit(USE_DEGR_WFC_T, &mdev->flags);
+ drbd_set_flag(mdev, USE_DEGR_WFC_T);
dd = drbd_determine_dev_size(mdev, 0);
if (dd == dev_size_error) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == grew)
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ drbd_set_flag(mdev, RESYNC_AFTER_NEG);
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
dev_info(DEV, "Assuming that all blocks are out of sync "
}
if (dt.detach_force) {
- set_bit(FORCE_DETACH, &mdev->flags);
+ drbd_set_flag(mdev, FORCE_DETACH);
drbd_force_state(mdev, NS(disk, D_FAILED));
reply->ret_code = SS_SUCCESS;
goto out;
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
- iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
if (mdev->state.conn == C_CONNECTED) {
if (dd == grew)
- set_bit(RESIZE_PENDING, &mdev->flags);
+ drbd_set_flag(mdev, RESIZE_PENDING);
drbd_send_uuids(mdev);
drbd_send_sizes(mdev, 1, ddsf);
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
}
drbd_suspend_io(mdev);
reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
tl = reply->tag_list;
rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+ drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;
tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
put_unaligned(TT_END, tl++); /* Close the tag list */
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
/* w_make_ov_request expects start position to be aligned */
mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
mdev->congestion_reason ?: '-',
- test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
+ drbd_test_flag(mdev, AL_SUSPENDED) ? 's' : '-',
mdev->send_cnt/2,
mdev->recv_cnt/2,
mdev->writ_cnt/2,
else if (rv != -ERESTARTSYS)
dev_err(DEV, "sock_recvmsg returned %d\n", rv);
} else if (rv == 0) {
- if (test_bit(DISCONNECT_SENT, &mdev->flags)) {
+ if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
long t; /* time_left */
t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
mdev->net_conf->ping_timeo * HZ/10);
D_ASSERT(!mdev->data.socket);
- clear_bit(DISCONNECT_SENT, &mdev->flags);
+ drbd_clear_flag(mdev, DISCONNECT_SENT);
if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
return -2;
sock = s;
s = NULL;
} else if (!msock) {
- clear_bit(DISCARD_CONCURRENT, &mdev->flags);
+ drbd_clear_flag(mdev, DISCARD_CONCURRENT);
drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
msock = s;
s = NULL;
sock_release(msock);
}
msock = s;
- set_bit(DISCARD_CONCURRENT, &mdev->flags);
+ drbd_set_flag(mdev, DISCARD_CONCURRENT);
break;
default:
dev_warn(DEV, "Error receiving initial packet\n");
if (drbd_send_protocol(mdev) == -1)
return -1;
- set_bit(STATE_SENT, &mdev->flags);
+ drbd_set_flag(mdev, STATE_SENT);
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, USE_DEGR_WFC_T);
+ drbd_clear_flag(mdev, RESIZE_PENDING);
spin_lock_irq(&mdev->req_lock);
rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
if (mdev->state.conn != C_WF_REPORT_PARAMS)
- clear_bit(STATE_SENT, &mdev->flags);
+ drbd_clear_flag(mdev, STATE_SENT);
spin_unlock_irq(&mdev->req_lock);
if (rv < SS_SUCCESS)
/* don't get the req_lock yet,
* we may sleep in drbd_wait_peer_seq */
const int size = e->size;
- const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ const int discard = drbd_test_flag(mdev, DISCARD_CONCURRENT);
DEFINE_WAIT(wait);
struct drbd_request *i;
struct hlist_node *n;
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
? -1 : 1;
break;
} else {
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
}
/* Common power [off|failure] */
- rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
+ rct = (drbd_test_flag(mdev, CRASHED_PRIMARY) ? 1 : 0) +
(mdev->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ dc = drbd_test_flag(mdev, DISCARD_CONCURRENT);
return dc ? -1 : 1;
}
}
}
}
- if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+ if (mdev->net_conf->dry_run || drbd_test_flag(mdev, CONN_DRY_RUN)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
cf = be32_to_cpu(p->conn_flags);
p_want_lose = cf & CF_WANT_LOSE;
- clear_bit(CONN_DRY_RUN, &mdev->flags);
+ drbd_clear_flag(mdev, CONN_DRY_RUN);
if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &mdev->flags);
+ drbd_set_flag(mdev, CONN_DRY_RUN);
if (p_proto != mdev->net_conf->wire_protocol) {
dev_err(DEV, "incompatible communication protocols\n");
* needs to know my new size... */
drbd_send_sizes(mdev, 0, ddsf);
}
- if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
+ if (drbd_test_and_clear_flag(mdev, RESIZE_PENDING) ||
(dd == grew && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
mdev->state.disk >= D_INCONSISTENT) {
else
resync_after_online_grow(mdev);
} else
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ drbd_set_flag(mdev, RESYNC_AFTER_NEG);
}
}
ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, CLUSTER_ST_CHANGE));
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
- test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+ if (drbd_test_flag(mdev, DISCARD_CONCURRENT) &&
+ drbd_test_flag(mdev, CLUSTER_ST_CHANGE)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
return true;
}
os.disk == D_NEGOTIATING));
/* if we have both been inconsistent, and the peer has been
* forced to be UpToDate with --overwrite-data */
- cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
+ cr |= drbd_test_flag(mdev, CONSIDER_RESYNC);
/* if we had been plain connected, and the admin requested to
* start a sync by "invalidate" or "invalidate-remote" */
cr |= (os.conn == C_CONNECTED &&
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, CONN_DRY_RUN))
return false;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
spin_lock_irq(&mdev->req_lock);
if (mdev->state.i != os.i)
goto retry;
- clear_bit(CONSIDER_RESYNC, &mdev->flags);
+ drbd_clear_flag(mdev, CONSIDER_RESYNC);
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
ns.disk = mdev->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
- test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ drbd_test_flag(mdev, NEW_CUR_UUID)) {
/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
for temporal network outages! */
spin_unlock_irq(&mdev->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
tl_clear(mdev);
drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
return false;
}
/* serialize with bitmap writeout triggered by the state change,
* if any. */
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
int retcode = be32_to_cpu(p->retcode);
if (retcode >= SS_SUCCESS) {
- set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+ drbd_set_flag(mdev, CL_ST_CHG_SUCCESS);
} else {
- set_bit(CL_ST_CHG_FAIL, &mdev->flags);
+ drbd_set_flag(mdev, CL_ST_CHG_FAIL);
dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
{
/* restore idle timeout */
mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
wake_up(&mdev->misc_wait);
return true;
if (mdev->state.conn == C_AHEAD &&
atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+ !drbd_test_and_set_flag(mdev, AHEAD_TO_SYNC_SOURCE)) {
mdev->start_resync_timer.expires = jiffies + HZ;
add_timer(&mdev->start_resync_timer);
}
while (get_t_state(thi) == Running) {
drbd_thread_current_set_cpu(mdev);
- if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
+ if (drbd_test_and_clear_flag(mdev, SEND_PING)) {
ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
mdev->meta.socket->sk->sk_rcvtimeo =
mdev->net_conf->ping_timeo*HZ/10;
3 < atomic_read(&mdev->unacked_cnt))
drbd_tcp_cork(mdev->meta.socket);
while (1) {
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_clear_flag(mdev, SIGNAL_ASENDER);
flush_signals(current);
if (!drbd_process_done_ee(mdev))
goto reconnect;
/* to avoid race with newly queued ACKs */
- set_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_set_flag(mdev, SIGNAL_ASENDER);
spin_lock_irq(&mdev->req_lock);
empty = list_empty(&mdev->done_ee);
spin_unlock_irq(&mdev->req_lock);
rv = drbd_recv_short(mdev, mdev->meta.socket,
buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_clear_flag(mdev, SIGNAL_ASENDER);
flush_signals(current);
received += rv;
buf += rv;
} else if (rv == 0) {
- if (test_bit(DISCONNECT_SENT, &mdev->flags)) {
+ if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
long t; /* time_left */
t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
mdev->net_conf->ping_timeo * HZ/10);
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &mdev->flags);
+ drbd_set_flag(mdev, SEND_PING);
continue;
} else if (rv == -EINTR) {
continue;
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
drbd_md_sync(mdev);
}
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_clear_flag(mdev, SIGNAL_ASENDER);
D_ASSERT(mdev->state.conn < C_CONNECTED);
dev_info(DEV, "asender terminated\n");
* barrier/epoch object is added. This is the only place this bit is
* set. It indicates that the barrier for this epoch is already queued,
* and no new epoch has been created yet. */
- if (test_bit(CREATE_BARRIER, &mdev->flags))
+ if (drbd_test_flag(mdev, CREATE_BARRIER))
return;
b = mdev->newest_tle;
* or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
drbd_queue_work(&mdev->data.work, &b->w);
- set_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_set_flag(mdev, CREATE_BARRIER);
}
static void _about_to_complete_local_write(struct drbd_conf *mdev,
* corresponding hlist_del is in _req_may_be_done() */
hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ drbd_set_flag(mdev, UNPLUG_REMOTE);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_NET_QUEUED;
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ drbd_set_flag(mdev, UNPLUG_REMOTE);
/* see drbd_make_request_common,
* just after it grabs the req_lock */
- D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
+ D_ASSERT(drbd_test_flag(mdev, CREATE_BARRIER) == 0);
req->epoch = mdev->newest_tle->br_number;
* Empty flushes don't need to go into the activity log, they can only
* flush data for pending writes which are already in there. */
if (rw == WRITE && local && size
- && !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ && !drbd_test_flag(mdev, AL_SUSPENDED)) {
req->rq_state |= RQ_IN_ACT_LOG;
drbd_al_begin_io(mdev, sector);
}
* if we lost that race, we retry. */
if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
+ drbd_test_flag(mdev, CREATE_BARRIER)) {
allocate_barrier:
b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
if (!b) {
}
if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
+ drbd_test_flag(mdev, CREATE_BARRIER)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
spin_unlock_irq(&mdev->req_lock);
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
if ((remote || send_oos) && mdev->unused_spare_tle &&
- test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
_tl_add_barrier(mdev, mdev->unused_spare_tle);
mdev->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
- test_bit(CREATE_BARRIER, &mdev->flags)));
+ drbd_test_flag(mdev, CREATE_BARRIER)));
}
/* NOTE
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ drbd_clear_flag(mdev, AHEAD_TO_SYNC_SOURCE);
return 1;
}
static void ping_peer(struct drbd_conf *mdev)
{
- clear_bit(GOT_PING_ACK, &mdev->flags);
+ drbd_clear_flag(mdev, GOT_PING_ACK);
request_ping(mdev);
wait_event(mdev->misc_wait,
- test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+ drbd_test_flag(mdev, GOT_PING_ACK) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
NS(conn, C_NETWORK_FAILURE));
}
}
- D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
- D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
+ D_ASSERT(drbd_test_flag(mdev, DEVICE_DYING));
+ D_ASSERT(drbd_test_flag(mdev, CONFIG_PENDING));
spin_lock_irq(&mdev->data.work.q_lock);
i = 0;
dev_info(DEV, "worker terminated\n");
- clear_bit(DEVICE_DYING, &mdev->flags);
- clear_bit(CONFIG_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, DEVICE_DYING);
+ drbd_clear_flag(mdev, CONFIG_PENDING);
wake_up(&mdev->state_wait);
return 0;