unsigned long last_received; /* in jiffies, either socket */
unsigned int ko_count;
+ spinlock_t req_lock;
+ struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
+ struct drbd_tl_epoch *newest_tle;
+ struct drbd_tl_epoch *oldest_tle;
+ struct list_head out_of_sequence_requests;
+
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
	atomic_t unacked_cnt; /* Need to send replies for */
atomic_t local_cnt; /* Waiting for local completion */
- spinlock_t req_lock;
- struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
- struct drbd_tl_epoch *newest_tle;
- struct drbd_tl_epoch *oldest_tle;
- struct list_head out_of_sequence_requests;
-
/* Interval tree of pending local requests */
struct rb_root read_requests;
struct rb_root write_requests;
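The net effect of the field moves above: the transfer-log bookkeeping and the spinlock protecting it now live on the connection object instead of the device object. A minimal sketch of the resulting split, assuming the added fields land in struct drbd_tconn (which is what the mdev->tconn-> accesses throughout the rest of this patch imply); only members visible in this hunk are shown:

struct drbd_tconn {				/* per-connection state */
	spinlock_t req_lock;
	struct drbd_tl_epoch *unused_spare_tle;	/* for pre-allocation */
	struct drbd_tl_epoch *newest_tle;
	struct drbd_tl_epoch *oldest_tle;
	struct list_head out_of_sequence_requests;
	/* ... */
};

struct drbd_conf {				/* per-device state (mdev) */
	struct drbd_tconn *tconn;		/* the lock is now reached as mdev->tconn->req_lock */
	struct rb_root read_requests;		/* the interval trees stay per device */
	struct rb_root write_requests;
	/* ... */
};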
{
if (error) {
unsigned long flags;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__drbd_chk_io_error_(mdev, forcedetach, where);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
}
}
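The wrapper above uses the _irqsave variant because it may be called with interrupts already disabled (e.g. from completion paths); the plain spin_lock_irq()/spin_unlock_irq() pair used elsewhere in this patch unconditionally re-enables interrupts on unlock and is therefore only safe from process context. A small illustrative contrast, not taken from the patch:

	unsigned long flags;

	/* works regardless of the current IRQ state: it is saved and restored */
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	/* only valid when IRQs are known to be enabled on entry (process context) */
	spin_lock_irq(&mdev->tconn->req_lock);
	/* ... critical section ... */
	spin_unlock_irq(&mdev->tconn->req_lock);	/* unconditionally re-enables IRQs */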
{
bool rv = false;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
rv = may_inc_ap_bio(mdev);
if (rv)
atomic_add(count, &mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
* DOC: The transfer log
*
* The transfer log is a single linked list of &struct drbd_tl_epoch objects.
- * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
+ * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
* of the list. There is always at least one &struct drbd_tl_epoch object.
*
* Each &struct drbd_tl_epoch has a circular double linked list of requests
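A compact sketch of the data structure the DOC block above describes, using only members that appear elsewhere in this patch (w, requests, next, br_number, n_writes); the field types are assumptions inferred from their use:

struct drbd_tl_epoch {
	struct drbd_work w;		/* w.cb != NULL means dec_ap_pending() is due in tl_clear() */
	struct list_head requests;	/* circular, doubly linked list of struct drbd_request */
	struct drbd_tl_epoch *next;	/* singly linked: oldest_tle -> ... -> newest_tle */
	unsigned int br_number;		/* barrier number of this epoch, never 0 */
	int n_writes;			/* write requests attached to this epoch */
};

/* Walking the log, oldest to newest (sketch; tconn->req_lock must be held): */
static void tl_walk_sketch(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	for (b = tconn->oldest_tle; b; b = b->next)
		;	/* b == tconn->newest_tle on the final iteration */
}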
b->n_writes = 0;
b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
- mdev->oldest_tle = b;
- mdev->newest_tle = b;
- INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+ mdev->tconn->oldest_tle = b;
+ mdev->tconn->newest_tle = b;
+ INIT_LIST_HEAD(&mdev->tconn->out_of_sequence_requests);
return 1;
}
static void tl_cleanup(struct drbd_conf *mdev)
{
- D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
- kfree(mdev->oldest_tle);
- mdev->oldest_tle = NULL;
- kfree(mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
+ D_ASSERT(mdev->tconn->oldest_tle == mdev->tconn->newest_tle);
+ D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
+ kfree(mdev->tconn->oldest_tle);
+ mdev->tconn->oldest_tle = NULL;
+ kfree(mdev->tconn->unused_spare_tle);
+ mdev->tconn->unused_spare_tle = NULL;
}
/**
new->next = NULL;
new->n_writes = 0;
- newest_before = mdev->newest_tle;
+ newest_before = mdev->tconn->newest_tle;
/* never send a barrier number == 0, because that is special-cased
* when using TCQ for our write ordering code */
new->br_number = (newest_before->br_number+1) ?: 1;
- if (mdev->newest_tle != new) {
- mdev->newest_tle->next = new;
- mdev->newest_tle = new;
+ if (mdev->tconn->newest_tle != new) {
+ mdev->tconn->newest_tle->next = new;
+ mdev->tconn->newest_tle = new;
}
}
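The `?:` used above for the new barrier number is the GNU C shorthand `a ?: b`, which evaluates to `a` unless `a` is 0, in which case it evaluates to `b` (with `a` evaluated only once). A tiny stand-alone illustration of the wraparound case it guards against (values invented for the example):

#include <limits.h>

static unsigned int next_barrier_number(unsigned int prev)
{
	/* never hand out 0, it is special-cased by the write ordering code */
	return (prev + 1) ?: 1;
}

/* next_barrier_number(41)       -> 42
 * next_barrier_number(UINT_MAX) -> 1   (UINT_MAX + 1 wraps to 0, so ?: yields 1) */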
struct list_head *le, *tle;
struct drbd_request *r;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
- b = mdev->oldest_tle;
+ b = mdev->tconn->oldest_tle;
/* first some paranoia code */
if (b == NULL) {
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
_tl_add_barrier(mdev, b);
if (nob)
- mdev->oldest_tle = nob;
+ mdev->tconn->oldest_tle = nob;
/* if nob == NULL b was the only barrier, and becomes the new
- barrier. Therefore mdev->oldest_tle points already to b */
+	   barrier. Therefore mdev->tconn->oldest_tle already points to b */
} else {
D_ASSERT(nob != NULL);
- mdev->oldest_tle = nob;
+ mdev->tconn->oldest_tle = nob;
kfree(b);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dec_ap_pending(mdev);
return;
bail:
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
struct drbd_request *req;
int rv, n_writes, n_reads;
- b = mdev->oldest_tle;
- pn = &mdev->oldest_tle;
+ b = mdev->tconn->oldest_tle;
+ pn = &mdev->tconn->oldest_tle;
while (b) {
n_writes = 0;
n_reads = 0;
if (b->w.cb != NULL)
dec_ap_pending(mdev);
- if (b == mdev->newest_tle) {
+ if (b == mdev->tconn->newest_tle) {
/* recycle, but reinit! */
D_ASSERT(tmp == NULL);
INIT_LIST_HEAD(&b->requests);
struct list_head *le, *tle;
struct drbd_request *r;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
/* we expect this list to be empty. */
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
+ D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
/* but just in case, clean it up anyways! */
- list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
+ list_for_each_safe(le, tle, &mdev->tconn->out_of_sequence_requests) {
r = list_entry(le, struct drbd_request, tl_requests);
/* It would be nice to complete outside of the spinlock.
* But this is easier for now. */
/* ensure bit indicating barrier is required is clear */
clear_bit(CREATE_BARRIER, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_tl_restart(mdev, what);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
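tl_restart()/_tl_restart() above follow the naming convention used throughout this patch: the underscore-prefixed function assumes the caller already holds tconn->req_lock, while the plain-named function is the locking wrapper around it. A generic sketch of that split (function names here are illustrative, not from the patch):

/* caller must hold mdev->tconn->req_lock */
static void _do_tl_work(struct drbd_conf *mdev)
{
	/* ... manipulate the transfer log ... */
}

static void do_tl_work(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_do_tl_work(mdev);
	spin_unlock_irq(&mdev->tconn->req_lock);
}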
/**
union drbd_state os, ns;
enum drbd_state_rv rv;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
rv = _drbd_set_state(mdev, ns, f, NULL);
ns = mdev->state;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
return rv;
}
return SS_CW_FAILED_BY_PEER;
rv = 0;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
ns = sanitize_state(mdev, os, ns, NULL);
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
}
}
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
return rv;
}
if (f & CS_SERIALIZE)
mutex_lock(&mdev->state_mutex);
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
ns = sanitize_state(mdev, os, ns, NULL);
rv = is_valid_state(mdev, ns);
if (rv == SS_SUCCESS)
rv = is_valid_state_transition(mdev, ns, os);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
goto abort;
}
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
rv = _drbd_set_state(mdev, ns, f, &done);
rv = _drbd_set_state(mdev, ns, f, &done);
}
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
D_ASSERT(current != mdev->tconn->worker.task);
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
/* case2: The connection was established again: */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
}
if (what != NOTHING) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_tl_restart(mdev, what);
nsm.i &= mdev->state.i;
_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
/* Became sync source. With protocol >= 96, we still need to send out
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
/* to have a stable mdev->state.role
* and no race with updating open_cnt */
if (!rv)
mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
spin_lock_init(&mdev->tconn->meta.work.q_lock);
spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->req_lock);
+ spin_lock_init(&mdev->tconn->req_lock);
spin_lock_init(&mdev->peer_seq_lock);
spin_lock_init(&mdev->epoch_lock);
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->req_lock;
+ q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page)
mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED) {
int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
mdev->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (mdev->sync_conf.al_extents < 7)
mdev->sync_conf.al_extents = 127;
mdev->bm_io_work.why = why;
mdev->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
/**
pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
therefore we have to have the pre state change check here.
*/
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
ns = mdev->state;
if (ns.conn < C_WF_REPORT_PARAMS) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return 0;
}
* wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.disk == D_DISKLESS &&
mdev->state.conn == C_STANDALONE &&
mdev->state.role == R_SECONDARY) {
drbd_thread_stop_nowait(&mdev->tconn->worker);
} else
clear_bit(CONFIG_PENDING, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
wake_up(&mdev->state_wait);
}
return;
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (s)
dev_info(DEV, "Suspended AL updates\n");
if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
drbd_suspend_al(mdev); /* IO is still suspended here... */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
os = mdev->state;
ns.i = os.i;
/* If MDF_CONSISTENT is not set go into inconsistent state,
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
}
drbd_flush_workqueue(mdev);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->tconn->net_conf != NULL) {
retcode = ERR_NET_CONFIGURED;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto fail;
}
mdev->tconn->net_conf = new_conf;
mdev->int_dig_in=int_dig_in;
mdev->int_dig_vv=int_dig_vv;
retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
reply->ret_code = retcode;
}
if (dc.force) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn >= C_WF_CONNECTION)
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto done;
}
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
while (retcode == SS_NEED_CONNECTION) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (retcode != SS_NEED_CONNECTION)
break;
drbd_send_uuids_skip_initial_sync(mdev);
_drbd_uuid_set(mdev, UI_BITMAP, 0);
drbd_print_uuids(mdev, "cleared bitmap UUID");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
}
LIST_HEAD(reclaimed);
struct drbd_epoch_entry *e, *t;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
reclaim_net_ee(mdev, &reclaimed);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(e, t, &reclaimed, w.list)
drbd_free_net_ee(mdev, e);
}
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+ * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
int count = 0;
int is_net = list == &mdev->net_ee;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(e, t, &work_list, w.list) {
drbd_free_some_ee(mdev, e, is_net);
struct drbd_epoch_entry *e, *t;
int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
reclaim_net_ee(mdev, &reclaimed);
list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(e, t, &reclaimed, w.list)
drbd_free_net_ee(mdev, e);
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
io_schedule();
finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
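_drbd_wait_ee_list_empty() above is the classic pattern for sleeping on a condition protected by a spinlock: register on the wait queue first, drop the lock, schedule, then reacquire and recheck. A self-contained sketch of the same pattern with illustrative names (the real function is called with the lock already held by its drbd_wait_ee_list_empty() wrapper):

static void wait_until_list_empty(spinlock_t *lock, wait_queue_head_t *wq,
				  struct list_head *head)
{
	DEFINE_WAIT(wait);

	spin_lock_irq(lock);
	while (!list_empty(head)) {
		/* register before dropping the lock so a concurrent wake_up() is not lost */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		io_schedule();
		finish_wait(wq, &wait);
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
}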
/* see also kernel_accept, which is only present since 2.6.18.
e->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
drbd_free_ee(mdev, e);
fail:
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (unlikely(!req))
return false;
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (mdev->tconn->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
} else
D_ASSERT(drbd_interval_empty(&e->i));
D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dec_unacked(mdev);
/* I'm the receiver, I do hold a net_cnt reference. */
if (!mdev->tconn->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
} else {
/* don't get the req_lock yet,
* we may sleep in drbd_wait_peer_seq */
if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
goto out_interrupted;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
drbd_insert_interval(&mdev->epoch_entries, &e->i);
e->w.cb = e_send_discard_ack;
list_add_tail(&e->w.list, &mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* we could probably send that P_DISCARD_ACK ourselves,
* but I don't like the receiver using the msock */
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
finish_wait(&mdev->misc_wait, &wait);
goto out_interrupted;
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (first) {
first = 0;
dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
D_ASSERT(have_unacked == 0);
}
schedule();
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
finish_wait(&mdev->misc_wait, &wait);
}
list_add(&e->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
switch (mdev->tconn->net_conf->wire_protocol) {
case DRBD_PROT_C:
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (e->flags & EE_CALL_AL_COMPLETE_IO)
drbd_al_complete_io(mdev, e->i.sector);
submit:
inc_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_add_tail(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
return true;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
retry:
os = ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* peer says his disk is uptodate, while we think it is inconsistent,
* and this happens while we think we have a sync going on. */
}
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.i != os.i)
goto retry;
clear_bit(CONSIDER_RESYNC, &mdev->flags);
test_bit(NEW_CUR_UUID, &mdev->flags)) {
/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
   for temporary network outages! */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
tl_clear(mdev);
drbd_uuid_new_current(mdev);
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS) {
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
drbd_free_sock(mdev);
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
drbd_try_outdate_peer_async(mdev);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
os = mdev->state;
if (os.conn >= C_UNCONNECTED) {
/* Do not restart in case we are C_DISCONNECTING */
ns.conn = C_UNCONNECTED;
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (os.conn == C_DISCONNECTING) {
wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
req = find_request(mdev, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return false;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
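The hunk above shows another pattern this patch preserves: the request state transition happens under tconn->req_lock, but any resulting master-bio completion is carried out of the critical section in a struct bio_and_error and performed only after the unlock, so the completion path never runs with the request lock held. A condensed sketch of that shape (a simplification of the code above, not an addition to it):

static void mod_and_complete(struct drbd_conf *mdev, struct drbd_request *req,
			     enum drbd_req_event what)
{
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	__req_mod(req, what, &m);		/* may mark the master bio as done */
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)				/* completion deliberately done outside the lock */
		complete_master_bio(mdev, &m);
}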
goto reconnect;
/* to avoid race with newly queued ACKs */
set_bit(SIGNAL_ASENDER, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
empty = list_empty(&mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* new ack may have been queued right here,
* but then there is also a signal pending,
* and we start over... */
if (test_bit(CREATE_BARRIER, &mdev->flags))
return;
- b = mdev->newest_tle;
+ b = mdev->tconn->newest_tle;
b->w.cb = w_send_barrier;
/* inc_ap_pending done here, so we won't
* get imbalanced on connection loss.
*/
if (mdev->state.conn >= C_CONNECTED &&
(s & RQ_NET_SENT) != 0 &&
- req->epoch == mdev->newest_tle->br_number)
+ req->epoch == mdev->tconn->newest_tle->br_number)
queue_barrier(mdev);
/* we need to do the conflict detection stuff,
* just after it grabs the req_lock */
D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
- req->epoch = mdev->newest_tle->br_number;
+ req->epoch = mdev->tconn->newest_tle->br_number;
/* increment size of current epoch */
- mdev->newest_tle->n_writes++;
+ mdev->tconn->newest_tle->n_writes++;
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
drbd_queue_work(&mdev->tconn->data.work, &req->w);
/* close the epoch, in case it outgrew the limit */
- if (mdev->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
+ if (mdev->tconn->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
queue_barrier(mdev);
break;
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
- list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+ list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
}
if ((req->rq_state & RQ_NET_MASK) != 0) {
req->rq_state |= RQ_NET_DONE;
* spinlock, and grabbing the spinlock.
* if we lost that race, we retry. */
if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
+ mdev->tconn->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
allocate_barrier:
b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
}
/* GOOD, everything prepared, grab the spin_lock */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto fail_free_complete;
}
dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
if (!(local || remote)) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto fail_free_complete;
}
}
- if (b && mdev->unused_spare_tle == NULL) {
- mdev->unused_spare_tle = b;
+ if (b && mdev->tconn->unused_spare_tle == NULL) {
+ mdev->tconn->unused_spare_tle = b;
b = NULL;
}
if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
+ mdev->tconn->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
goto allocate_barrier;
}
* barrier packet. To get the write ordering right, we only have to
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
- if ((remote || send_oos) && mdev->unused_spare_tle &&
+ if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
+ _tl_add_barrier(mdev, mdev->tconn->unused_spare_tle);
+ mdev->tconn->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
test_bit(CREATE_BARRIER, &mdev->flags)));
if (rw == WRITE && _req_conflicts(req))
goto fail_conflicting;
- list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+ list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);
/* NOTE remote first: to get the concurrent write detection right,
* we must register the request before start of local IO. */
}
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
kfree(b); /* if someone else has beaten us to it... */
if (local) {
* pretend that it was successfully served right now.
*/
_drbd_end_io_acct(mdev, req);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (remote)
dec_ap_pending(mdev);
/* THINK: do we want to fail it (-EIO), or pretend success?
if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
return; /* Recurring timer stopped */
- spin_lock_irq(&mdev->req_lock);
- le = &mdev->oldest_tle->requests;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ le = &mdev->tconn->oldest_tle->requests;
if (list_empty(le)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
mod_timer(&mdev->request_timer, jiffies + et);
return;
}
mod_timer(&mdev->request_timer, req->start_time + et);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
struct bio_and_error m;
int rv;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
unsigned long flags = 0;
struct drbd_conf *mdev = e->mdev;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
mdev->read_cnt += e->i.size >> 9;
list_del(&e->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, false);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
drbd_queue_work(&mdev->tconn->data.work, &e->w);
put_ldev(mdev);
do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
block_id = e->block_id;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
mdev->writ_cnt += e->i.size >> 9;
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, false);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (block_id == ID_SYNCER)
drbd_rs_complete_io(mdev, e_sector);
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
* but try to WRITE the P_DATA_REPLY to the failed location,
* to give the disk the chance to relocate that block */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
_req_mod(req, READ_RETRY_REMOTE_CANCELED);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return 1;
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return w_send_read_req(mdev, w, 0);
}
goto defer;
e->w.cb = w_e_send_csum;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_add(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(size >> 9, &mdev->rs_sect_ev);
if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
drbd_free_ee(mdev, e);
defer:
ping_peer(mdev);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
os = mdev->state;
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
put_ldev(mdev);
out:
mdev->rs_total = 0;
int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
atomic_add(i, &mdev->pp_in_use_by_net);
atomic_sub(i, &mdev->pp_in_use);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_add_tail(&e->w.list, &mdev->net_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
wake_up(&drbd_pp_wait);
} else
drbd_free_ee(mdev, e);
* actually, this race was harmless, since we only try to send the
* barrier packet here, and otherwise do nothing with the object.
* but compare with the head of w_clear_epoch */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
cancel = 1;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (cancel)
return 1;