mdev = b->w.mdev;
nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ if (test_and_clear_bit(CREATE_BARRIER, &tconn->flags)) {
_tl_add_barrier(tconn, b);
if (nob)
tconn->oldest_tle = nob;
if (b->w.cb == NULL) {
b->w.cb = w_send_barrier;
inc_ap_pending(b->w.mdev);
- set_bit(CREATE_BARRIER, &b->w.mdev->flags);
+ set_bit(CREATE_BARRIER, &tconn->flags);
}
drbd_queue_work(&tconn->data.work, &b->w);
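/* The requeued barrier work above still carries a per-device mdev (needed for
 * inc_ap_pending()), but the CREATE_BARRIER state it re-arms now lives in the
 * per-connection tconn->flags instead of the per-device mdev->flags. */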
*/
void tl_clear(struct drbd_tconn *tconn)
{
- struct drbd_conf *mdev;
struct list_head *le, *tle;
struct drbd_request *r;
- int vnr;
spin_lock_irq(&tconn->req_lock);
}
/* ensure bit indicating barrier is required is clear */
- rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- clear_bit(CREATE_BARRIER, &mdev->flags);
- rcu_read_unlock();
+ clear_bit(CREATE_BARRIER, &tconn->flags);
spin_unlock_irq(&tconn->req_lock);
}
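/* With CREATE_BARRIER tracked once per connection, tl_clear() no longer needs
 * the rcu-protected idr walk over all volumes; a single clear_bit() on
 * tconn->flags suffices, and the mdev/vnr locals go away with it. */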
static void queue_barrier(struct drbd_conf *mdev)
{
struct drbd_tl_epoch *b;
+ struct drbd_tconn *tconn = mdev->tconn;
/* We are within the req_lock. Once we queued the barrier for sending,
* we set the CREATE_BARRIER bit. It is cleared as soon as a new
* barrier/epoch object is added. This is the only place this bit is
* set. It indicates that the barrier for this epoch is already queued,
* and no new epoch has been created yet. */
- if (test_bit(CREATE_BARRIER, &mdev->flags))
+ if (test_bit(CREATE_BARRIER, &tconn->flags))
return;
- b = mdev->tconn->newest_tle;
+ b = tconn->newest_tle;
b->w.cb = w_send_barrier;
b->w.mdev = mdev;
/* inc_ap_pending done here, so we won't
 * get imbalanced on connection loss.
 * dec_ap_pending will be done in got_BarrierAck
 * or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
- drbd_queue_work(&mdev->tconn->data.work, &b->w);
- set_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_queue_work(&tconn->data.work, &b->w);
+ set_bit(CREATE_BARRIER, &tconn->flags);
}
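/* queue_barrier() now resolves the connection once and does its
 * test_bit()/set_bit() pair on tconn->flags, so at most one barrier is queued
 * per connection epoch; mdev is only kept for the work item itself and for
 * the ap_pending accounting noted above. */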
static void _about_to_complete_local_write(struct drbd_conf *mdev,
/* see __drbd_make_request,
* just after it grabs the req_lock */
- D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
+ D_ASSERT(test_bit(CREATE_BARRIER, &mdev->tconn->flags) == 0);
req->epoch = mdev->tconn->newest_tle->br_number;
* if we lost that race, we retry. */
if (rw == WRITE && (remote || send_oos) &&
mdev->tconn->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
+ test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
allocate_barrier:
b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
if (!b) {
}
if (rw == WRITE && (remote || send_oos) &&
mdev->tconn->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
+ test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
spin_unlock_irq(&mdev->tconn->req_lock);
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
- test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ test_and_clear_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
_tl_add_barrier(mdev->tconn, mdev->tconn->unused_spare_tle);
mdev->tconn->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
- test_bit(CREATE_BARRIER, &mdev->flags)));
+ test_bit(CREATE_BARRIER, &mdev->tconn->flags)));
}
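/* The remaining readers of the bit in the request path (the spare-tle
 * preallocation check, the recheck under req_lock, the epoch switch via
 * test_and_clear_bit(), and the D_ASSERT) all reference mdev->tconn->flags
 * now, matching the single set_bit() in queue_barrier(). */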
/* NOTE