if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
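+ /* A read that can no longer be completed by the peer may still be
+  * served from our own disk if it is up to date and the private bio
+  * has been kept around (see goto_read_retry_local below). */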
+ if (!(req->rq_state & RQ_WRITE) &&
+ mdev->state.disk == D_UP_TO_DATE &&
+ !IS_ERR_OR_NULL(req->private_bio))
+ goto goto_read_retry_local;
+
/* if it is still queued, we may not complete it here.
* it will be canceled soon. */
if (!(req->rq_state & RQ_NET_QUEUED))
_req_may_be_done(req, m); /* Allowed while state.susp */
break;

case NEG_ACKED:
if (req->rq_state & RQ_NET_PENDING)
dec_ap_pending(mdev);
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
+
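+ /* As above: a read that was negatively acknowledged by the peer can
+  * be retried from our own up to date disk instead of being failed. */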
+ if (!(req->rq_state & RQ_WRITE) &&
+ mdev->state.disk == D_UP_TO_DATE &&
+ !IS_ERR_OR_NULL(req->private_bio))
+ goto goto_read_retry_local;
+
_req_may_be_done_not_susp(req, m);
/* else: done by HANDED_OVER_TO_NETWORK */
break;
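+
+ /* Local retry of a read: mark local IO pending again and re-submit
+  * the kept private bio to our backing device. */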
+ goto_read_retry_local:
+ req->rq_state |= RQ_LOCAL_PENDING;
+ req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
+ generic_make_request(req->private_bio);
+ break;
+
case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
_req_may_be_done(req, m); /* Allowed while state.susp */
break;

case DATA_RECEIVED:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING;
req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
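+ /* The peer's read data has arrived; a private bio that was kept for
+  * a possible local retry is no longer needed. */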
+ if (!IS_ERR_OR_NULL(req->private_bio)) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
_req_may_be_done_not_susp(req, m);
break;
};
return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}
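+
+/* Decide whether a read should be sent to the peer instead of the local
+ * backing device, according to the configured read-balancing policy:
+ *   RB_CONGESTED_REMOTE: read remotely while the local backing device
+ *                        reports read congestion,
+ *   RB_LEAST_PENDING:    read remotely when more requests are in flight
+ *                        locally than towards the peer,
+ *   RB_ROUND_ROBIN:      strictly alternate local and remote reads,
+ *   RB_PREFER_REMOTE:    always read remotely,
+ *   RB_PREFER_LOCAL:     always read locally (the default).
+ */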
+static bool remote_due_to_read_balancing(struct drbd_conf *mdev)
+{
+ enum drbd_read_balancing rbm;
+ struct backing_dev_info *bdi;
+
+ if (mdev->state.pdsk < D_UP_TO_DATE)
+ return false;
+
+ rcu_read_lock();
+ rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+ rcu_read_unlock();
+
+ switch (rbm) {
+ case RB_CONGESTED_REMOTE:
+ bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ return bdi_read_congested(bdi);
+ case RB_LEAST_PENDING:
+ return atomic_read(&mdev->local_cnt) >
+ atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+ case RB_ROUND_ROBIN:
+ return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+ case RB_PREFER_REMOTE:
+ return true;
+ case RB_PREFER_LOCAL:
+ default:
+ return false;
+ }
+}
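The strict alternation of RB_ROUND_ROBIN falls out of test_and_change_bit():
it returns the old value of READ_BALANCE_RR while inverting it, so consecutive
decisions toggle between local and remote. A minimal userspace sketch of that
behaviour (plain C; test_and_change() is a hypothetical stand-in for the
kernel helper, with bit 0 standing in for READ_BALANCE_RR):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the kernel's test_and_change_bit(): return the old
	 * value of the bit, then invert it. */
	static bool test_and_change(unsigned long *flags, int bit)
	{
		bool old = *flags & (1UL << bit);

		*flags ^= 1UL << bit;
		return old;
	}

	int main(void)
	{
		unsigned long flags = 0;	/* stands in for mdev->flags */
		int i;

		/* prints: local, remote, local, remote */
		for (i = 0; i < 4; i++)
			printf("read %d -> %s\n", i,
			       test_and_change(&flags, 0) ? "remote" : "local");
		return 0;
	}

Which of the five policies applies is read under rcu_read_lock() from
disk_conf->read_balancing, i.e. the read_balancing disk option transported as
field 21 of DRBD_NLA_DISK_CONF below.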
+
/*
 * complete_conflicting_writes - wait for any conflicting write requests
 */

if (!drbd_may_do_local_read(mdev, sector, size)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(mdev);
+ } else if (remote_due_to_read_balancing(mdev)) {
+ /* Keep the private bio in case we need it
+  * for a local retry */
+ local = 0;
}
}
remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
if (req->rq_state & RQ_IN_ACT_LOG)
drbd_al_complete_io(mdev, &req->i);
fail_and_free_req:
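+ /* "local" may now be 0 while the private bio is still held for a
+  * possible retry, so key the cleanup off the bio pointer itself: */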
- if (local) {
+ if (!IS_ERR_OR_NULL(req->private_bio)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(mdev);
__flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
__flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
__u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
+ __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF)
)
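+
+/* read_balancing is deliberately not DRBD_GENLA_F_MANDATORY: a peer that
+ * does not know field 21 ignores it instead of rejecting the message. */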
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,