dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
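/* dgs is the size of the data-integrity digest appended to the payload;
 * it is zero unless the peer speaks protocol 87+ and a write integrity
 * transform has been configured. */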
- if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
+ if (req->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
p.head.h80.command = cpu_to_be16(P_DATA);
p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
+ cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
} else {
p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
p.head.h95.command = cpu_to_be16(P_DATA);
p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
+ cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
}
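/* Why two header variants: the legacy h80 header has only a 16-bit
 * length field, so any packet larger than DRBD_MAX_SIZE_H80_PACKET must
 * use the h95 header with its 32-bit length. Roughly (a sketch; the
 * exact layout is assumed from the protocol headers, not shown here):
 *
 *	struct p_header80 { u32 magic; u16 command; u16 length; ... };
 *	struct p_header95 { u16 magic; u16 command; u32 length; ... };
 */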
- p.sector = cpu_to_be64(req->sector);
+ p.sector = cpu_to_be64(req->i.sector);
p.block_id = (unsigned long)req;
p.seq_num = cpu_to_be32(req->seq_num =
atomic_add_return(1, &mdev->packet_seq));
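/* atomic_add_return() yields the incremented value, so the same new
 * sequence number is recorded in req->seq_num and sent on the wire. */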
if (memcmp(mdev->int_dig_out, digest, dgs)) {
dev_warn(DEV,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
- (unsigned long long)req->sector, req->size);
+ (unsigned long long)req->i.sector, req->i.size);
}
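/* A mismatch here means an upper layer modified the pages while the
 * write was in flight, so the peer may compute a different digest over
 * the payload it received and fail the integrity check. */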
} /* else if (dgs > 64) {
... Be noisy about digest too large ...
} */
{
struct p_block_desc p;
- p.sector = cpu_to_be64(req->sector);
- p.blksize = cpu_to_be32(req->size);
+ p.sector = cpu_to_be64(req->i.sector);
+ p.blksize = cpu_to_be32(req->i.size);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
}
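/* P_OUT_OF_SYNC only describes a block, it carries no data, so
 * p_block_desc is presumably just the header plus sector and size
 * (a sketch of the assumed wire format):
 *
 *	struct p_block_desc { struct p_header head; u64 sector; u32 blksize; ... };
 */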
* Other places where we set out-of-sync:
* READ with local io-error */
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->sector, req->size);
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->sector, req->size);
+ drbd_set_in_sync(mdev, req->i.sector, req->i.size);
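/* Bitmap bookkeeping: if either the network or the local write failed,
 * the range must be marked out of sync; only if both succeeded and the
 * peer reported set-in-sync (RQ_NET_SIS) may it be marked in sync. */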
/* one might be tempted to move the drbd_al_complete_io
* to the local io completion callback drbd_endio_pri.
if (s & RQ_LOCAL_MASK) {
if (get_ldev_if_state(mdev, D_FAILED)) {
if (s & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, req->sector);
+ drbd_al_complete_io(mdev, req->i.sector);
put_ldev(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
"but my Disk seems to have failed :(\n",
- (unsigned long long) req->sector);
+ (unsigned long long) req->i.sector);
}
}
}
* if we have the ee_hash (two_primaries) and
* this has been on the network */
if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
- const sector_t sector = req->sector;
- const int size = req->size;
+ const sector_t sector = req->i.sector;
+ const int size = req->i.size;
/* ASSERT:
* there must be no conflicting requests, since
* they must have been failed on the spot */
-#define OVERLAPS overlaps(sector, size, i->sector, i->size)
+#define OVERLAPS overlaps(sector, size, i->i.sector, i->i.size)
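/* overlaps() is assumed to be the usual interval test with the start in
 * sectors and the length in bytes (hence the >> 9), along the lines of:
 *
 *	static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
 *	{
 *		return !((s1 + (l1 >> 9)) <= s2 || s1 >= (s2 + (l2 >> 9)));
 *	}
 */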
slot = tl_hash_slot(mdev, sector);
hlist_for_each_entry(i, n, slot, collision) {
if (OVERLAPS) {
dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
"other: %p %llus +%u\n",
req, (unsigned long long)sector, size,
- i, (unsigned long long)i->sector, i->size);
+ i, (unsigned long long)i->i.sector, i->i.size);
}
}
* we just have to do a wake_up. */
#undef OVERLAPS
#define OVERLAPS overlaps(sector, size, e->sector, e->size)
- slot = ee_hash_slot(mdev, req->sector);
+ slot = ee_hash_slot(mdev, req->i.sector);
hlist_for_each_entry(e, n, slot, collision) {
if (OVERLAPS) {
wake_up(&mdev->misc_wait);
static int _req_conflicts(struct drbd_request *req)
{
struct drbd_conf *mdev = req->mdev;
- const sector_t sector = req->sector;
- const int size = req->size;
+ const sector_t sector = req->i.sector;
+ const int size = req->i.size;
struct drbd_request *i;
struct drbd_epoch_entry *e;
struct hlist_node *n;
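/* Conflict detection uses two hash tables: tl_hash holds our own
 * pending requests, ee_hash holds epoch entries received from the
 * peer; both are keyed by sector and checked with overlaps(). */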
goto out_no_conflict;
BUG_ON(mdev->tl_hash == NULL);
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
+#define OVERLAPS overlaps(i->i.sector, i->i.size, sector, size)
slot = tl_hash_slot(mdev, sector);
hlist_for_each_entry(i, n, slot, collision) {
if (OVERLAPS) {
"pending: %llus +%u\n",
current->comm, current->pid,
(unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
+ (unsigned long long)i->i.sector, i->i.size);
goto out_conflict;
}
}
case completed_ok:
if (bio_data_dir(req->master_bio) == WRITE)
- mdev->writ_cnt += req->size>>9;
+ mdev->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->size>>9;
+ mdev->read_cnt += req->i.size >> 9;
req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
req->rq_state &= ~RQ_LOCAL_PENDING;
break;
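/* req->i.size is in bytes; shifting right by 9 converts it to 512-byte
 * sectors, the unit writ_cnt and read_cnt are kept in. */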
case read_completed_with_error:
- drbd_set_out_of_sync(mdev, req->sector, req->size);
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
/* so we can verify the handle in the answer packet
* corresponding hlist_del is in _req_may_be_done() */
- hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
+ hlist_add_head(&req->collision, ar_hash_slot(mdev, req->i.sector));
set_bit(UNPLUG_REMOTE, &mdev->flags);
/* assert something? */
/* from drbd_make_request_common only */
- hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
+ hlist_add_head(&req->collision, tl_hash_slot(mdev, req->i.sector));
/* corresponding hlist_del is in _req_may_be_done() */
/* NOTE
case handed_over_to_network:
/* assert something? */
if (bio_data_dir(req->master_bio) == WRITE)
- atomic_add(req->size>>9, &mdev->ap_in_flight);
+ atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
if (bio_data_dir(req->master_bio) == WRITE &&
mdev->net_conf->wire_protocol == DRBD_PROT_A) {
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
req->rq_state |= RQ_NET_DONE;
if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
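/* ap_in_flight counts sectors of application writes that are on the
 * wire but not yet acknowledged; protocol A adds in
 * handed_over_to_network and subtracts again once the request is done. */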
/* if it is still queued, we may not complete it here.
* it will be canceled soon. */
if (what == conflict_discarded_by_peer)
dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
" DRBD is not a random data generator!\n",
- (unsigned long long)req->sector, req->size);
+ (unsigned long long)req->i.sector, req->i.size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
case write_acked_by_peer:
req->rq_state |= RQ_NET_OK;
D_ASSERT(req->rq_state & RQ_NET_PENDING);
dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
req->rq_state &= ~RQ_NET_PENDING;
_req_may_be_done_not_susp(req, m);
break;
/* assert something? */
if (req->rq_state & RQ_NET_PENDING) {
dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
}
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
if ((req->rq_state & RQ_NET_MASK) != 0) {
req->rq_state |= RQ_NET_DONE;
if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
}
_req_may_be_done(req, m); /* Allowed while state.susp */
break;