}
if (get_net_conf(mdev)) {
- if (!mdev->net_conf->two_primaries &&
+ if (!mdev->tconn->net_conf->two_primaries &&
ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES;
put_net_conf(mdev);
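The access above sits inside the usual guarded pattern, merely rerouted through the per-connection object; roughly (a minimal sketch, not part of the patch, assuming get_net_conf()/put_net_conf() keep their existing take/drop-reference semantics):

/* sketch: guarded read of a net_conf field via the connection object */
if (get_net_conf(mdev)) {			/* fails if no net config is attached */
	int proto = mdev->tconn->net_conf->wire_protocol;
	/* ... use proto while the reference is held ... */
	put_net_conf(mdev);			/* drop the reference again */
}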
size = sizeof(struct p_protocol);
if (mdev->agreed_pro_version >= 87)
- size += strlen(mdev->net_conf->integrity_alg) + 1;
+ size += strlen(mdev->tconn->net_conf->integrity_alg) + 1;
/* we must not recurse into our own queue,
* as that is blocked during handshake */
if (p == NULL)
return 0;
- p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
- p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
- p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
- p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
- p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
+ p->protocol = cpu_to_be32(mdev->tconn->net_conf->wire_protocol);
+ p->after_sb_0p = cpu_to_be32(mdev->tconn->net_conf->after_sb_0p);
+ p->after_sb_1p = cpu_to_be32(mdev->tconn->net_conf->after_sb_1p);
+ p->after_sb_2p = cpu_to_be32(mdev->tconn->net_conf->after_sb_2p);
+ p->two_primaries = cpu_to_be32(mdev->tconn->net_conf->two_primaries);
cf = 0;
- if (mdev->net_conf->want_lose)
+ if (mdev->tconn->net_conf->want_lose)
cf |= CF_WANT_LOSE;
- if (mdev->net_conf->dry_run) {
+ if (mdev->tconn->net_conf->dry_run) {
if (mdev->agreed_pro_version >= 92)
cf |= CF_DRY_RUN;
else {
p->conn_flags = cpu_to_be32(cf);
if (mdev->agreed_pro_version >= 87)
- strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
+ strcpy(p->integrity_alg, mdev->tconn->net_conf->integrity_alg);
rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
(struct p_header80 *)p, size);
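Reconstructed from the assignments above, the packet built here has roughly this shape (a sketch; the field order, the embedded header, and the trailing string are inferred from the cpu_to_be32() calls, the p_header80 cast, and the strlen()+1 size adjustment, not quoted from the header file):

/* sketch of struct p_protocol as implied by the code above */
struct p_protocol {
	struct p_header80 head;
	u32 protocol;		/* wire_protocol: DRBD_PROT_A/B/C */
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 two_primaries;
	u32 conn_flags;		/* CF_WANT_LOSE, CF_DRY_RUN */
	char integrity_alg[0];	/* only appended for agreed_pro_version >= 87 */
} __packed;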
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
- uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
+ uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
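The three conditionals above OR independent values into uuid_flags; spelled out for reference (a sketch only, the code uses the bare constants):

/* uuid_flags values used above:
 *   1 - net_conf->want_lose is set on this side
 *   2 - CRASHED_PRIMARY: the node was primary when it went down
 *   4 - the disk is about to be reported as D_INCONSISTENT
 */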
* out ok after sending on this side, but does not fit on the
* receiving side, we sure have detected corruption elsewhere.
*/
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
+ if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs)
ok = _drbd_send_bio(mdev, req->master_bio);
else
ok = _drbd_send_zc_bio(mdev, req->master_bio);
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (sock == mdev->data.socket) {
- mdev->ko_count = mdev->net_conf->ko_count;
+ mdev->ko_count = mdev->tconn->net_conf->ko_count;
drbd_update_congested(mdev);
}
do {
mdev->rs_mark_left[i] = 0;
mdev->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->net_conf == NULL);
+ D_ASSERT(mdev->tconn->net_conf == NULL);
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
if (get_net_conf(mdev)) {
- switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
+ switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
case AF_INET6:
afs = "ipv6";
snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
+ &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
break;
case AF_INET:
afs = "ipv4";
snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
+ &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
break;
default:
afs = "ssocks";
snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
+ &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
}
snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
envp[3]=af;
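For an IPv4 peer the user space helper therefore receives environment strings of roughly this form (illustrative values; only the two variables built above are shown):

DRBD_PEER_AF=ipv4
DRBD_PEER_ADDRESS=192.0.2.1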
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
+ schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
if (try < max_tries)
try = max_tries - 1;
continue;
}
} else {
if (get_net_conf(mdev)) {
- mdev->net_conf->want_lose = 0;
+ mdev->tconn->net_conf->want_lose = 0;
put_net_conf(mdev);
}
set_disk_ro(mdev->vdisk, false);
}
if (get_net_conf(mdev)) {
- int prot = mdev->net_conf->wire_protocol;
+ int prot = mdev->tconn->net_conf->wire_protocol;
put_net_conf(mdev);
if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
retcode = ERR_STONITH_AND_PROT_A;
if (!odev || odev == mdev)
continue;
if (get_net_conf(odev)) {
- taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
- if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
+ taken_addr = (struct sockaddr *)&odev->tconn->net_conf->my_addr;
+ if (new_conf->my_addr_len == odev->tconn->net_conf->my_addr_len &&
!memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
retcode = ERR_LOCAL_ADDR;
- taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
- if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
+ taken_addr = (struct sockaddr *)&odev->tconn->net_conf->peer_addr;
+ if (new_conf->peer_addr_len == odev->tconn->net_conf->peer_addr_len &&
!memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
retcode = ERR_PEER_ADDR;
drbd_flush_workqueue(mdev);
spin_lock_irq(&mdev->req_lock);
- if (mdev->net_conf != NULL) {
+ if (mdev->tconn->net_conf != NULL) {
retcode = ERR_NET_CONFIGURED;
spin_unlock_irq(&mdev->req_lock);
goto fail;
}
- mdev->net_conf = new_conf;
+ mdev->tconn->net_conf = new_conf;
mdev->send_cnt = 0;
mdev->recv_cnt = 0;
}
if (get_net_conf(mdev)) {
- tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
+ tl = net_conf_to_tags(mdev, mdev->tconn->net_conf, tl);
put_net_conf(mdev);
}
tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
+ if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
page = drbd_pp_first_pages_or_try_alloc(mdev, number);
while (page == NULL) {
drbd_kick_lo_and_reclaim_net(mdev);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
+ if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
page = drbd_pp_first_pages_or_try_alloc(mdev, number);
if (page)
break;
return NULL;
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
+ err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
}
sock->sk->sk_rcvtimeo =
- sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
- drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ sock->sk->sk_sndtimeo = mdev->tconn->net_conf->try_connect_int*HZ;
+ drbd_setbufsize(sock, mdev->tconn->net_conf->sndbuf_size,
+ mdev->tconn->net_conf->rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
* Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
- memcpy(&src_in6, mdev->net_conf->my_addr,
- min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
- if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
+ memcpy(&src_in6, mdev->tconn->net_conf->my_addr,
+ min_t(int, mdev->tconn->net_conf->my_addr_len, sizeof(src_in6)));
+ if (((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family == AF_INET6)
src_in6.sin6_port = 0;
else
((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
what = "bind before connect";
err = sock->ops->bind(sock,
(struct sockaddr *) &src_in6,
- mdev->net_conf->my_addr_len);
+ mdev->tconn->net_conf->my_addr_len);
if (err < 0)
goto out;
disconnect_on_error = 0;
what = "connect";
err = sock->ops->connect(sock,
- (struct sockaddr *)mdev->net_conf->peer_addr,
- mdev->net_conf->peer_addr_len, 0);
+ (struct sockaddr *)mdev->tconn->net_conf->peer_addr,
+ mdev->tconn->net_conf->peer_addr_len, 0);
out:
if (err < 0) {
return NULL;
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
+ err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
goto out;
}
- timeo = mdev->net_conf->try_connect_int * HZ;
+ timeo = mdev->tconn->net_conf->try_connect_int * HZ;
timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
s_listen->sk->sk_rcvtimeo = timeo;
s_listen->sk->sk_sndtimeo = timeo;
- drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ drbd_setbufsize(s_listen, mdev->tconn->net_conf->sndbuf_size,
+ mdev->tconn->net_conf->rcvbuf_size);
what = "bind before listen";
err = s_listen->ops->bind(s_listen,
- (struct sockaddr *) mdev->net_conf->my_addr,
- mdev->net_conf->my_addr_len);
+ (struct sockaddr *) mdev->tconn->net_conf->my_addr,
+ mdev->tconn->net_conf->my_addr_len);
if (err < 0)
goto out;
}
if (sock && msock) {
- schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
+ schedule_timeout_interruptible(mdev->tconn->net_conf->ping_timeo*HZ/10);
ok = drbd_socket_okay(mdev, &sock);
ok = drbd_socket_okay(mdev, &msock) && ok;
if (ok)
msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+ * sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
* sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
* first set it to the P_HAND_SHAKE timeout,
* which we set to 4x the configured ping_timeout. */
sock->sk->sk_sndtimeo =
- sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
+ sock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_timeo*4*HZ/10;

- msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+ msock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
+ msock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
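The *HZ/10 scaling in these assignments shows that ping_timeo and timeout are configured in tenths of a second, while ping_int (*HZ) is in whole seconds; as a worked example (value assumed, not taken from the patch):

/* sketch: ping_timeo == 5 (0.5 s) gives a handshake timeout of 5*4*HZ/10 == 2*HZ, i.e. two seconds */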
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
return 0;
- sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+ sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
atomic_set(&mdev->packet_seq, 0);
sector_t sector = e->i.sector;
int ok = 1, pcmd;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
+ if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
- if (mdev->net_conf->two_primaries) {
+ if (mdev->tconn->net_conf->two_primaries) {
spin_lock_irq(&mdev->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
int ok = 1;
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
+ D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
spin_lock_irq(&mdev->req_lock);
spin_unlock(&mdev->epoch_lock);
/* I'm the receiver, I do hold a net_cnt reference. */
- if (!mdev->net_conf->two_primaries) {
+ if (!mdev->tconn->net_conf->two_primaries) {
spin_lock_irq(&mdev->req_lock);
} else {
/* don't get the req_lock yet,
DEFINE_WAIT(wait);
int first;
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
+ D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
/* conflict detection and handling:
* 1. wait on the sequence number,
list_add(&e->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->req_lock);
- switch (mdev->net_conf->wire_protocol) {
+ switch (mdev->tconn->net_conf->wire_protocol) {
case DRBD_PROT_C:
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_block()
ch_peer = mdev->p_uuid[UI_SIZE];
ch_self = mdev->comm_bm_set;
- switch (mdev->net_conf->after_sb_0p) {
+ switch (mdev->tconn->net_conf->after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
- if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
+ if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
{
int hg, rv = -100;
- switch (mdev->net_conf->after_sb_1p) {
+ switch (mdev->tconn->net_conf->after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
{
int hg, rv = -100;
- switch (mdev->net_conf->after_sb_2p) {
+ switch (mdev->tconn->net_conf->after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
if (abs(hg) == 100)
drbd_khelper(mdev, "initial-split-brain");
- if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
+ if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
}
if (hg == -100) {
- if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
if (hg < 0 && /* by intention we do not use mydisk here. */
mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
- switch (mdev->net_conf->rr_conflict) {
+ switch (mdev->tconn->net_conf->rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(mdev, "pri-lost");
/* fall through */
}
}
- if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+ if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
if (cf & CF_DRY_RUN)
set_bit(CONN_DRY_RUN, &mdev->flags);
- if (p_proto != mdev->net_conf->wire_protocol) {
+ if (p_proto != mdev->tconn->net_conf->wire_protocol) {
dev_err(DEV, "incompatible communication protocols\n");
goto disconnect;
}
- if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
+ if (cmp_after_sb(p_after_sb_0p, mdev->tconn->net_conf->after_sb_0p)) {
dev_err(DEV, "incompatible after-sb-0pri settings\n");
goto disconnect;
}
- if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
+ if (cmp_after_sb(p_after_sb_1p, mdev->tconn->net_conf->after_sb_1p)) {
dev_err(DEV, "incompatible after-sb-1pri settings\n");
goto disconnect;
}
- if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
+ if (cmp_after_sb(p_after_sb_2p, mdev->tconn->net_conf->after_sb_2p)) {
dev_err(DEV, "incompatible after-sb-2pri settings\n");
goto disconnect;
}
- if (p_want_lose && mdev->net_conf->want_lose) {
+ if (p_want_lose && mdev->tconn->net_conf->want_lose) {
dev_err(DEV, "both sides have the 'want_lose' flag set\n");
goto disconnect;
}
- if (p_two_primaries != mdev->net_conf->two_primaries) {
+ if (p_two_primaries != mdev->tconn->net_conf->two_primaries) {
dev_err(DEV, "incompatible setting of the two-primaries options\n");
goto disconnect;
}
if (mdev->agreed_pro_version >= 87) {
- unsigned char *my_alg = mdev->net_conf->integrity_alg;
+ unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg;
if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
return false;
}
}
- mdev->net_conf->want_lose = 0;
+ mdev->tconn->net_conf->want_lose = 0;
drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
crypto_free_hash(mdev->cram_hmac_tfm);
mdev->cram_hmac_tfm = NULL;
- kfree(mdev->net_conf);
- mdev->net_conf = NULL;
+ kfree(mdev->tconn->net_conf);
+ mdev->tconn->net_conf = NULL;
drbd_request_state(mdev, NS(conn, C_STANDALONE));
}
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
- unsigned int key_len = strlen(mdev->net_conf->shared_secret);
+ unsigned int key_len = strlen(mdev->tconn->net_conf->shared_secret);
unsigned int resp_size;
struct hash_desc desc;
enum drbd_packets cmd;
desc.flags = 0;
rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
- (u8 *)mdev->net_conf->shared_secret, key_len);
+ (u8 *)mdev->tconn->net_conf->shared_secret, key_len);
if (rv) {
dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
if (rv)
dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
- resp_size, mdev->net_conf->cram_hmac_alg);
+ resp_size, mdev->tconn->net_conf->cram_hmac_alg);
else
rv = -1;
static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
/* restore idle timeout */
- mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+ mdev->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
wake_up(&mdev->misc_wait);
}
switch (be16_to_cpu(h->command)) {
case P_RS_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
+ D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
+ D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
+ D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
what = RECV_ACKED_BY_PEER;
break;
case P_DISCARD_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
+ D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
what = CONFLICT_DISCARDED_BY_PEER;
break;
default:
struct p_block_ack *p = (struct p_block_ack *)h;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
- bool missing_ok = mdev->net_conf->wire_protocol == DRBD_PROT_A ||
- mdev->net_conf->wire_protocol == DRBD_PROT_B;
+ bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
+ mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
bool found;
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
goto reconnect;
}
mdev->meta.socket->sk->sk_rcvtimeo =
- mdev->net_conf->ping_timeo*HZ/10;
+ mdev->tconn->net_conf->ping_timeo*HZ/10;
ping_timeout_active = 1;
}
/* conditionally cork;
* it may hurt latency if we cork without much to send */
- if (!mdev->net_conf->no_cork &&
+ if (!mdev->tconn->net_conf->no_cork &&
3 < atomic_read(&mdev->unacked_cnt))
drbd_tcp_cork(mdev->meta.socket);
while (1) {
break;
}
/* but unconditionally uncork unless disabled */
- if (!mdev->net_conf->no_cork)
+ if (!mdev->tconn->net_conf->no_cork)
drbd_tcp_uncork(mdev->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */