enum drbd_packet cmd, struct p_header *h, size_t size,
unsigned msg_flags)
{
- return !_conn_send_cmd(mdev->tconn, mdev->vnr, sock, cmd, h, size, msg_flags);
+ return _conn_send_cmd(mdev->tconn, mdev->vnr, sock, cmd, h, size, msg_flags);
}
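The hunk above is the definition-side half of the change that the remaining hunks apply at the call sites: _conn_send_cmd() reports 0 on success and nonzero on failure, _drbd_send_cmd() now passes that result through unchanged instead of folding it into a boolean, and each caller that still wants the old "ok" semantics negates the return value itself. A minimal userspace sketch of the two conventions, using hypothetical names (send_low_level(), send_wrapper_old(), send_wrapper_new()) rather than the DRBD symbols:

/*
 * Illustrative sketch only; the names below are made up and the error
 * value is a placeholder, not DRBD code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Low-level sender: 0 on success, nonzero (here a negative value) on failure. */
static int send_low_level(int fail)
{
	return fail ? -5 : 0;
}

/* Old convention: fold the result into a boolean, losing the error code. */
static bool send_wrapper_old(int fail)
{
	return !send_low_level(fail);		/* true == ok */
}

/* New convention (as in the hunk above): pass the result through. */
static int send_wrapper_new(int fail)
{
	return send_low_level(fail);		/* 0 == ok */
}

int main(void)
{
	bool ok_old = send_wrapper_old(1);
	bool ok_new = !send_wrapper_new(1);	/* negate at the call site */

	printf("old: ok=%d  new: ok=%d err=%d\n",
	       ok_old, ok_new, send_wrapper_new(1));
	return 0;
}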
static inline int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
if (apv >= 89)
strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);
- rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
+ rv = !_drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
} else
rv = 0; /* not ok */
p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
sock = mdev->tconn->data.socket;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
- }
+ if (likely(sock != NULL))
+ ok = !_drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
mutex_unlock(&mdev->tconn->data.mutex);
if (len) {
DCBP_set_code(p, RLE_VLI_Bits);
- ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_COMPRESSED_BITMAP, h,
- sizeof(*p) + len, 0);
+ ok = !_drbd_send_cmd(mdev, mdev->tconn->data.socket, P_COMPRESSED_BITMAP, h,
+ sizeof(*p) + len, 0);
c->packets[0]++;
c->bytes[0] += sizeof(*p) + len;
len = num_words * sizeof(long);
if (len)
drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
- ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BITMAP,
- h, sizeof(struct p_header80) + len, 0);
+ ok = !_drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BITMAP,
+ h, sizeof(struct p_header80) + len, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
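The two bitmap hunks above share one shape: when the run-length/VLI encoder produced output (len != 0), a P_COMPRESSED_BITMAP packet of sizeof(*p) + len bytes goes out and the compressed-traffic counters (packets[0], bytes[0]) are bumped; otherwise a plain P_BITMAP packet carries num_words raw longs fetched with drbd_bm_get_lel(), and the transfer context advances by that many words. A rough standalone sketch of that bookkeeping, with a hypothetical struct xfer_ctx standing in for DRBD's transfer context (the plain-packet counters under index 1 are an assumption; only the compressed ones appear in this excerpt):

#include <stddef.h>

#define BITS_PER_LONG (8 * sizeof(long))	/* userspace stand-in for the kernel macro */

/* Hypothetical stand-in for the bitmap transfer context. */
struct xfer_ctx {
	size_t bit_offset;	/* progress through the bitmap, in bits */
	size_t word_offset;	/* the same progress, in longs */
	size_t packets[2];	/* [0] compressed; [1] plain (assumed) */
	size_t bytes[2];
};

/*
 * Account for one outgoing chunk.  rle_len is the size of the encoded
 * payload (0 if the chunk was not compressible), hdr_len the packet
 * header size, num_words how many raw longs a plain chunk carries.
 */
void account_chunk(struct xfer_ctx *c, size_t hdr_len,
		   size_t rle_len, size_t num_words)
{
	if (rle_len) {
		/*
		 * Compressed chunk: header plus encoded run lengths.  The
		 * encoder is assumed to have advanced bit_offset while it
		 * consumed the bitmap.
		 */
		c->packets[0]++;
		c->bytes[0] += hdr_len + rle_len;
	} else {
		/* Plain chunk: header plus num_words raw longs. */
		c->packets[1]++;
		c->bytes[1] += hdr_len + num_words * sizeof(long);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;
	}
}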
/* inc_ap_pending was done where this was queued.
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in w_clear_epoch. */
- ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
- &p->head, sizeof(*p), 0);
+ ok = !_drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
+ &p->head, sizeof(*p), 0);
drbd_put_data_sock(mdev->tconn);
return ok;
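The comment in the barrier hunk describes a pairing discipline rather than anything local to this function: the pending counter is raised where the request is queued, and released either when the peer's BarrierAck arrives or, if the connection is lost first, by the cleanup path that clears the epoch. A small sketch of that discipline with a hypothetical C11 atomic counter (not the DRBD ap_pending machinery):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical pending-request counter standing in for ap_pending. */
static atomic_int pending;

/* Raised where the request/barrier is queued for the peer. */
static void queue_for_peer(void)
{
	atomic_fetch_add(&pending, 1);
}

/* Released when the peer acknowledges (the BarrierAck case). */
static void on_peer_ack(void)
{
	atomic_fetch_sub(&pending, 1);
}

/*
 * Released on connection loss for everything that will never be
 * acknowledged; without this, the counter never drains and anything
 * waiting for it to reach zero hangs.
 */
static void on_connection_loss(int still_queued)
{
	atomic_fetch_sub(&pending, still_queued);
}

static bool all_acked(void)
{
	return atomic_load(&pending) == 0;
}

int main(void)
{
	queue_for_peer();
	on_peer_ack();
	return all_acked() ? 0 : 1;
}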