u32 magic;
u16 command;
u16 length; /* bytes of data after this header */
- u8 payload[0];
} __packed;
/* Header for big packets, Used for data packets exceeding 64kB */
- u8 payload[0];
} __packed;
-union p_header {
- struct p_header80 h80;
- struct p_header95 h95;
+struct p_header {
+ union {
+ struct p_header80 h80;
+ struct p_header95 h95;
+ };
+ u8 payload[0];
};
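Both header variants are 8 bytes, so the anonymous union leaves sizeof(struct p_header) at 8, with payload[] starting right after it; generic code can now take a struct p_header * while senders still pick the variant explicitly. A minimal sketch of that choice, mirroring the drbd_send_dblock hunk further down (prep_header() is a hypothetical helper, not part of this patch):

/* Hypothetical helper: fill the matching header variant for a
 * payload of data_len bytes. h95 exists because h80's length
 * field is a u16 and cannot describe packets above 64kB. */
static void prep_header(struct p_header *h, u16 cmd, u32 data_len)
{
	if (data_len <= 0xffff) {
		h->h80.magic   = cpu_to_be32(DRBD_MAGIC);
		h->h80.command = cpu_to_be16(cmd);
		h->h80.length  = cpu_to_be16((u16)data_len);
	} else {
		h->h95.magic   = cpu_to_be16(DRBD_MAGIC_BIG);
		h->h95.command = cpu_to_be16(cmd);
		h->h95.length  = cpu_to_be32(data_len);
	}
}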
/*
#define DP_DISCARD 64 /* equals REQ_DISCARD */
struct p_data {
- union p_header head;
+ struct p_header head;
u64 sector; /* 64 bits sector number */
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
* P_DATA_REQUEST, P_RS_DATA_REQUEST
*/
struct p_block_ack {
- struct p_header80 head;
+ struct p_header head;
u64 sector;
u64 block_id;
u32 blksize;
struct p_block_req {
- struct p_header80 head;
+ struct p_header head;
u64 sector;
u64 block_id;
u32 blksize;
*/
struct p_handshake {
- struct p_header80 head; /* 8 bytes */
+ struct p_header head; /* Note: you must always use h80 here; the handshake runs before the protocol version is agreed */
u32 protocol_min;
u32 feature_flags;
u32 protocol_max;
/* 80 bytes, FIXED for the next century */
struct p_barrier {
- struct p_header80 head;
+ struct p_header head;
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
} __packed;
struct p_barrier_ack {
- struct p_header80 head;
+ struct p_header head;
u32 barrier;
u32 set_size;
} __packed;
struct p_rs_param {
- struct p_header80 head;
+ struct p_header head;
u32 rate;
/* Since protocol version 88 and higher. */
} __packed;
struct p_rs_param_89 {
- struct p_header80 head;
+ struct p_header head;
u32 rate;
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
} __packed;
struct p_rs_param_95 {
- struct p_header80 head;
+ struct p_header head;
u32 rate;
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
} __packed;
struct p_protocol {
- struct p_header80 head;
+ struct p_header head;
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
} __packed;
struct p_uuids {
- struct p_header80 head;
+ struct p_header head;
u64 uuid[UI_EXTENDED_SIZE];
} __packed;
struct p_rs_uuid {
- struct p_header80 head;
+ struct p_header head;
u64 uuid;
} __packed;
struct p_sizes {
- struct p_header80 head;
+ struct p_header head;
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
} __packed;
struct p_state {
- struct p_header80 head;
+ struct p_header head;
u32 state;
} __packed;
struct p_req_state {
- struct p_header80 head;
+ struct p_header head;
u32 mask;
u32 val;
} __packed;
struct p_req_state_reply {
- struct p_header80 head;
+ struct p_header head;
u32 retcode;
} __packed;
} __packed;
struct p_discard {
- struct p_header80 head;
+ struct p_header head;
u64 block_id;
u32 seq_num;
u32 pad;
} __packed;
struct p_block_desc {
- struct p_header80 head;
+ struct p_header head;
u64 sector;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
} __packed;
struct p_compressed_bm {
- struct p_header80 head;
+ struct p_header head;
/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
* (encoding & 0x80): polarity (set/unset) of first runlength
* ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
} __packed;
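For illustration, using the bit layout above: an encoding byte of 0x92 (1001 0010b) would mean the first runlength describes set bits (0x80 set), one trailing pad bit ((0x92 >> 4) & 0x07 == 1), and bitmap code 2 (0x92 & 0x0f).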
struct p_delay_probe93 {
- struct p_header80 head;
+ struct p_header head;
u32 seq_num; /* sequence number to match the two probe packets */
u32 offset; /* usecs the probe got sent after the reference time point */
} __packed;
* so we need to use the fixed size 4KiB page size
* most architectures have used for a long time.
*/
-#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
+#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
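Since both p_header80 and the new struct p_header are 8 bytes (the zero-length payload[] adds nothing), these values are unchanged: BM_PACKET_PAYLOAD_BYTES is 4096 - 8 = 4088 bytes, i.e. BM_PACKET_WORDS = 511 with 8-byte longs.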
#if (PAGE_SIZE < 4096)
#endif
union p_polymorph {
- union p_header header;
+ struct p_header header;
struct p_handshake handshake;
struct p_data data;
struct p_block_ack block_ack;
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
+ enum drbd_packets cmd, struct p_header *h,
size_t size, unsigned msg_flags);
#define USE_DATA_SOCKET 1
#define USE_META_SOCKET 0
extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h,
+ enum drbd_packets cmd, struct p_header *h,
size_t size);
extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
char *data, size_t size);
static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
enum drbd_packets cmd)
{
- struct p_header80 h;
+ struct p_header h;
return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
}
static inline int drbd_send_ping(struct drbd_conf *mdev)
{
- struct p_header80 h;
+ struct p_header h;
return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
}
static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
{
- struct p_header80 h;
+ struct p_header h;
return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
}
/* the appropriate socket mutex must be held already */
int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
+ enum drbd_packets cmd, struct p_header *hg,
size_t size, unsigned msg_flags)
{
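+	/* h80 is the first member of the anonymous union, so hg and h
+	 * point at the same bytes; the cast keeps the existing
+	 * h80-based send path below unchanged. */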
+ struct p_header80 *h = (struct p_header80 *)hg;
int sent, ok;
if (!expect(h))
* when we hold the appropriate socket mutex.
*/
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h, size_t size)
+ enum drbd_packets cmd, struct p_header *h, size_t size)
{
int ok = 0;
struct socket *sock;
if (mdev->tconn->agreed_pro_version >= 87)
strcpy(p->integrity_alg, mdev->tconn->net_conf->integrity_alg);
- rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
- (struct p_header80 *)p, size);
+ rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL, &p->head, size);
kfree(p);
return rv;
}
put_ldev(mdev);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, &p.head, sizeof(p));
}
int drbd_send_uuids(struct drbd_conf *mdev)
drbd_md_sync(mdev);
p.uuid = cpu_to_be64(uuid);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, &p.head, sizeof(p));
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
p.queue_order_type = cpu_to_be16(q_order_type);
p.dds_flags = cpu_to_be16(flags);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
- (struct p_header80 *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, &p.head, sizeof(p));
return ok;
}
sock = mdev->tconn->data.socket;
if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
+ ok = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
}
mutex_unlock(&mdev->tconn->data.mutex);
p.mask = cpu_to_be32(mask.i);
p.val = cpu_to_be32(val.i);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ, &p.head, sizeof(p));
}
int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
p.retcode = cpu_to_be32(retcode);
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, &p.head, sizeof(p));
}
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
*/
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
- struct p_header80 *h, struct bm_xfer_ctx *c)
+ struct p_header *h, struct bm_xfer_ctx *c)
{
struct p_compressed_bm *p = (void*)h;
unsigned long num_words;
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
struct bm_xfer_ctx c;
- struct p_header80 *p;
+ struct p_header *p;
int err;
if (!expect(mdev->bitmap))
/* maybe we should use some per thread scratch page,
* and allocate that during initial device creation? */
- p = (struct p_header80 *) __get_free_page(GFP_NOIO);
+ p = (struct p_header *) __get_free_page(GFP_NOIO);
if (!p) {
dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
return false;
if (mdev->state.conn < C_CONNECTED)
return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
- (struct p_header80 *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, &p.head, sizeof(p));
return ok;
}
if (!mdev->tconn->meta.socket || mdev->state.conn < C_CONNECTED)
return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, &p.head, sizeof(p));
return ok;
}
p.block_id = block_id;
p.blksize = cpu_to_be32(size);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &p.head, sizeof(p));
return ok;
}
p.block_id = ID_SYNCER /* unused */;
p.blksize = cpu_to_be32(size);
- p.head.magic = cpu_to_be32(DRBD_MAGIC);
- p.head.command = cpu_to_be16(cmd);
- p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
+ p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
+ p.head.h80.command = cpu_to_be16(cmd);
+ p.head.h80.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
mutex_lock(&mdev->tconn->data.mutex);
p.block_id = ID_SYNCER /* unused */;
p.blksize = cpu_to_be32(size);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
- (struct p_header80 *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, &p.head, sizeof(p));
return ok;
}
p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
p.head.h80.command = cpu_to_be16(P_DATA);
p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
+ cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->i.size);
} else {
p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
p.head.h95.command = cpu_to_be16(P_DATA);
p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
+ cpu_to_be32(sizeof(p) - sizeof(struct p_header) + dgs + req->i.size);
}
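+	/* sizeof(struct p_header) equals the old sizeof(union p_header)
+	 * (8 bytes; payload[0] adds no size), so the on-wire length
+	 * computed above is unchanged. */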
p.sector = cpu_to_be64(req->i.sector);
static int drbd_send_fp(struct drbd_conf *mdev,
struct socket *sock, enum drbd_packets cmd)
{
- struct p_header80 *h = &mdev->tconn->data.sbuf.header.h80;
+ struct p_header *h = &mdev->tconn->data.sbuf.header;
return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
- union p_header *h = &mdev->tconn->data.rbuf.header;
+ struct p_header *h = &mdev->tconn->data.rbuf.header;
int r;
r = drbd_recv(mdev, h, sizeof(*h));
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned plain = sizeof(struct p_header80) *
+ unsigned plain = sizeof(struct p_header) *
((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
+ c->bm_words * sizeof(long);
unsigned total = c->bytes[0] + c->bytes[1];
static void drbdd(struct drbd_conf *mdev)
{
- union p_header *header = &mdev->tconn->data.rbuf.header;
+ struct p_header *header = &mdev->tconn->data.rbuf.header;
unsigned int packet_size;
enum drbd_packets cmd;
size_t shs; /* sub header size */
goto err_out;
}
- shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
+ shs = drbd_cmd_handler[cmd].pkt_size - sizeof(struct p_header);
if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
goto err_out;
}
if (shs) {
- rv = drbd_recv(mdev, &header->h80.payload, shs);
+ rv = drbd_recv(mdev, &header->payload, shs);
if (unlikely(rv != shs)) {
if (!signal_pending(current))
dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
- (struct p_header80 *)p, sizeof(*p), 0 );
+ ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
+ &p->head, sizeof(*p), 0);
mutex_unlock(&mdev->tconn->data.mutex);
return ok;
}
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in w_clear_epoch. */
ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
- (struct p_header80 *)p, sizeof(*p), 0);
+ &p->head, sizeof(*p), 0);
drbd_put_data_sock(mdev);
return ok;