else
con->out_msg_pos.page_pos = 0;
con->out_msg_pos.data_pos = 0;
- con->out_msg_pos.did_page_crc = 0;
+ con->out_msg_pos.did_page_crc = false;
con->out_more = 1; /* data + footer will follow */
} else {
/* no, queue up footer too and be done */
struct ceph_msg *msg = con->out_msg;
unsigned data_len = le32_to_cpu(msg->hdr.data_len);
size_t len;
- int crc = !con->msgr->nocrc;
+ bool do_crc = !con->msgr->nocrc;
int ret;
int total_max_write;
int in_trail = 0;
page = list_first_entry(&msg->trail->head,
struct page, lru);
- if (crc)
+ if (do_crc)
kaddr = kmap(page);
max_write = PAGE_SIZE;
} else if (msg->pages) {
page = msg->pages[con->out_msg_pos.page];
- if (crc)
+ if (do_crc)
kaddr = kmap(page);
} else if (msg->pagelist) {
page = list_first_entry(&msg->pagelist->head,
struct page, lru);
- if (crc)
+ if (do_crc)
kaddr = kmap(page);
#ifdef CONFIG_BLOCK
} else if (msg->bio) {
bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
page = bv->bv_page;
page_shift = bv->bv_offset;
- if (crc)
+ if (do_crc)
kaddr = kmap(page) + page_shift;
max_write = bv->bv_len;
#endif
} else {
page = zero_page;
- if (crc)
+ if (do_crc)
kaddr = zero_page_address;
}
len = min_t(int, max_write - con->out_msg_pos.page_pos,
total_max_write);
- if (crc && !con->out_msg_pos.did_page_crc) {
+ if (do_crc && !con->out_msg_pos.did_page_crc) {
void *base = kaddr + con->out_msg_pos.page_pos;
u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
BUG_ON(kaddr == NULL);
con->out_msg->footer.data_crc =
cpu_to_le32(crc32c(tmpcrc, base, len));
- con->out_msg_pos.did_page_crc = 1;
+ con->out_msg_pos.did_page_crc = true;
}
ret = kernel_sendpage(con->sock, page,
con->out_msg_pos.page_pos + page_shift,
len,
MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_MORE);
- if (crc &&
+ if (do_crc &&
(msg->pages || msg->pagelist || msg->bio || in_trail))
kunmap(page);
if (ret == len) {
con->out_msg_pos.page_pos = 0;
con->out_msg_pos.page++;
- con->out_msg_pos.did_page_crc = 0;
+ con->out_msg_pos.did_page_crc = false;
if (in_trail)
list_move_tail(&page->lru,
&msg->trail->head);
dout("write_partial_msg_pages %p msg %p done\n", con, msg);
/* prepare and queue up footer, too */
- if (!crc)
+ if (!do_crc)
con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
ceph_con_out_kvec_reset(con);
prepare_write_message_footer(con);
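/*
 * Standalone sketch, not part of the patch above: it mirrors the
 * write-side pattern in write_partial_msg_pages() -- checksum a chunk
 * exactly once before trying to send it, leave did_page_crc set across
 * short writes so the same bytes are not folded in twice, and re-arm
 * the flag only once the page has gone out completely.  crc32c_update()
 * is a generic bitwise CRC32C and fake_send() simulates a socket that
 * accepts only a few bytes per call; neither is a real kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return ~crc;
}

/* Pretend the socket takes at most 3 bytes per call (a short write). */
static size_t fake_send(const void *buf, size_t len)
{
	(void)buf;
	return len < 3 ? len : 3;
}

int main(void)
{
	const char page[] = "abcdefgh";	/* one "page" of payload */
	size_t page_len = strlen(page);
	size_t page_pos = 0;		/* like out_msg_pos.page_pos */
	bool did_page_crc = false;	/* like out_msg_pos.did_page_crc */
	bool do_crc = true;		/* like "CRCs not disabled" */
	uint32_t data_crc = 0;

	while (page_pos < page_len) {
		size_t len = page_len - page_pos;

		/* Fold the chunk into the CRC once, before the send attempt. */
		if (do_crc && !did_page_crc) {
			data_crc = crc32c_update(data_crc, page + page_pos, len);
			did_page_crc = true;
		}

		page_pos += fake_send(page + page_pos, len);
		if (page_pos == page_len)
			did_page_crc = false;	/* page done: re-arm for the next */
	}
	printf("data crc 0x%08x\n", (unsigned int)data_crc);
	return 0;
}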
static int read_partial_message_pages(struct ceph_connection *con,
struct page **pages,
- unsigned data_len, int datacrc)
+ unsigned data_len, bool do_datacrc)
{
void *p;
int ret;
p = kmap(pages[con->in_msg_pos.page]);
ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
left);
- if (ret > 0 && datacrc)
+ if (ret > 0 && do_datacrc)
con->in_data_crc =
crc32c(con->in_data_crc,
p + con->in_msg_pos.page_pos, ret);
#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
struct bio **bio_iter, int *bio_seg,
- unsigned data_len, int datacrc)
+ unsigned data_len, bool do_datacrc)
{
struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
void *p;
ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
left);
- if (ret > 0 && datacrc)
+ if (ret > 0 && do_datacrc)
con->in_data_crc =
crc32c(con->in_data_crc,
p + con->in_msg_pos.page_pos, ret);
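/*
 * Standalone sketch, not part of the patch above: the receive-side
 * counterpart.  Each chunk handed back by the socket is folded into a
 * running CRC32C (the "if (ret > 0 && do_datacrc)" pattern in
 * read_partial_message_pages()/read_partial_message_bio()), and the
 * accumulated value is compared with the footer CRC only when CRCs are
 * enabled locally and the sender did not flag the footer as NOCRC.
 * crc32c_update() is the same stand-in helper as in the write-side
 * sketch; none of this is kernel API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len);

/* Fold one successfully received chunk into the running data CRC. */
static void account_chunk(uint32_t *in_data_crc, bool do_datacrc,
			  const void *buf, size_t received)
{
	if (do_datacrc && received > 0)
		*in_data_crc = crc32c_update(*in_data_crc, buf, received);
}

/*
 * Final check, as at the end of read_partial_message(): reject only when
 * CRCs are enabled, the sender actually computed one, and they disagree.
 */
static bool data_crc_ok(uint32_t in_data_crc, uint32_t footer_data_crc,
			bool do_datacrc, bool footer_nocrc)
{
	if (!do_datacrc || footer_nocrc)
		return true;
	return in_data_crc == footer_data_crc;
}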
int ret;
int to, left;
unsigned front_len, middle_len, data_len;
- int datacrc = !con->msgr->nocrc;
+ bool do_datacrc = !con->msgr->nocrc;
int skip;
u64 seq;
while (con->in_msg_pos.data_pos < data_len) {
if (m->pages) {
ret = read_partial_message_pages(con, m->pages,
- data_len, datacrc);
+ data_len, do_datacrc);
if (ret <= 0)
return ret;
#ifdef CONFIG_BLOCK
ret = read_partial_message_bio(con,
&m->bio_iter, &m->bio_seg,
- data_len, datacrc);
+ data_len, do_datacrc);
if (ret <= 0)
return ret;
#endif
m, con->in_middle_crc, le32_to_cpu(m->footer.middle_crc));
return -EBADMSG;
}
- if (datacrc &&
+ if (do_datacrc &&
(m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
pr_err("read_partial_message %p data crc %u != exp. %u\n", m,