RDS: Rename data op members prefix from m_* to op_*

For consistency.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
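(Annotation, not part of the patch: the hunks below touch what appear to be the IB, iWARP and TCP send paths plus the core message code. The data op's m_sg/m_nents/m_count members become op_sg/op_nents/op_count, matching the op_* prefix that the sibling rdma op in struct rds_message already uses; the struct hunk near the end shows the rename itself. A rough sketch of the shapes involved, with stub declarations standing in for the kernel types, illustrative only:)

    struct scatterlist;

    /* The sibling op already follows the op_* convention... */
    struct rm_rdma_op {
            unsigned int op_active:1;
            unsigned int op_nents;
            unsigned int op_count;
            struct scatterlist *op_sg;
    };

    /* ...so the data op is renamed to match. */
    struct rm_data_op {
            unsigned int op_active:1;
            unsigned int op_nents;  /* sg entries populated with pages */
            unsigned int op_count;  /* entries the DMA mapping produced */
            struct scatterlist *op_sg;
    };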
rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->data.m_sg, rm->data.m_nents,
+ rm->data.op_sg, rm->data.op_nents,
DMA_TO_DEVICE);
if (rm->rdma.op_active) {
/* map the message the first time we see it */
if (!ic->i_rm) {
- if (rm->data.m_nents) {
- rm->data.m_count = ib_dma_map_sg(dev,
- rm->data.m_sg,
- rm->data.m_nents,
- DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
- if (rm->data.m_count == 0) {
+ if (rm->data.op_nents) {
+ rm->data.op_count = ib_dma_map_sg(dev,
+ rm->data.op_sg,
+ rm->data.op_nents,
+ DMA_TO_DEVICE);
+ rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+ if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
- rm->data.m_count = 0;
+ rm->data.op_count = 0;
}
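(Annotation: this hunk is in the IB transmit path, where the message's sg list is DMA-mapped only the first time the message is seen and then cached on the connection, so retransmitted fragments skip the map. A minimal standalone sketch of that map-once pattern; map_sg_stub() is a local stand-in for ib_dma_map_sg(), which can legitimately return fewer entries than it was given when the IOMMU merges adjacent ones, and the caching point is approximated here:)

    #include <stdio.h>

    struct scatterlist { unsigned int length; };

    struct rm_data_op {
            unsigned int op_nents;          /* entries populated */
            unsigned int op_count;          /* entries mapped */
            struct scatterlist *op_sg;
    };

    struct rds_message { struct rm_data_op data; };
    struct conn_state { struct rds_message *i_rm; }; /* cached in-flight msg */

    /* Stand-in for ib_dma_map_sg(): may return fewer entries than nents. */
    static int map_sg_stub(struct scatterlist *sg, unsigned int nents)
    {
            (void)sg;
            return (int)nents;
    }

    /* Map the message's sg list only on first sight, then cache it. */
    static int map_once(struct conn_state *ic, struct rds_message *rm)
    {
            if (ic->i_rm)
                    return 0;               /* mapped on an earlier call */
            if (rm->data.op_nents) {
                    int n = map_sg_stub(rm->data.op_sg, rm->data.op_nents);
                    if (n == 0)
                            return -1;      /* mapping failed; caller unwinds */
                    rm->data.op_count = (unsigned int)n;
            } else {
                    rm->data.op_count = 0;  /* header-only message */
            }
            ic->i_rm = rm;                  /* retransmits skip the map */
            return 0;
    }

    int main(void)
    {
            struct scatterlist sg[2] = { { 4096 }, { 4096 } };
            struct rds_message rm = { { 2, 0, sg } };
            struct conn_state ic = { 0 };

            map_once(&ic, &rm);
            printf("mapped %u of %u entries\n",
                   rm.data.op_count, rm.data.op_nents);
            return 0;
    }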
rds_message_addref(rm);
send = &ic->i_sends[pos];
first = send;
prev = NULL;
- scat = &rm->data.m_sg[sg];
+ scat = &rm->data.op_sg[sg];
i = 0;
do {
unsigned int len = 0;
/* Set up the data, if present */
if (i < work_alloc
- && scat != &rm->data.m_sg[rm->data.m_count]) {
+ && scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
send->s_wr.num_sge = 2;
i++;
} while (i < work_alloc
- && scat != &rm->data.m_sg[rm->data.m_count]);
+ && scat != &rm->data.op_sg[rm->data.op_count]);
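(Annotation: the do/while above walks the mapped sg list in RDS_FRAG_SIZE chunks, and each work request carries at most one such fragment plus the RDS header, hence num_sge = 2. A standalone sketch of the fragment arithmetic; RDS_FRAG_SIZE is a kernel constant and the 4KB value here is an assumption for illustration:)

    #include <assert.h>

    #define RDS_FRAG_SIZE 4096u     /* assumed value for the sketch */

    /* Number of send work requests a payload of len bytes consumes,
     * one fragment per request. */
    static unsigned int frags_needed(unsigned int len)
    {
            return (len + RDS_FRAG_SIZE - 1) / RDS_FRAG_SIZE;
    }

    int main(void)
    {
            assert(frags_needed(10000) == 3); /* 2 full frags + 1 partial */
            assert(frags_needed(4096) == 1);
            return 0;
    }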
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
- if (scat == &rm->data.m_sg[rm->data.m_count]) {
+ if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_rm = ic->i_rm;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
ic->i_rm = NULL;
rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->data.m_sg, rm->data.m_nents,
+ rm->data.op_sg, rm->data.op_nents,
DMA_TO_DEVICE);
if (rm->rdma.op_active) {
rm->m_inc.i_hdr.h_flags,
be32_to_cpu(rm->m_inc.i_hdr.h_len));
*/
- if (rm->data.m_nents) {
- rm->data.m_count = ib_dma_map_sg(dev,
- rm->data.m_sg,
- rm->data.m_nents,
- DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
- if (rm->data.m_count == 0) {
+ if (rm->data.op_nents) {
+ rm->data.op_count = ib_dma_map_sg(dev,
+ rm->data.op_sg,
+ rm->data.op_nents,
+ DMA_TO_DEVICE);
+ rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+ if (rm->data.op_count == 0) {
rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
- rm->data.m_count = 0;
+ rm->data.op_count = 0;
}
ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
send = &ic->i_sends[pos];
first = send;
prev = NULL;
- scat = &rm->data.m_sg[sg];
+ scat = &rm->data.op_sg[sg];
sent = 0;
i = 0;
}
/* if there's data reference it with a chain of work reqs */
- for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
+ for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
unsigned int len;
send = &ic->i_sends[pos];
sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
- if (scat == &rm->data.m_sg[rm->data.m_count]) {
+ if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_rm = ic->i_rm;
prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
ic->i_rm = NULL;
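(Annotation: the first group of hunks above is the IB path, given rds_ib_stats_inc, and this group is the near-identical iWARP path, given rds_iw_stats_inc, so the rename has to land in both copies. In each, once scat reaches the end of the mapped entries, the cached message is handed to the last work request so the send completion can drop the reference. A pattern excerpt rather than standalone code, with s_rm being the per-work-request message pointer as in the hunks above:)

    /* Last fragment queued: the send completion now owns the message. */
    if (scat == &rm->data.op_sg[rm->data.op_count]) {
            prev->s_rm = ic->i_rm;  /* completion handler drops this ref */
            prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
            ic->i_rm = NULL;        /* the connection no longer owns it */
    }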
if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
return;
- for (i = 0; i < rm->data.m_nents; i++) {
- rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.m_sg[i]));
+ for (i = 0; i < rm->data.op_nents; i++) {
+ rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
/* XXX will have to put_page for page refs */
- __free_page(sg_page(&rm->data.m_sg[i]));
+ __free_page(sg_page(&rm->data.op_sg[i]));
}
- rm->data.m_nents = 0;
+ rm->data.op_nents = 0;
if (rm->rdma.op_active)
rds_rdma_free_op(&rm->rdma);
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->data.m_nents = ceil(total_len, PAGE_SIZE);
- rm->data.m_sg = rds_message_alloc_sgs(rm, num_sgs);
+ rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
- for (i = 0; i < rm->data.m_nents; ++i) {
- sg_set_page(&rm->data.m_sg[i],
+ for (i = 0; i < rm->data.op_nents; ++i) {
+ sg_set_page(&rm->data.op_sg[i],
virt_to_page(page_addrs[i]),
PAGE_SIZE, 0);
}
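(Annotation: the op_nents computation above uses RDS's ceil() round-up division helper to turn a byte count into a page count before pointing each sg entry at a whole page. A standalone equivalent of that arithmetic; div_round_up() is a local stand-in, not the kernel macro, and 4KB pages are assumed:)

    #include <assert.h>

    #define SKETCH_PAGE_SIZE 4096ul /* assumed 4KB pages */

    /* Round-up integer division, same effect as RDS's ceil(x, y). */
    static unsigned long div_round_up(unsigned long x, unsigned long y)
    {
            return (x + y - 1) / y;
    }

    int main(void)
    {
            /* A 10000-byte payload spans three pages' worth of sg entries. */
            assert(div_round_up(10000, SKETCH_PAGE_SIZE) == 3);
            return 0;
    }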
/*
* now allocate and copy in the data payload.
*/
- sg = rm->data.m_sg;
+ sg = rm->data.op_sg;
iov = first_iov;
iov_off = 0;
sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
GFP_HIGHUSER);
if (ret)
goto out;
- rm->data.m_nents++;
+ rm->data.op_nents++;
sg_off = 0;
}
iov = first_iov;
iov_off = 0;
- sg = rm->data.m_sg;
+ sg = rm->data.op_sg;
vec_off = 0;
copied = 0;
} rdma;
struct rm_data_op {
unsigned int op_active:1;
- unsigned int m_nents;
- unsigned int m_count;
- struct scatterlist *m_sg;
+ unsigned int op_nents;
+ unsigned int op_count;
+ struct scatterlist *op_sg;
} data;
};
unsigned int m_used_sgs;
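(Annotation: with the struct renamed, the two counters' roles are easier to state. op_nents is how many sg entries were populated; op_count is how many entries the DMA mapping produced, possibly fewer, which is why the send loops above bound themselves by op_count. A standalone sketch of that iteration idiom; post_one_fragment() is a hypothetical stand-in for the per-entry send setup:)

    #include <stdio.h>

    struct scatterlist { unsigned int length; };

    struct rm_data_op {
            unsigned int op_nents;          /* entries populated */
            unsigned int op_count;          /* entries the mapping produced */
            struct scatterlist *op_sg;
    };

    static void post_one_fragment(const struct scatterlist *scat)
    {
            printf("posting %u bytes\n", scat->length); /* stand-in */
    }

    int main(void)
    {
            struct scatterlist sg[3] = { { 4096 }, { 4096 }, { 1808 } };
            struct rm_data_op data = { 3, 3, sg };
            const struct scatterlist *scat;

            /* Bound by op_count: only mapped entries are sent. */
            for (scat = &data.op_sg[0];
                 scat != &data.op_sg[data.op_count];
                 scat++)
                    post_one_fragment(scat);
            return 0;
    }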
rm = conn->c_xmit_rm;
if (rm &&
conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
- conn->c_xmit_sg == rm->data.m_nents) {
+ conn->c_xmit_sg == rm->data.op_nents) {
conn->c_xmit_rm = NULL;
conn->c_xmit_sg = 0;
conn->c_xmit_hdr_off = 0;
if (rm->data.op_active
&& (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
- conn->c_xmit_sg < rm->data.m_nents)) {
+ conn->c_xmit_sg < rm->data.op_nents)) {
ret = conn->c_trans->xmit(conn, rm,
conn->c_xmit_hdr_off,
conn->c_xmit_sg,
ret -= tmp;
}
- sg = &rm->data.m_sg[conn->c_xmit_sg];
+ sg = &rm->data.op_sg[conn->c_xmit_sg];
while (ret) {
tmp = min_t(int, ret, sg->length -
conn->c_xmit_data_off);
sg++;
conn->c_xmit_sg++;
BUG_ON(ret != 0 &&
- conn->c_xmit_sg == rm->data.m_nents);
+ conn->c_xmit_sg == rm->data.op_nents);
}
}
}
goto out;
}
- rm->data.m_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+ rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
/* XXX fix this to not allocate memory */
ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
if (ret)
goto out;
}
- while (sg < rm->data.m_nents) {
+ while (sg < rm->data.op_nents) {
ret = tc->t_sock->ops->sendpage(tc->t_sock,
- sg_page(&rm->data.m_sg[sg]),
- rm->data.m_sg[sg].offset + off,
- rm->data.m_sg[sg].length - off,
+ sg_page(&rm->data.op_sg[sg]),
+ rm->data.op_sg[sg].offset + off,
+ rm->data.op_sg[sg].length - off,
MSG_DONTWAIT|MSG_NOSIGNAL);
- rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.m_sg[sg]),
- rm->data.m_sg[sg].offset + off, rm->data.m_sg[sg].length - off,
+ rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
+ rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
ret);
if (ret <= 0)
break;
off += ret;
done += ret;
- if (off == rm->data.m_sg[sg].length) {
+ if (off == rm->data.op_sg[sg].length) {
off = 0;
sg++;
}
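(Annotation: in the TCP path above, sendpage() may accept only part of an sg entry, so the (sg, off) cursor advances by whatever the socket took and the next call resumes mid-entry. A standalone analogue of that bookkeeping; struct seg and advance() are stand-ins for the scatterlist walk, not kernel code:)

    #include <assert.h>
    #include <stddef.h>

    struct seg { size_t length; };

    /* Consume ret bytes: bump the offset, and step to the next entry
     * once the current one is fully sent. */
    static void advance(const struct seg *segs, size_t *sg, size_t *off,
                        size_t ret)
    {
            *off += ret;
            if (*off == segs[*sg].length) {
                    *off = 0;
                    (*sg)++;
            }
    }

    int main(void)
    {
            struct seg segs[2] = { { 4096 }, { 4096 } };
            size_t sg = 0, off = 0;

            advance(segs, &sg, &off, 1000); /* partial write... */
            assert(sg == 0 && off == 1000);
            advance(segs, &sg, &off, 3096); /* ...completes entry 0 */
            assert(sg == 1 && off == 0);
            return 0;
    }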