Use the CPU's ability to accelerate CRC calculations by replacing direct
calls to crc32_le() with crypto_shash_update(), which lets the crypto
subsystem select a hardware-accelerated crc32 implementation when one is
available (a condensed sketch of the pattern follows the measurements
below).

The overall performance gain, measured with the ib_send_bw tool on an
Intel E5-2660 v2 @ 2.20GHz CPU, is 10%:
ib_send_bw -d rxe0 -x 1 -n 9000 -e -s $((1024 * 1024 )) -l 100
---------------------------------------------------------------------------------------------
|             | bytes   | iterations | BW peak[MB/sec] | BW average[MB/sec] | MsgRate[Mpps] |
---------------------------------------------------------------------------------------------
| crc32_le    | 1048576 | 9000       | inf             | 497.60             | 0.000498      |
| CRC offload | 1048576 | 9000       | inf             | 546.70             | 0.000547      |
---------------------------------------------------------------------------------------------
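
For readers unfamiliar with the kernel crypto shash interface, the
condensed sketch below shows in one place the pattern that the patch
spreads across several files: allocate a "crc32" transform once at device
registration, seed the descriptor context with the running CRC for each
buffer, let crypto_shash_update() advance it, and free the transform at
cleanup. The example_* names are illustrative only; the helper actually
added by the patch is rxe_crc32().

#include <linux/err.h>
#include <linux/crc32.h>
#include <crypto/hash.h>

static struct crypto_shash *example_tfm;

static int example_crc_init(void)
{
	/* "crc32" resolves to the highest-priority registered driver,
	 * which is an accelerated one where available.
	 */
	example_tfm = crypto_alloc_shash("crc32", 0, 0);
	return PTR_ERR_OR_ZERO(example_tfm);
}

static u32 example_crc_update(u32 crc, const void *buf, unsigned int len)
{
	int err;

	SHASH_DESC_ON_STACK(desc, example_tfm);

	desc->tfm = example_tfm;
	desc->flags = 0;
	/* crc32's shash state is simply the current CRC value */
	*(u32 *)shash_desc_ctx(desc) = crc;
	err = crypto_shash_update(desc, buf, len);
	if (err)
		return crc32_le(crc, buf, len);	/* generic fallback */
	return *(u32 *)shash_desc_ctx(desc);
}

static void example_crc_exit(void)
{
	crypto_free_shash(example_tfm);
}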
Fixes: 8700e3e7c485 ("Soft RoCE driver")
Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
depends on NET_UDP_TUNNEL
+ depends on CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
This driver implements the InfiniBand RDMA transport over
rxe_pool_cleanup(&rxe->mc_elem_pool);
rxe_cleanup_ports(rxe);
+
+ crypto_free_shash(rxe->tfm);
}
/* called when all references have been dropped */
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+#include <crypto/hash.h>
#include "rxe_net.h"
#include "rxe_opcode.h"
#define RXE_ROCE_V2_SPORT (0xc000)
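+/* Advance @crc over @len bytes at @next using the crypto shash transform
+ * allocated at device registration; fall back to crc32_le() if the
+ * update fails.
+ */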
+static inline u32 rxe_crc32(struct rxe_dev *rxe,
+ u32 crc, void *next, size_t len)
+{
+ int err;
+
+ SHASH_DESC_ON_STACK(shash, rxe->tfm);
+
+ shash->tfm = rxe->tfm;
+ shash->flags = 0;
+ *(u32 *)shash_desc_ctx(shash) = crc;
+ err = crypto_shash_update(shash, next, len);
+ if (unlikely(err)) {
+ pr_warn_ratelimited("failed crc calculation, err: %d\n", err);
+ return crc32_le(crc, next, len);
+ }
+
+ return *(u32 *)shash_desc_ctx(shash);
+}
+
int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
length = hdr_size + RXE_BTH_BYTES;
- crc = crc32_le(crc, pshdr, length);
+ crc = rxe_crc32(pkt->rxe, crc, pshdr, length);
/* And finish to compute the CRC on the remainder of the headers. */
- crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES,
- rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
+ crc = rxe_crc32(pkt->rxe, crc, pkt->hdr + RXE_BTH_BYTES,
+ rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
return crc;
}
((void *)(uintptr_t)iova) : addr;
if (crcp)
- *crcp = crc32_le(*crcp, src, length);
+ *crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
+ *crcp, src, length);
memcpy(dest, src, length);
bytes = length;
if (crcp)
- crc = crc32_le(crc, src, bytes);
+ crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
+ crc, src, bytes);
memcpy(dest, src, bytes);
pack_icrc = be32_to_cpu(*icrcp);
calc_icrc = rxe_icrc_hdr(pkt, skb);
- calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
- payload_size(pkt));
+ calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt));
calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
if (skb->protocol == htons(ETH_P_IPV6))
*/
#include <linux/skbuff.h>
+#include <crypto/hash.h>
#include "rxe.h"
#include "rxe_loc.h"
if (wqe->wr.send_flags & IB_SEND_INLINE) {
u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
- crc = crc32_le(crc, tmp, paylen);
-
+ crc = rxe_crc32(rxe, crc, tmp, paylen);
memcpy(payload_addr(pkt), tmp, paylen);
wqe->dma.resid -= paylen;
dev->get_hw_stats = rxe_ib_get_hw_stats;
dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;
+ rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(rxe->tfm)) {
+ pr_err("failed to allocate crc algorithmi err:%ld",
+ PTR_ERR(rxe->tfm));
+ return PTR_ERR(rxe->tfm);
+ }
+
err = ib_register_device(dev, NULL);
if (err) {
pr_warn("rxe_register_device failed, err = %d\n", err);
err2:
ib_unregister_device(dev);
err1:
+ crypto_free_shash(rxe->tfm);
+
return err;
}
struct rxe_port port;
struct list_head list;
+ struct crypto_shash *tfm;
};
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters cnt)