return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
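+/*
+ * Attach bfr_len bytes at bfr to the skb as a page fragment: take a
+ * reference on the backing page and point a frag at it, so nothing is
+ * copied or separately allocated (the old write_buffer_data_page_desc(),
+ * removed below, kmalloc'd and memcpy'd a bounce buffer per call).
+ */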
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
unsigned int phys_dsgl)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
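+ /*
+ * WR size fields below are in 16-byte units (hence the >> 4); the key
+ * context length now also covers the fixed key_ctx header embedded in
+ * struct chcr_wr.
+ */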
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
(hash_sz) ? IV_NOP : iv_loc);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
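+ /* ULPTX length: WR flits (8 bytes each) in 16-byte units, less the FW WR header. */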
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
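+ /* The immediate sub-command length spans the SEC CPL, key context and any inline payload. */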
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len +
((hash_sz) ? DUMMY_BYTES :
(sizeof(struct cpl_rx_phys_dsgl) +
phys_dsgl)) + immdatalen);
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
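+ /* Sleep in allocations only when the crypto API caller permits it. */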
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
ablkctx->enc = op_type;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
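+ /* Decryption needs the reverse-round key, generated directly into the in-WR key context. */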
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
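+ /*
+ * Remaining modes carry a double-length key; the hardware wants the
+ * two halves in swapped order (presumably the XTS tweak key first).
+ */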
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
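+ /* Map the destination s/g into the response DSGL; obsize is now simply the payload length. */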
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
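+ /* On-the-wire payload order: IV first, then the source s/g data. */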
+ memcpy(ablkctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, ablkctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
req_ctx->skb = skb;
skb_get(skb);
return skb;
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
-{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
-}
-
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
- * @req - Cipher req base
+ * @req - Hash request base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
- return skb;
+ return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
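+ /* Hash WRs carry no IV; the payload is the buffered partial block plus the new s/g data. */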
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
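+ /*
+ * For HMAC the opad state follows the partial hash, at byte 32 for
+ * digests of up to 32 bytes and at CHCR_HASH_MAX_DIGEST_SIZE beyond that.
+ */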
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
+ write_buffer_to_skb(skb, &frags, req_ctx->bfr,
param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
0);
req_ctx->skb = skb;
skb_get(skb);
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
req_ctx->bfr_len = remainder;
if (remainder)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);