/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
	u8 state_sz;	/* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct safexcel_ahash_export_state {
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
};
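/* Build the command token: instruct the engine to hash input_length bytes
 * and then insert the result_length-byte digest into the result buffer.
 */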
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
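/* Program the command descriptor control words and the context record for
 * this request: algorithm, digest type and, when continuing a previous
 * request, the precomputed intermediate state.
 */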
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= ctx->digest;

	if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->last_req)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

		memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
		memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
		       ctx->opad, digestsize);
	}
}
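/* Completion handling for a regular hash request: check the result
 * descriptor, save the returned (intermediate) digest and release the
 * source buffer mappings.
 */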
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len, result_sz = sreq->state_sz;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->last_req)
		result_sz = crypto_ahash_digestsize(ahash);
	memcpy(sreq->state, areq->result, result_sz);

	dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);

	safexcel_free_context(priv, async, sreq->state_sz);

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
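/* Build the command and result descriptors for a hash request: a first
 * descriptor for any cached partial block, then one per source scatterlist
 * entry, followed by a single result descriptor for the digest.
 */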
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued < crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = queued - crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;
		}
	}
	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
		if (!ctx->base.cache) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(ctx->base.cache, req->cache, cache_len);
		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
						     cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
			ret = -EINVAL;
			goto free_cache;
		}

		ctx->base.cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 ctx->base.cache_dma, cache_len,
						 len, ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}

		n_cdesc++;
		queued -= cache_len;
		if (!queued)
			goto send_command;
	}
	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
					      req->state_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
		ret = -EINVAL;
		goto cdesc_rollback;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;

	*commands = n_cdesc;
	*results = 1;
	return 0;
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (ctx->base.cache_dma) {
		dma_unmap_single(priv->dev, ctx->base.cache_dma,
				 ctx->base.cache_sz, DMA_TO_DEVICE);
		ctx->base.cache_sz = 0;
	}
free_cache:
	if (ctx->base.cache) {
		kfree(ctx->base.cache);
		ctx->base.cache = NULL;
	}

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
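/* Check whether the hardware context record still matches the request
 * state; if not, the context must be invalidated before it can be reused.
 */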
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}
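/* Completion handling for a context invalidation request: free the context
 * record on exit, or pick a new ring and re-queue the request otherwise.
 */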
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return 1;
}
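/* Result dispatcher: route completions to the invalidation or regular
 * request handler depending on what was queued.
 */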
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, struct safexcel_request *request,
			       int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, request,
					      commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, request,
					      commands, results);
	return ret;
}
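/* Synchronously invalidate the hardware context record when tearing down a
 * transform, using an invalidation request queued on its ring.
 */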
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
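/* Cache incoming data in the request context while it still fits in a
 * single block; the caller sends it to the engine otherwise.
 */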
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	cache_len = req->len - areq->nbytes - req->processed;
	queued = req->len - req->processed;

	/*
	 * If there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len, areq->nbytes,
				   0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return queued;
}
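/* Queue a request on its ring, allocating the context record on first use
 * and flagging an invalidation when the cached state no longer matches.
 */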
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}
static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an HMAC request.
	 * Everything will be handled by the final() call.
	 */
	if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}
static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	memcpy(export->state, req->state, req->state_sz);
	memset(export->cache, 0, crypto_ahash_blocksize(ahash));
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}
static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));

	return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	ret = safexcel_ahash_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
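/* The HMAC variant reuses the SHA1 request setup and only switches the
 * digest type to HMAC.
 */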
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	safexcel_sha1_init(areq);
	ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;

	return 0;
}
static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_ahash_result {
	struct completion completion;
	int error;
};
static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
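/* Compute the HMAC inner and outer pads from the key: hash keys longer
 * than a block, zero-pad, then XOR in the ipad/opad constants.
 */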
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}
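/* Precompute the HMAC ipad/opad intermediate states by hashing one block
 * of each pad with the underlying safexcel hash and exporting the state.
 */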
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
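/* Set the HMAC-SHA1 key: derive the ipad/opad states and flag the context
 * for invalidation if they changed.
 */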
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}

	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

	return 0;
}
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};