1/*
2 * caam - Freescale FSL CAAM support for ahash functions of crypto API
3 *
4 * Copyright 2011 Freescale Semiconductor, Inc.
5 *
6 * Based on caamalg.c crypto API driver.
7 *
8 * relationship of digest job descriptor or first job descriptor after init to
9 * shared descriptors:
10 *
11 * --------------- ---------------
12 * | JobDesc #1 |-------------------->| ShareDesc |
13 * | *(packet 1) | | (hashKey) |
14 * --------------- | (operation) |
15 * ---------------
16 *
17 * relationship of subsequent job descriptors to shared descriptors:
18 *
19 * --------------- ---------------
20 * | JobDesc #2 |-------------------->| ShareDesc |
21 * | *(packet 2) | |------------->| (hashKey) |
22 * --------------- | |-------->| (operation) |
23 * . | | | (load ctx2) |
24 * . | | ---------------
25 * --------------- | |
26 * | JobDesc #3 |------| |
27 * | *(packet 3) | |
28 * --------------- |
29 * . |
30 * . |
31 * --------------- |
32 * | JobDesc #4 |------------
33 * | *(packet 4) |
34 * ---------------
35 *
36 * The SharedDesc never changes for a connection unless rekeyed, but
37 * each packet will likely be in a different place. So all we need
38 * to know to process the packet is where the input is, where the
39 * output goes, and what context we want to process with. Context is
40 * in the SharedDesc, packet references in the JobDesc.
41 *
42 * So, a job desc looks like:
43 *
44 * ---------------------
45 * | Header |
46 * | ShareDesc Pointer |
47 * | SEQ_OUT_PTR |
48 * | (output buffer) |
49 * | (output length) |
50 * | SEQ_IN_PTR |
51 * | (input buffer) |
52 * | (input length) |
53 * ---------------------
54 */
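
/*
 * Illustrative only (not part of this driver): callers normally reach the
 * routines below through the generic crypto ahash API rather than calling
 * into caamhash.c directly. A minimal sketch, with my_done/my_ctx/sgl/out
 * as placeholder names:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sgl, out, nbytes);
 *	crypto_ahash_digest(req);	(typically returns -EINPROGRESS)
 *
 * The "sha256" name resolves to "sha256-caam" below when CAAM_CRA_PRIORITY
 * wins the priority comparison.
 */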
55
56#include "compat.h"
57
58#include "regs.h"
59#include "intern.h"
60#include "desc_constr.h"
61#include "jr.h"
62#include "error.h"
63#include "sg_sw_sec4.h"
64#include "key_gen.h"
65
66#define CAAM_CRA_PRIORITY 3000
67
68/* max hash key is max split key size */
69#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
70
71#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73
74/* length of descriptors text */
75#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
76
77#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
78#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
79#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
80#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
81#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
82#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
83
84#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
85 CAAM_MAX_HASH_KEY_SIZE)
86#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
87
88/* caam context sizes for hashes: running digest + 8 */
89#define HASH_MSG_LEN 8
90#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
91
92#ifdef DEBUG
93/* for print_hex_dumps with line references */
94#define xstr(s) str(s)
95#define str(s) #s
96#define debug(format, arg...) printk(format, arg)
97#else
98#define debug(format, arg...)
99#endif
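
/*
 * For example (debug builds only), xstr(__LINE__) expands the __LINE__
 * macro first and then stringizes it, so a prefix such as
 * "jobdesc@"xstr(__LINE__)": " carries the actual source line number.
 */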
100
101/* ahash per-session context */
102struct caam_hash_ctx {
103 struct device *jrdev;
104 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
105 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
106 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
107 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
108 u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
109 dma_addr_t sh_desc_update_dma;
110 dma_addr_t sh_desc_update_first_dma;
111 dma_addr_t sh_desc_fin_dma;
112 dma_addr_t sh_desc_digest_dma;
113 dma_addr_t sh_desc_finup_dma;
114 u32 alg_type;
115 u32 alg_op;
116 u8 key[CAAM_MAX_HASH_KEY_SIZE];
117 dma_addr_t key_dma;
118 int ctx_len;
119 unsigned int split_key_len;
120 unsigned int split_key_pad_len;
121};
122
123/* ahash state */
124struct caam_hash_state {
125 dma_addr_t buf_dma;
126 dma_addr_t ctx_dma;
127 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
128 int buflen_0;
129 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
130 int buflen_1;
131 u8 caam_ctx[MAX_CTX_LEN];
132 int (*update)(struct ahash_request *req);
133 int (*final)(struct ahash_request *req);
134 int (*finup)(struct ahash_request *req);
135 int current_buf;
136};
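
/*
 * Note on the two buffers above (behaviour inferred from the update paths
 * below, described here for clarity): bytes that do not fill a whole hash
 * block are parked in buf_[current_buf]; when a job is submitted, any
 * leftover input is copied into the other buffer and current_buf is
 * flipped, so the buffer referenced by the in-flight descriptor is not
 * overwritten while the job runs.
 */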
137
138/* Common job descriptor seq in/out ptr routines */
139
140/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
141static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
142 struct caam_hash_state *state,
143 int ctx_len)
144{
145 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
146 ctx_len, DMA_FROM_DEVICE);
147 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
148}
149
150/* Map req->result, and append seq_out_ptr command that points to it */
151static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
152 u8 *result, int digestsize)
153{
154 dma_addr_t dst_dma;
155
156 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
157 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
158
159 return dst_dma;
160}
161
162/* Map current buffer in state and put it in link table */
163static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
164 struct sec4_sg_entry *sec4_sg,
165 u8 *buf, int buflen)
166{
167 dma_addr_t buf_dma;
168
169 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
170 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
171
172 return buf_dma;
173}
174
175/* Map req->src and put it in link table */
176static inline void src_map_to_sec4_sg(struct device *jrdev,
177 struct scatterlist *src, int src_nents,
178 struct sec4_sg_entry *sec4_sg,
179 bool chained)
 180{
 181	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
182 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
183}
184
185/*
 186 * Only put the buffer in the link table if it contains data; the buffer
 187 * may have been used previously, in which case it also needs to be unmapped.
188 */
189static inline dma_addr_t
190try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
191 u8 *buf, dma_addr_t buf_dma, int buflen,
192 int last_buflen)
193{
194 if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
195 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
196 if (buflen)
197 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
198 else
199 buf_dma = 0;
200
201 return buf_dma;
202}
203
204/* Map state->caam_ctx, and add it to link table */
205static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
206 struct caam_hash_state *state,
207 int ctx_len,
208 struct sec4_sg_entry *sec4_sg,
209 u32 flag)
210{
211 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
212 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
213}
214
215/* Common shared descriptor commands */
216static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
217{
218 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
219 ctx->split_key_len, CLASS_2 |
220 KEY_DEST_MDHA_SPLIT | KEY_ENC);
221}
222
223/* Append key if it has been set */
224static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
225{
226 u32 *key_jump_cmd;
227
 228	init_sh_desc(desc, HDR_SHARE_SERIAL);
229
230 if (ctx->split_key_len) {
231 /* Skip if already shared */
232 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
233 JUMP_COND_SHRD);
234
235 append_key_ahash(desc, ctx);
236
237 set_jump_tgt_here(desc, key_jump_cmd);
238 }
239
240 /* Propagate errors from shared to job descriptor */
241 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
242}
243
244/*
 245 * For ahash, read data from seqin following state->caam_ctx,
 246 * and write the resulting class2 context to seqout, which may be
 247 * state->caam_ctx or req->result.
248 */
249static inline void ahash_append_load_str(u32 *desc, int digestsize)
250{
251 /* Calculate remaining bytes to read */
252 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
253
254 /* Read remaining bytes */
255 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
256 FIFOLD_TYPE_MSG | KEY_VLF);
257
258 /* Store class2 context bytes */
259 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
260 LDST_SRCDST_BYTE_CONTEXT);
261}
262
263/*
264 * For ahash update, final and finup, import context, read and write to seqout
265 */
266static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
267 int digestsize,
268 struct caam_hash_ctx *ctx)
269{
270 init_sh_desc_key_ahash(desc, ctx);
271
272 /* Import context from software */
273 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
274 LDST_CLASS_2_CCB | ctx->ctx_len);
275
276 /* Class 2 operation */
277 append_operation(desc, op | state | OP_ALG_ENCRYPT);
278
279 /*
280 * Load from buf and/or src and write to req->result or state->context
281 */
282 ahash_append_load_str(desc, digestsize);
283}
284
 285/* For ahash first update and digest, read and write to seqout */
286static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
287 int digestsize, struct caam_hash_ctx *ctx)
288{
289 init_sh_desc_key_ahash(desc, ctx);
290
291 /* Class 2 operation */
292 append_operation(desc, op | state | OP_ALG_ENCRYPT);
293
294 /*
295 * Load from buf and/or src and write to req->result or state->context
296 */
297 ahash_append_load_str(desc, digestsize);
298}
299
300static int ahash_set_sh_desc(struct crypto_ahash *ahash)
301{
302 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
303 int digestsize = crypto_ahash_digestsize(ahash);
304 struct device *jrdev = ctx->jrdev;
305 u32 have_key = 0;
306 u32 *desc;
307
308 if (ctx->split_key_len)
309 have_key = OP_ALG_AAI_HMAC_PRECOMP;
310
311 /* ahash_update shared descriptor */
312 desc = ctx->sh_desc_update;
313
 314	init_sh_desc(desc, HDR_SHARE_SERIAL);
315
316 /* Import context from software */
317 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
318 LDST_CLASS_2_CCB | ctx->ctx_len);
319
320 /* Class 2 operation */
321 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
322 OP_ALG_ENCRYPT);
323
324 /* Load data and write to result or context */
325 ahash_append_load_str(desc, ctx->ctx_len);
326
327 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
328 DMA_TO_DEVICE);
329 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
330 dev_err(jrdev, "unable to map shared descriptor\n");
331 return -ENOMEM;
332 }
333#ifdef DEBUG
334 print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
335 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336#endif
337
338 /* ahash_update_first shared descriptor */
339 desc = ctx->sh_desc_update_first;
340
341 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342 ctx->ctx_len, ctx);
343
344 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345 desc_bytes(desc),
346 DMA_TO_DEVICE);
347 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348 dev_err(jrdev, "unable to map shared descriptor\n");
349 return -ENOMEM;
350 }
351#ifdef DEBUG
352 print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
353 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
354#endif
355
356 /* ahash_final shared descriptor */
357 desc = ctx->sh_desc_fin;
358
359 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
360 OP_ALG_AS_FINALIZE, digestsize, ctx);
361
362 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
363 DMA_TO_DEVICE);
364 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
365 dev_err(jrdev, "unable to map shared descriptor\n");
366 return -ENOMEM;
367 }
368#ifdef DEBUG
369 print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
370 DUMP_PREFIX_ADDRESS, 16, 4, desc,
371 desc_bytes(desc), 1);
372#endif
373
374 /* ahash_finup shared descriptor */
375 desc = ctx->sh_desc_finup;
376
377 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
378 OP_ALG_AS_FINALIZE, digestsize, ctx);
379
380 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
381 DMA_TO_DEVICE);
382 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
383 dev_err(jrdev, "unable to map shared descriptor\n");
384 return -ENOMEM;
385 }
386#ifdef DEBUG
387 print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
388 DUMP_PREFIX_ADDRESS, 16, 4, desc,
389 desc_bytes(desc), 1);
390#endif
391
392 /* ahash_digest shared descriptor */
393 desc = ctx->sh_desc_digest;
394
395 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
396 digestsize, ctx);
397
398 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
399 desc_bytes(desc),
400 DMA_TO_DEVICE);
401 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
402 dev_err(jrdev, "unable to map shared descriptor\n");
403 return -ENOMEM;
404 }
405#ifdef DEBUG
406 print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
407 DUMP_PREFIX_ADDRESS, 16, 4, desc,
408 desc_bytes(desc), 1);
409#endif
410
411 return 0;
412}
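
/*
 * Sizing note: each shared descriptor constructed above is built in place
 * inside caam_hash_ctx, whose buffers are DESC_HASH_MAX_USED_LEN words,
 * i.e. (DESC_AHASH_FINAL_LEN + CAAM_MAX_HASH_KEY_SIZE) / CAAM_CMD_SZ, large
 * enough for the biggest case (final/finup with an immediate split key).
 */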
413
 414static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
415 u32 keylen)
416{
417 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
418 ctx->split_key_pad_len, key_in, keylen,
419 ctx->alg_op);
420}
421
 422/* Digest the key down to digest size if it is too long */
 423static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
424 u32 *keylen, u8 *key_out, u32 digestsize)
425{
426 struct device *jrdev = ctx->jrdev;
427 u32 *desc;
428 struct split_key_result result;
429 dma_addr_t src_dma, dst_dma;
430 int ret = 0;
431
 432	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
433 if (!desc) {
434 dev_err(jrdev, "unable to allocate key input memory\n");
435 return -ENOMEM;
436 }
437
438 init_job_desc(desc, 0);
439
440 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
441 DMA_TO_DEVICE);
442 if (dma_mapping_error(jrdev, src_dma)) {
443 dev_err(jrdev, "unable to map key input memory\n");
444 kfree(desc);
445 return -ENOMEM;
446 }
447 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
448 DMA_FROM_DEVICE);
449 if (dma_mapping_error(jrdev, dst_dma)) {
450 dev_err(jrdev, "unable to map key output memory\n");
451 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
452 kfree(desc);
453 return -ENOMEM;
454 }
455
456 /* Job descriptor to perform unkeyed hash on key_in */
457 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
458 OP_ALG_AS_INITFINAL);
459 append_seq_in_ptr(desc, src_dma, *keylen, 0);
460 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
461 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
462 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
463 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
464 LDST_SRCDST_BYTE_CONTEXT);
465
466#ifdef DEBUG
467 print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
468 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
469 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
470 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
471#endif
472
473 result.err = 0;
474 init_completion(&result.completion);
475
476 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
477 if (!ret) {
478 /* in progress */
 479		wait_for_completion(&result.completion);
480 ret = result.err;
481#ifdef DEBUG
482 print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
483 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
484 digestsize, 1);
485#endif
486 }
 487	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
 488	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
 489
 490	*keylen = digestsize; /* report the digested length only after unmapping */
491
492 kfree(desc);
493
494 return ret;
495}
496
497static int ahash_setkey(struct crypto_ahash *ahash,
498 const u8 *key, unsigned int keylen)
499{
500 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
501 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
502 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
503 struct device *jrdev = ctx->jrdev;
504 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
505 int digestsize = crypto_ahash_digestsize(ahash);
506 int ret = 0;
507 u8 *hashed_key = NULL;
508
509#ifdef DEBUG
510 printk(KERN_ERR "keylen %d\n", keylen);
511#endif
512
513 if (keylen > blocksize) {
514 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
515 GFP_DMA);
516 if (!hashed_key)
517 return -ENOMEM;
518 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
519 digestsize);
520 if (ret)
521 goto badkey;
522 key = hashed_key;
523 }
524
525 /* Pick class 2 key length from algorithm submask */
526 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
527 OP_ALG_ALGSEL_SHIFT] * 2;
528 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
529
530#ifdef DEBUG
531 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
532 ctx->split_key_len, ctx->split_key_pad_len);
533 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
534 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
535#endif
536
537 ret = gen_split_hash_key(ctx, key, keylen);
538 if (ret)
539 goto badkey;
540
541 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
542 DMA_TO_DEVICE);
543 if (dma_mapping_error(jrdev, ctx->key_dma)) {
544 dev_err(jrdev, "unable to map key i/o memory\n");
545 return -ENOMEM;
546 }
547#ifdef DEBUG
548 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
549 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
550 ctx->split_key_pad_len, 1);
551#endif
552
553 ret = ahash_set_sh_desc(ahash);
554 if (ret) {
555 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
556 DMA_TO_DEVICE);
557 }
558
559 kfree(hashed_key);
560 return ret;
561badkey:
562 kfree(hashed_key);
563 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
564 return -EINVAL;
565}
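
/*
 * Worked example of the sizing above (illustrative numbers only): for
 * hmac(sha1) the class 2 split key is 2 * 20 = 40 bytes, padded to 48 by
 * ALIGN(..., 16); for hmac(sha256) it is 2 * 32 = 64 bytes, already
 * 16-byte aligned, so split_key_pad_len == split_key_len == 64.
 */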
566
567/*
568 * ahash_edesc - s/w-extended ahash descriptor
569 * @dst_dma: physical mapped address of req->result
570 * @sec4_sg_dma: physical mapped address of h/w link table
 571 * @chained: if source is chained
572 * @src_nents: number of segments in input scatterlist
573 * @sec4_sg_bytes: length of dma mapped sec4_sg space
574 * @sec4_sg: pointer to h/w link table
575 * @hw_desc: the h/w job descriptor followed by any referenced link tables
576 */
577struct ahash_edesc {
578 dma_addr_t dst_dma;
579 dma_addr_t sec4_sg_dma;
 580	bool chained;
581 int src_nents;
582 int sec4_sg_bytes;
583 struct sec4_sg_entry *sec4_sg;
584 u32 hw_desc[0];
585};
586
587static inline void ahash_unmap(struct device *dev,
588 struct ahash_edesc *edesc,
589 struct ahash_request *req, int dst_len)
590{
591 if (edesc->src_nents)
592 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
593 DMA_TO_DEVICE, edesc->chained);
594 if (edesc->dst_dma)
595 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
596
597 if (edesc->sec4_sg_bytes)
598 dma_unmap_single(dev, edesc->sec4_sg_dma,
599 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
600}
601
602static inline void ahash_unmap_ctx(struct device *dev,
603 struct ahash_edesc *edesc,
604 struct ahash_request *req, int dst_len, u32 flag)
605{
606 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
607 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
608 struct caam_hash_state *state = ahash_request_ctx(req);
609
610 if (state->ctx_dma)
611 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
612 ahash_unmap(dev, edesc, req, dst_len);
613}
614
615static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
616 void *context)
617{
618 struct ahash_request *req = context;
619 struct ahash_edesc *edesc;
620 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
621 int digestsize = crypto_ahash_digestsize(ahash);
622#ifdef DEBUG
623 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
624 struct caam_hash_state *state = ahash_request_ctx(req);
625
626 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
627#endif
628
629 edesc = (struct ahash_edesc *)((char *)desc -
630 offsetof(struct ahash_edesc, hw_desc));
631 if (err) {
632 char tmp[CAAM_ERROR_STR_MAX];
633
634 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
635 }
636
637 ahash_unmap(jrdev, edesc, req, digestsize);
638 kfree(edesc);
639
640#ifdef DEBUG
641 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
642 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
643 ctx->ctx_len, 1);
644 if (req->result)
645 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
646 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
647 digestsize, 1);
648#endif
649
650 req->base.complete(&req->base, err);
651}
652
653static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
654 void *context)
655{
656 struct ahash_request *req = context;
657 struct ahash_edesc *edesc;
658 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
659 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
660#ifdef DEBUG
661 struct caam_hash_state *state = ahash_request_ctx(req);
662 int digestsize = crypto_ahash_digestsize(ahash);
663
664 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
665#endif
666
667 edesc = (struct ahash_edesc *)((char *)desc -
668 offsetof(struct ahash_edesc, hw_desc));
669 if (err) {
670 char tmp[CAAM_ERROR_STR_MAX];
671
672 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
673 }
674
675 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
676 kfree(edesc);
677
678#ifdef DEBUG
679 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
680 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
681 ctx->ctx_len, 1);
682 if (req->result)
683 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
684 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
685 digestsize, 1);
686#endif
687
688 req->base.complete(&req->base, err);
689}
690
691static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
692 void *context)
693{
694 struct ahash_request *req = context;
695 struct ahash_edesc *edesc;
696 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
697 int digestsize = crypto_ahash_digestsize(ahash);
698#ifdef DEBUG
699 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
700 struct caam_hash_state *state = ahash_request_ctx(req);
701
702 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
703#endif
704
705 edesc = (struct ahash_edesc *)((char *)desc -
706 offsetof(struct ahash_edesc, hw_desc));
707 if (err) {
708 char tmp[CAAM_ERROR_STR_MAX];
709
710 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
711 }
712
713 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
714 kfree(edesc);
715
716#ifdef DEBUG
717 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
718 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
719 ctx->ctx_len, 1);
720 if (req->result)
721 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
722 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
723 digestsize, 1);
724#endif
725
726 req->base.complete(&req->base, err);
727}
728
729static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
730 void *context)
731{
732 struct ahash_request *req = context;
733 struct ahash_edesc *edesc;
734 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
735 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
736#ifdef DEBUG
737 struct caam_hash_state *state = ahash_request_ctx(req);
738 int digestsize = crypto_ahash_digestsize(ahash);
739
740 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
741#endif
742
743 edesc = (struct ahash_edesc *)((char *)desc -
744 offsetof(struct ahash_edesc, hw_desc));
745 if (err) {
746 char tmp[CAAM_ERROR_STR_MAX];
747
748 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
749 }
750
751 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
752 kfree(edesc);
753
754#ifdef DEBUG
755 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
756 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
757 ctx->ctx_len, 1);
758 if (req->result)
759 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
760 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
761 digestsize, 1);
762#endif
763
764 req->base.complete(&req->base, err);
765}
766
767/* submit update job descriptor */
768static int ahash_update_ctx(struct ahash_request *req)
769{
770 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
771 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
772 struct caam_hash_state *state = ahash_request_ctx(req);
773 struct device *jrdev = ctx->jrdev;
774 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
775 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
776 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
777 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
778 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
779 int *next_buflen = state->current_buf ? &state->buflen_0 :
780 &state->buflen_1, last_buflen;
781 int in_len = *buflen + req->nbytes, to_hash;
782 u32 *sh_desc = ctx->sh_desc_update, *desc;
783 dma_addr_t ptr = ctx->sh_desc_update_dma;
784 int src_nents, sec4_sg_bytes, sec4_sg_src_index;
785 struct ahash_edesc *edesc;
 786	bool chained = false;
787 int ret = 0;
788 int sh_len;
789
790 last_buflen = *next_buflen;
791 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
792 to_hash = in_len - *next_buflen;
793
794 if (to_hash) {
795 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
796 &chained);
797 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
798 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
799 sizeof(struct sec4_sg_entry);
800
801 /*
802 * allocate space for base edesc and hw desc commands,
803 * link tables
804 */
805 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
806 sec4_sg_bytes, GFP_DMA | flags);
807 if (!edesc) {
808 dev_err(jrdev,
809 "could not allocate extended descriptor\n");
810 return -ENOMEM;
811 }
812
813 edesc->src_nents = src_nents;
 814		edesc->chained = chained;
815 edesc->sec4_sg_bytes = sec4_sg_bytes;
816 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
817 DESC_JOB_IO_LEN;
818 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
819 sec4_sg_bytes,
820 DMA_TO_DEVICE);
821
822 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
823 edesc->sec4_sg, DMA_BIDIRECTIONAL);
824
825 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
826 edesc->sec4_sg + 1,
827 buf, state->buf_dma,
828 *buflen, last_buflen);
829
830 if (src_nents) {
831 src_map_to_sec4_sg(jrdev, req->src, src_nents,
832 edesc->sec4_sg + sec4_sg_src_index,
833 chained);
834 if (*next_buflen) {
835 sg_copy_part(next_buf, req->src, to_hash -
836 *buflen, req->nbytes);
837 state->current_buf = !state->current_buf;
838 }
839 } else {
840 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
841 SEC4_SG_LEN_FIN;
842 }
843
844 sh_len = desc_len(sh_desc);
845 desc = edesc->hw_desc;
846 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
847 HDR_REVERSE);
848
849 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
850 to_hash, LDST_SGF);
851
852 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
853
854#ifdef DEBUG
855 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
856 DUMP_PREFIX_ADDRESS, 16, 4, desc,
857 desc_bytes(desc), 1);
858#endif
859
860 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
861 if (!ret) {
862 ret = -EINPROGRESS;
863 } else {
864 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
865 DMA_BIDIRECTIONAL);
866 kfree(edesc);
867 }
868 } else if (*next_buflen) {
869 sg_copy(buf + *buflen, req->src, req->nbytes);
870 *buflen = *next_buflen;
871 *next_buflen = last_buflen;
872 }
873#ifdef DEBUG
874 print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
875 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
876 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
877 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
878 *next_buflen, 1);
879#endif
880
881 return ret;
882}
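
/*
 * Buffering arithmetic, by example (illustrative numbers): with sha256
 * (64-byte blocks), 10 bytes already buffered and a 100-byte update,
 * in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64, so exactly
 * one block is hashed now and 46 bytes are carried over for later.
 */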
883
884static int ahash_final_ctx(struct ahash_request *req)
885{
886 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
887 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
888 struct caam_hash_state *state = ahash_request_ctx(req);
889 struct device *jrdev = ctx->jrdev;
890 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
891 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
892 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
893 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
894 int last_buflen = state->current_buf ? state->buflen_0 :
895 state->buflen_1;
896 u32 *sh_desc = ctx->sh_desc_fin, *desc;
897 dma_addr_t ptr = ctx->sh_desc_fin_dma;
 898	int sec4_sg_bytes, sec4_sg_src_index;
899 int digestsize = crypto_ahash_digestsize(ahash);
900 struct ahash_edesc *edesc;
901 int ret = 0;
902 int sh_len;
903
904 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
905 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
906
907 /* allocate space for base edesc and hw desc commands, link tables */
908 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
909 sec4_sg_bytes, GFP_DMA | flags);
910 if (!edesc) {
911 dev_err(jrdev, "could not allocate extended descriptor\n");
912 return -ENOMEM;
913 }
914
915 sh_len = desc_len(sh_desc);
916 desc = edesc->hw_desc;
917 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
918
919 edesc->sec4_sg_bytes = sec4_sg_bytes;
920 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
921 DESC_JOB_IO_LEN;
922 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
923 sec4_sg_bytes, DMA_TO_DEVICE);
924 edesc->src_nents = 0;
925
926 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
927 DMA_TO_DEVICE);
928
929 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
930 buf, state->buf_dma, buflen,
931 last_buflen);
 932	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
933
934 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
935 LDST_SGF);
936
937 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
938 digestsize);
939
940#ifdef DEBUG
941 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
942 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
943#endif
944
945 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
946 if (!ret) {
947 ret = -EINPROGRESS;
948 } else {
949 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
950 kfree(edesc);
951 }
952
953 return ret;
954}
955
956static int ahash_finup_ctx(struct ahash_request *req)
957{
958 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
959 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
960 struct caam_hash_state *state = ahash_request_ctx(req);
961 struct device *jrdev = ctx->jrdev;
962 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
963 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
964 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
965 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
966 int last_buflen = state->current_buf ? state->buflen_0 :
967 state->buflen_1;
968 u32 *sh_desc = ctx->sh_desc_finup, *desc;
969 dma_addr_t ptr = ctx->sh_desc_finup_dma;
970 int sec4_sg_bytes, sec4_sg_src_index;
971 int src_nents;
972 int digestsize = crypto_ahash_digestsize(ahash);
973 struct ahash_edesc *edesc;
 974	bool chained = false;
975 int ret = 0;
976 int sh_len;
977
 978	src_nents = __sg_count(req->src, req->nbytes, &chained);
979 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
980 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
981 sizeof(struct sec4_sg_entry);
982
983 /* allocate space for base edesc and hw desc commands, link tables */
984 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
985 sec4_sg_bytes, GFP_DMA | flags);
986 if (!edesc) {
987 dev_err(jrdev, "could not allocate extended descriptor\n");
988 return -ENOMEM;
989 }
990
991 sh_len = desc_len(sh_desc);
992 desc = edesc->hw_desc;
993 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
994
995 edesc->src_nents = src_nents;
 996	edesc->chained = chained;
997 edesc->sec4_sg_bytes = sec4_sg_bytes;
998 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
999 DESC_JOB_IO_LEN;
1000 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1001 sec4_sg_bytes, DMA_TO_DEVICE);
1002
1003 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
1004 DMA_TO_DEVICE);
1005
1006 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1007 buf, state->buf_dma, buflen,
1008 last_buflen);
1009
1010 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
 1011			   sec4_sg_src_index, chained);
1012
1013 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1014 buflen + req->nbytes, LDST_SGF);
1015
1016 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1017 digestsize);
1018
1019#ifdef DEBUG
1020 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1021 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1022#endif
1023
1024 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1025 if (!ret) {
1026 ret = -EINPROGRESS;
1027 } else {
1028 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1029 kfree(edesc);
1030 }
1031
1032 return ret;
1033}
1034
1035static int ahash_digest(struct ahash_request *req)
1036{
1037 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1038 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1039 struct device *jrdev = ctx->jrdev;
1040 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1041 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1042 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1043 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1044 int digestsize = crypto_ahash_digestsize(ahash);
1045 int src_nents, sec4_sg_bytes;
1046 dma_addr_t src_dma;
1047 struct ahash_edesc *edesc;
 1048	bool chained = false;
1049 int ret = 0;
1050 u32 options;
1051 int sh_len;
1052
1053 src_nents = sg_count(req->src, req->nbytes, &chained);
1054 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1055 chained);
1056 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1057
1058 /* allocate space for base edesc and hw desc commands, link tables */
1059 edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1060 DESC_JOB_IO_LEN, GFP_DMA | flags);
1061 if (!edesc) {
1062 dev_err(jrdev, "could not allocate extended descriptor\n");
1063 return -ENOMEM;
1064 }
1065 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1066 DESC_JOB_IO_LEN;
1067 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1068 sec4_sg_bytes, DMA_TO_DEVICE);
1069 edesc->src_nents = src_nents;
 1070	edesc->chained = chained;
1071
1072 sh_len = desc_len(sh_desc);
1073 desc = edesc->hw_desc;
1074 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1075
1076 if (src_nents) {
1077 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1078 src_dma = edesc->sec4_sg_dma;
1079 options = LDST_SGF;
1080 } else {
1081 src_dma = sg_dma_address(req->src);
1082 options = 0;
1083 }
1084 append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1085
1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1087 digestsize);
1088
1089#ifdef DEBUG
1090 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1091 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1092#endif
1093
1094 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1095 if (!ret) {
1096 ret = -EINPROGRESS;
1097 } else {
1098 ahash_unmap(jrdev, edesc, req, digestsize);
1099 kfree(edesc);
1100 }
1101
1102 return ret;
1103}
1104
 1105/* submit ahash final if it is the first job descriptor */
1106static int ahash_final_no_ctx(struct ahash_request *req)
1107{
1108 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1109 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1110 struct caam_hash_state *state = ahash_request_ctx(req);
1111 struct device *jrdev = ctx->jrdev;
1112 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1113 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1114 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1115 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1116 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1117 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1118 int digestsize = crypto_ahash_digestsize(ahash);
1119 struct ahash_edesc *edesc;
1120 int ret = 0;
1121 int sh_len;
1122
1123 /* allocate space for base edesc and hw desc commands, link tables */
1124 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1125 GFP_DMA | flags);
1126 if (!edesc) {
1127 dev_err(jrdev, "could not allocate extended descriptor\n");
1128 return -ENOMEM;
1129 }
1130
1131 sh_len = desc_len(sh_desc);
1132 desc = edesc->hw_desc;
1133 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1134
1135 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1136
1137 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1138
1139 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1140 digestsize);
1141 edesc->src_nents = 0;
1142
1143#ifdef DEBUG
1144 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1145 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1146#endif
1147
1148 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1149 if (!ret) {
1150 ret = -EINPROGRESS;
1151 } else {
1152 ahash_unmap(jrdev, edesc, req, digestsize);
1153 kfree(edesc);
1154 }
1155
1156 return ret;
1157}
1158
 1159/* submit ahash update if it is the first job descriptor after update */
1160static int ahash_update_no_ctx(struct ahash_request *req)
1161{
1162 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1163 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1164 struct caam_hash_state *state = ahash_request_ctx(req);
1165 struct device *jrdev = ctx->jrdev;
1166 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1167 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1168 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1169 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1170 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1171 int *next_buflen = state->current_buf ? &state->buflen_0 :
1172 &state->buflen_1;
1173 int in_len = *buflen + req->nbytes, to_hash;
1174 int sec4_sg_bytes, src_nents;
1175 struct ahash_edesc *edesc;
1176 u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1177 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
 1178	bool chained = false;
1179 int ret = 0;
1180 int sh_len;
1181
1182 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1183 to_hash = in_len - *next_buflen;
1184
1185 if (to_hash) {
1186 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1187 &chained);
1188 sec4_sg_bytes = (1 + src_nents) *
1189 sizeof(struct sec4_sg_entry);
1190
1191 /*
1192 * allocate space for base edesc and hw desc commands,
1193 * link tables
1194 */
1195 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1196 sec4_sg_bytes, GFP_DMA | flags);
1197 if (!edesc) {
1198 dev_err(jrdev,
1199 "could not allocate extended descriptor\n");
1200 return -ENOMEM;
1201 }
1202
1203 edesc->src_nents = src_nents;
 1204		edesc->chained = chained;
1205 edesc->sec4_sg_bytes = sec4_sg_bytes;
1206 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1207 DESC_JOB_IO_LEN;
1208 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1209 sec4_sg_bytes,
1210 DMA_TO_DEVICE);
1211
1212 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1213 buf, *buflen);
1214 src_map_to_sec4_sg(jrdev, req->src, src_nents,
 1215				   edesc->sec4_sg + 1, chained);
1216 if (*next_buflen) {
1217 sg_copy_part(next_buf, req->src, to_hash - *buflen,
1218 req->nbytes);
1219 state->current_buf = !state->current_buf;
1220 }
1221
1222 sh_len = desc_len(sh_desc);
1223 desc = edesc->hw_desc;
1224 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1225 HDR_REVERSE);
1226
1227 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1228
1229 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1230
1231#ifdef DEBUG
1232 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1233 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1234 desc_bytes(desc), 1);
1235#endif
1236
1237 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1238 if (!ret) {
1239 ret = -EINPROGRESS;
1240 state->update = ahash_update_ctx;
1241 state->finup = ahash_finup_ctx;
1242 state->final = ahash_final_ctx;
1243 } else {
1244 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1245 DMA_TO_DEVICE);
1246 kfree(edesc);
1247 }
1248 } else if (*next_buflen) {
1249 sg_copy(buf + *buflen, req->src, req->nbytes);
1250 *buflen = *next_buflen;
1251 *next_buflen = 0;
1252 }
1253#ifdef DEBUG
1254 print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
1255 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1256 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1257 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1258 *next_buflen, 1);
1259#endif
1260
1261 return ret;
1262}
1263
 1264/* submit ahash finup if it is the first job descriptor after update */
1265static int ahash_finup_no_ctx(struct ahash_request *req)
1266{
1267 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1268 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1269 struct caam_hash_state *state = ahash_request_ctx(req);
1270 struct device *jrdev = ctx->jrdev;
1271 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1272 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1273 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1274 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1275 int last_buflen = state->current_buf ? state->buflen_0 :
1276 state->buflen_1;
1277 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1278 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1279 int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1280 int digestsize = crypto_ahash_digestsize(ahash);
1281 struct ahash_edesc *edesc;
 1282	bool chained = false;
1283 int sh_len;
1284 int ret = 0;
1285
 1286	src_nents = __sg_count(req->src, req->nbytes, &chained);
1287 sec4_sg_src_index = 2;
1288 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1289 sizeof(struct sec4_sg_entry);
1290
1291 /* allocate space for base edesc and hw desc commands, link tables */
1292 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1293 sec4_sg_bytes, GFP_DMA | flags);
1294 if (!edesc) {
1295 dev_err(jrdev, "could not allocate extended descriptor\n");
1296 return -ENOMEM;
1297 }
1298
1299 sh_len = desc_len(sh_desc);
1300 desc = edesc->hw_desc;
1301 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1302
1303 edesc->src_nents = src_nents;
 1304	edesc->chained = chained;
1305 edesc->sec4_sg_bytes = sec4_sg_bytes;
1306 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1307 DESC_JOB_IO_LEN;
1308 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1309 sec4_sg_bytes, DMA_TO_DEVICE);
1310
1311 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1312 state->buf_dma, buflen,
1313 last_buflen);
1314
1315 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1316 chained);
1317
1318 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1319 req->nbytes, LDST_SGF);
1320
1321 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1322 digestsize);
1323
1324#ifdef DEBUG
1325 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1326 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1327#endif
1328
1329 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1330 if (!ret) {
1331 ret = -EINPROGRESS;
1332 } else {
1333 ahash_unmap(jrdev, edesc, req, digestsize);
1334 kfree(edesc);
1335 }
1336
1337 return ret;
1338}
1339
1340/* submit first update job descriptor after init */
1341static int ahash_update_first(struct ahash_request *req)
1342{
1343 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1344 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1345 struct caam_hash_state *state = ahash_request_ctx(req);
1346 struct device *jrdev = ctx->jrdev;
1347 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1348 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1349 u8 *next_buf = state->buf_0 + state->current_buf *
1350 CAAM_MAX_HASH_BLOCK_SIZE;
1351 int *next_buflen = &state->buflen_0 + state->current_buf;
1352 int to_hash;
1353 u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1354 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1355 int sec4_sg_bytes, src_nents;
1356 dma_addr_t src_dma;
1357 u32 options;
1358 struct ahash_edesc *edesc;
 1359	bool chained = false;
1360 int ret = 0;
1361 int sh_len;
1362
1363 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1364 1);
1365 to_hash = req->nbytes - *next_buflen;
1366
1367 if (to_hash) {
1368 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1369 &chained);
1370 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1371 DMA_TO_DEVICE, chained);
1372 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1373
1374 /*
1375 * allocate space for base edesc and hw desc commands,
1376 * link tables
1377 */
1378 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1379 sec4_sg_bytes, GFP_DMA | flags);
1380 if (!edesc) {
1381 dev_err(jrdev,
1382 "could not allocate extended descriptor\n");
1383 return -ENOMEM;
1384 }
1385
1386 edesc->src_nents = src_nents;
 1387		edesc->chained = chained;
1388 edesc->sec4_sg_bytes = sec4_sg_bytes;
1389 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1390 DESC_JOB_IO_LEN;
1391 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1392 sec4_sg_bytes,
1393 DMA_TO_DEVICE);
1394
1395 if (src_nents) {
1396 sg_to_sec4_sg_last(req->src, src_nents,
1397 edesc->sec4_sg, 0);
1398 src_dma = edesc->sec4_sg_dma;
1399 options = LDST_SGF;
1400 } else {
1401 src_dma = sg_dma_address(req->src);
1402 options = 0;
1403 }
1404
1405 if (*next_buflen)
1406 sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1407
1408 sh_len = desc_len(sh_desc);
1409 desc = edesc->hw_desc;
1410 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1411 HDR_REVERSE);
1412
1413 append_seq_in_ptr(desc, src_dma, to_hash, options);
1414
1415 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1416
1417#ifdef DEBUG
1418 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1419 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1420 desc_bytes(desc), 1);
1421#endif
1422
1423 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1424 req);
1425 if (!ret) {
1426 ret = -EINPROGRESS;
1427 state->update = ahash_update_ctx;
1428 state->finup = ahash_finup_ctx;
1429 state->final = ahash_final_ctx;
1430 } else {
1431 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1432 DMA_TO_DEVICE);
1433 kfree(edesc);
1434 }
1435 } else if (*next_buflen) {
1436 state->update = ahash_update_no_ctx;
1437 state->finup = ahash_finup_no_ctx;
1438 state->final = ahash_final_no_ctx;
1439 sg_copy(next_buf, req->src, req->nbytes);
1440 }
1441#ifdef DEBUG
1442 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1443 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1444 *next_buflen, 1);
1445#endif
1446
1447 return ret;
1448}
1449
1450static int ahash_finup_first(struct ahash_request *req)
1451{
1452 return ahash_digest(req);
1453}
1454
1455static int ahash_init(struct ahash_request *req)
1456{
1457 struct caam_hash_state *state = ahash_request_ctx(req);
1458
1459 state->update = ahash_update_first;
1460 state->finup = ahash_finup_first;
1461 state->final = ahash_final_no_ctx;
1462
1463 state->current_buf = 0;
1464
1465 return 0;
1466}
1467
1468static int ahash_update(struct ahash_request *req)
1469{
1470 struct caam_hash_state *state = ahash_request_ctx(req);
1471
1472 return state->update(req);
1473}
1474
1475static int ahash_finup(struct ahash_request *req)
1476{
1477 struct caam_hash_state *state = ahash_request_ctx(req);
1478
1479 return state->finup(req);
1480}
1481
1482static int ahash_final(struct ahash_request *req)
1483{
1484 struct caam_hash_state *state = ahash_request_ctx(req);
1485
1486 return state->final(req);
1487}
1488
1489static int ahash_export(struct ahash_request *req, void *out)
1490{
1491 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1492 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1493 struct caam_hash_state *state = ahash_request_ctx(req);
1494
1495 memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1496 memcpy(out + sizeof(struct caam_hash_ctx), state,
1497 sizeof(struct caam_hash_state));
1498 return 0;
1499}
1500
1501static int ahash_import(struct ahash_request *req, const void *in)
1502{
1503 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1504 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1505 struct caam_hash_state *state = ahash_request_ctx(req);
1506
1507 memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1508 memcpy(state, in + sizeof(struct caam_hash_ctx),
1509 sizeof(struct caam_hash_state));
1510 return 0;
1511}
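
/*
 * Note: export/import above copy the transform context and the request
 * state back to back, so the opaque blob exchanged with the caller is
 * sizeof(struct caam_hash_ctx) + sizeof(struct caam_hash_state) bytes.
 */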
1512
1513struct caam_hash_template {
1514 char name[CRYPTO_MAX_ALG_NAME];
1515 char driver_name[CRYPTO_MAX_ALG_NAME];
1516 char hmac_name[CRYPTO_MAX_ALG_NAME];
1517 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1518 unsigned int blocksize;
1519 struct ahash_alg template_ahash;
1520 u32 alg_type;
1521 u32 alg_op;
1522};
1523
1524/* ahash descriptors */
1525static struct caam_hash_template driver_hash[] = {
1526 {
1527 .name = "sha1",
1528 .driver_name = "sha1-caam",
1529 .hmac_name = "hmac(sha1)",
1530 .hmac_driver_name = "hmac-sha1-caam",
1531 .blocksize = SHA1_BLOCK_SIZE,
1532 .template_ahash = {
1533 .init = ahash_init,
1534 .update = ahash_update,
1535 .final = ahash_final,
1536 .finup = ahash_finup,
1537 .digest = ahash_digest,
1538 .export = ahash_export,
1539 .import = ahash_import,
1540 .setkey = ahash_setkey,
1541 .halg = {
1542 .digestsize = SHA1_DIGEST_SIZE,
1543 },
1544 },
1545 .alg_type = OP_ALG_ALGSEL_SHA1,
1546 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1547 }, {
1548 .name = "sha224",
1549 .driver_name = "sha224-caam",
1550 .hmac_name = "hmac(sha224)",
1551 .hmac_driver_name = "hmac-sha224-caam",
1552 .blocksize = SHA224_BLOCK_SIZE,
1553 .template_ahash = {
1554 .init = ahash_init,
1555 .update = ahash_update,
1556 .final = ahash_final,
1557 .finup = ahash_finup,
1558 .digest = ahash_digest,
1559 .export = ahash_export,
1560 .import = ahash_import,
1561 .setkey = ahash_setkey,
1562 .halg = {
1563 .digestsize = SHA224_DIGEST_SIZE,
1564 },
1565 },
1566 .alg_type = OP_ALG_ALGSEL_SHA224,
1567 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1568 }, {
1569 .name = "sha256",
1570 .driver_name = "sha256-caam",
1571 .hmac_name = "hmac(sha256)",
1572 .hmac_driver_name = "hmac-sha256-caam",
1573 .blocksize = SHA256_BLOCK_SIZE,
1574 .template_ahash = {
1575 .init = ahash_init,
1576 .update = ahash_update,
1577 .final = ahash_final,
1578 .finup = ahash_finup,
1579 .digest = ahash_digest,
1580 .export = ahash_export,
1581 .import = ahash_import,
1582 .setkey = ahash_setkey,
1583 .halg = {
1584 .digestsize = SHA256_DIGEST_SIZE,
1585 },
1586 },
1587 .alg_type = OP_ALG_ALGSEL_SHA256,
1588 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1589 }, {
1590 .name = "sha384",
1591 .driver_name = "sha384-caam",
1592 .hmac_name = "hmac(sha384)",
1593 .hmac_driver_name = "hmac-sha384-caam",
1594 .blocksize = SHA384_BLOCK_SIZE,
1595 .template_ahash = {
1596 .init = ahash_init,
1597 .update = ahash_update,
1598 .final = ahash_final,
1599 .finup = ahash_finup,
1600 .digest = ahash_digest,
1601 .export = ahash_export,
1602 .import = ahash_import,
1603 .setkey = ahash_setkey,
1604 .halg = {
1605 .digestsize = SHA384_DIGEST_SIZE,
1606 },
1607 },
1608 .alg_type = OP_ALG_ALGSEL_SHA384,
1609 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1610 }, {
1611 .name = "sha512",
1612 .driver_name = "sha512-caam",
1613 .hmac_name = "hmac(sha512)",
1614 .hmac_driver_name = "hmac-sha512-caam",
1615 .blocksize = SHA512_BLOCK_SIZE,
1616 .template_ahash = {
1617 .init = ahash_init,
1618 .update = ahash_update,
1619 .final = ahash_final,
1620 .finup = ahash_finup,
1621 .digest = ahash_digest,
1622 .export = ahash_export,
1623 .import = ahash_import,
1624 .setkey = ahash_setkey,
1625 .halg = {
1626 .digestsize = SHA512_DIGEST_SIZE,
1627 },
1628 },
1629 .alg_type = OP_ALG_ALGSEL_SHA512,
1630 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1631 }, {
1632 .name = "md5",
1633 .driver_name = "md5-caam",
1634 .hmac_name = "hmac(md5)",
1635 .hmac_driver_name = "hmac-md5-caam",
1636 .blocksize = MD5_BLOCK_WORDS * 4,
1637 .template_ahash = {
1638 .init = ahash_init,
1639 .update = ahash_update,
1640 .final = ahash_final,
1641 .finup = ahash_finup,
1642 .digest = ahash_digest,
1643 .export = ahash_export,
1644 .import = ahash_import,
1645 .setkey = ahash_setkey,
1646 .halg = {
1647 .digestsize = MD5_DIGEST_SIZE,
1648 },
1649 },
1650 .alg_type = OP_ALG_ALGSEL_MD5,
1651 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1652 },
1653};
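
/*
 * Each entry above is registered twice: once unkeyed ("sha256" /
 * "sha256-caam") and once as the keyed HMAC ("hmac(sha256)" /
 * "hmac-sha256-caam"). A hedged userspace sketch, assuming AF_ALG is
 * enabled in the kernel, binds to them by cra_name:
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "hash",
 *		.salg_name   = "hmac(sha256)",
 *	};
 */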
1654
1655struct caam_hash_alg {
1656 struct list_head entry;
1657 struct device *ctrldev;
1658 int alg_type;
1659 int alg_op;
1660 struct ahash_alg ahash_alg;
1661};
1662
1663static int caam_hash_cra_init(struct crypto_tfm *tfm)
1664{
1665 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1666 struct crypto_alg *base = tfm->__crt_alg;
1667 struct hash_alg_common *halg =
1668 container_of(base, struct hash_alg_common, base);
1669 struct ahash_alg *alg =
1670 container_of(halg, struct ahash_alg, halg);
1671 struct caam_hash_alg *caam_hash =
1672 container_of(alg, struct caam_hash_alg, ahash_alg);
1673 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1674 struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
1675 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1676 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1677 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1678 HASH_MSG_LEN + 32,
1679 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1680 HASH_MSG_LEN + 64,
1681 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1682 int tgt_jr = atomic_inc_return(&priv->tfm_count);
1683 int ret = 0;
1684
1685 /*
1686 * distribute tfms across job rings to ensure in-order
1687 * crypto request processing per tfm
1688 */
1689 ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
1690
1691 /* copy descriptor header template value */
1692 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1693 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1694
1695 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1696 OP_ALG_ALGSEL_SHIFT];
1697
1698 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1699 sizeof(struct caam_hash_state));
1700
1701 ret = ahash_set_sh_desc(ahash);
1702
1703 return ret;
1704}
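
/*
 * Example of the runninglen[] lookup above: for sha256, ctx_len becomes
 * HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes, i.e. the running
 * digest plus the 8 bytes the MDHA context reserves for the message length.
 */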
1705
1706static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1707{
1708 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1709
1710 if (ctx->sh_desc_update_dma &&
1711 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1712 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1713 desc_bytes(ctx->sh_desc_update),
1714 DMA_TO_DEVICE);
1715 if (ctx->sh_desc_update_first_dma &&
1716 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1717 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1718 desc_bytes(ctx->sh_desc_update_first),
1719 DMA_TO_DEVICE);
1720 if (ctx->sh_desc_fin_dma &&
1721 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1722 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1723 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1724 if (ctx->sh_desc_digest_dma &&
1725 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1726 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1727 desc_bytes(ctx->sh_desc_digest),
1728 DMA_TO_DEVICE);
1729 if (ctx->sh_desc_finup_dma &&
1730 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1731 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1732 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1733}
1734
1735static void __exit caam_algapi_hash_exit(void)
1736{
1737 struct device_node *dev_node;
1738 struct platform_device *pdev;
1739 struct device *ctrldev;
1740 struct caam_drv_private *priv;
1741 struct caam_hash_alg *t_alg, *n;
1742
1743 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1744 if (!dev_node) {
1745 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1746 if (!dev_node)
1747 return;
1748 }
1749
1750 pdev = of_find_device_by_node(dev_node);
1751 if (!pdev)
1752 return;
1753
1754 ctrldev = &pdev->dev;
1755 of_node_put(dev_node);
1756 priv = dev_get_drvdata(ctrldev);
1757
1758 if (!priv->hash_list.next)
1759 return;
1760
1761 list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
1762 crypto_unregister_ahash(&t_alg->ahash_alg);
1763 list_del(&t_alg->entry);
1764 kfree(t_alg);
1765 }
1766}
1767
1768static struct caam_hash_alg *
1769caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1770 bool keyed)
1771{
1772 struct caam_hash_alg *t_alg;
1773 struct ahash_alg *halg;
1774 struct crypto_alg *alg;
1775
1776 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1777 if (!t_alg) {
1778 dev_err(ctrldev, "failed to allocate t_alg\n");
1779 return ERR_PTR(-ENOMEM);
1780 }
1781
1782 t_alg->ahash_alg = template->template_ahash;
1783 halg = &t_alg->ahash_alg;
1784 alg = &halg->halg.base;
1785
1786 if (keyed) {
1787 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1788 template->hmac_name);
1789 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1790 template->hmac_driver_name);
1791 } else {
1792 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1793 template->name);
1794 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1795 template->driver_name);
 1796		t_alg->ahash_alg.setkey = NULL;
 1797	}
1798 alg->cra_module = THIS_MODULE;
1799 alg->cra_init = caam_hash_cra_init;
1800 alg->cra_exit = caam_hash_cra_exit;
1801 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1802 alg->cra_priority = CAAM_CRA_PRIORITY;
1803 alg->cra_blocksize = template->blocksize;
1804 alg->cra_alignmask = 0;
1805 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1806 alg->cra_type = &crypto_ahash_type;
1807
1808 t_alg->alg_type = template->alg_type;
1809 t_alg->alg_op = template->alg_op;
1810 t_alg->ctrldev = ctrldev;
1811
1812 return t_alg;
1813}
1814
1815static int __init caam_algapi_hash_init(void)
1816{
1817 struct device_node *dev_node;
1818 struct platform_device *pdev;
1819 struct device *ctrldev;
1820 struct caam_drv_private *priv;
1821 int i = 0, err = 0;
1822
1823 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1824 if (!dev_node) {
1825 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1826 if (!dev_node)
1827 return -ENODEV;
1828 }
1829
1830 pdev = of_find_device_by_node(dev_node);
1831 if (!pdev)
1832 return -ENODEV;
1833
1834 ctrldev = &pdev->dev;
1835 priv = dev_get_drvdata(ctrldev);
1836 of_node_put(dev_node);
1837
1838 INIT_LIST_HEAD(&priv->hash_list);
1839
1840 atomic_set(&priv->tfm_count, -1);
1841
1842 /* register crypto algorithms the device supports */
1843 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1844 /* TODO: check if h/w supports alg */
1845 struct caam_hash_alg *t_alg;
1846
1847 /* register hmac version */
1848 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
1849 if (IS_ERR(t_alg)) {
1850 err = PTR_ERR(t_alg);
1851 dev_warn(ctrldev, "%s alg allocation failed\n",
1852 driver_hash[i].driver_name);
1853 continue;
1854 }
1855
1856 err = crypto_register_ahash(&t_alg->ahash_alg);
1857 if (err) {
1858 dev_warn(ctrldev, "%s alg registration failed\n",
1859 t_alg->ahash_alg.halg.base.cra_driver_name);
1860 kfree(t_alg);
1861 } else
1862 list_add_tail(&t_alg->entry, &priv->hash_list);
1863
1864 /* register unkeyed version */
1865 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
1866 if (IS_ERR(t_alg)) {
1867 err = PTR_ERR(t_alg);
1868 dev_warn(ctrldev, "%s alg allocation failed\n",
1869 driver_hash[i].driver_name);
1870 continue;
1871 }
1872
1873 err = crypto_register_ahash(&t_alg->ahash_alg);
1874 if (err) {
1875 dev_warn(ctrldev, "%s alg registration failed\n",
1876 t_alg->ahash_alg.halg.base.cra_driver_name);
1877 kfree(t_alg);
1878 } else
1879 list_add_tail(&t_alg->entry, &priv->hash_list);
1880 }
1881
1882 return err;
1883}
1884
1885module_init(caam_algapi_hash_init);
1886module_exit(caam_algapi_hash_exit);
1887
1888MODULE_LICENSE("GPL");
1889MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1890MODULE_AUTHOR("Freescale Semiconductor - NMG");