/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
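
/*
 * Note (editorial): a worked example of the sizing above, assuming
 * CAAM_CMD_SZ == 4 and CAAM_PTR_SZ == 4 (32-bit descriptor words and
 * pointers): DESC_JOB_IO_LEN = 4 * 5 + 4 * 3 = 32 bytes of job descriptor
 * overhead, and DESC_AEAD_GIVENC_LEN = (4 + 16 + 7) * 4 = 108 bytes of
 * shared descriptor text.  DESC_MAX_USED_LEN therefore sizes the sh_desc_*
 * buffers in struct caam_ctx below to hold the longest shared descriptor
 * plus an inlined key of up to CAAM_MAX_KEY_SIZE bytes.
 */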

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
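
/*
 * Note (editorial): append_jump() emits a JUMP command whose target is not
 * yet known and returns a pointer to that command; set_jump_tgt_here()
 * later patches it to point at the current end of the descriptor.  The
 * pattern above is therefore an if/else in descriptor text: when the
 * descriptor is shared (JUMP_COND_SHRD), execution skips to the DK
 * ("decrypt key") variant of the operation; otherwise the plain decrypt
 * operation runs and an unconditional jump steps over the DK variant.
 */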

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
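
/*
 * Note (editorial): the descriptor relies on math register REG0 holding
 * zero at this point, so the two MATH ADD commands above simply copy
 * SEQINLEN into the variable input and output sequence lengths.  Combined
 * with the KEY_VLF (variable length) flag on the FIFO load/store, this
 * makes the shared descriptor process exactly as many bytes as the job
 * descriptor's SEQ_IN_PTR declared, whatever that happens to be.
 */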

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
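
/*
 * Note (editorial): ctx->key holds both keys back to back: the MDHA split
 * authentication key, padded out to split_key_pad_len, followed by the
 * class 1 encryption key.  A rough picture, assuming hmac(sha1) + aes:
 *
 *   ctx->key: [ split auth key (40) | pad (8) | AES key (16/24/32) ]
 *                                            ^-- split_key_pad_len
 *
 * The same layout is referenced either inline (immediate KEY commands) or
 * by DMA address, depending on whether it fits in the shared descriptor.
 */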

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
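
/*
 * Note (editorial): "already shared" means a previous job on this ring is
 * reusing the same shared descriptor, in which case the keys are still
 * loaded in the crypto engines and the KEY commands can be skipped.
 */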

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
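
/*
 * Note (editorial): at this point the session carries three prebuilt
 * shared descriptors: sh_desc_enc (encrypt, then append the ICV),
 * sh_desc_dec (decrypt and check the ICV, via the DK-aware
 * append_dec_op1()) and sh_desc_givenc (generate a random IV, then
 * encrypt).  The per-request job descriptors built later only supply the
 * SEQ IN/OUT pointers.
 */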

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
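
/*
 * Note (editorial): the key blob passed to aead_setkey() follows the
 * crypto_authenc convention: an rtattr header carrying a
 * crypto_authenc_key_param (the big-endian encryption key length),
 * followed by the authentication key and then the encryption key:
 *
 *   [ rtattr hdr | param.enckeylen (be32) | auth key ... | enc key ... ]
 *
 * authkeylen is therefore whatever remains after the rtattr header once
 * the trailing enckeylen bytes are accounted for.
 */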

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/*
	 * As in the aead descriptors, only propagate error immediately
	 * if shared
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
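
/*
 * Note (editorial): both edesc types are allocated as one contiguous block
 * (see the *_edesc_alloc() helpers below):
 *
 *   [ struct ..._edesc | job descriptor (hw_desc) | sec4 link table ]
 *
 * hw_desc is a flexible array member, so the job descriptor lives directly
 * behind the bookkeeping fields, and the completion callbacks can recover
 * the edesc from the descriptor address with offsetof().
 */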

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/* verify hw auth check passed else return -EBADMSG */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
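
/*
 * Note (editorial): for encryption the caller has already grown
 * req->cryptlen by authsize, so the input sequence excludes the ICV
 * (cryptlen - authsize) while the output sequence includes it (cryptlen).
 * For decryption the relationship flips: the ICV is consumed from the
 * input and the output is cryptlen - authsize bytes of plaintext.
 */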

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
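
/*
 * Note (editorial): the input sequence always covers iv + payload
 * (req->nbytes + ivsize); when the IV is not physically contiguous with
 * the source data, both are described by the sec4 link table instead, and
 * LDST_SGF tells the hardware to treat the sequence pointer as a
 * scatter/gather table rather than a flat buffer.
 */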

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_BIDIRECTIONAL, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
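
/*
 * Note (editorial): "all contiguous" here means assoc, iv and src sit back
 * to back in DMA space (assoc end == iv start, iv end == src start) and
 * each is a single segment.  Only then can the job descriptor point at the
 * data directly; otherwise a sec4 link table of the form
 * [assoc segments][iv][src segments] is built and referenced instead.
 */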

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_BIDIRECTIONAL, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
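
/*
 * Note (editorial): each entry of driver_algs[] below is a template from
 * which a struct crypto_alg is instantiated at driver init: .name and
 * .driver_name become cra_name/cra_driver_name, the template_u member
 * supplies the type-specific ops, and the class1/class2/alg_op fields are
 * copied into the per-session caam_ctx so the shared-descriptor builders
 * above know which operations to emit.
 */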

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
1864 .maxauthsize = SHA384_DIGEST_SIZE,
1865 },
1866 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1867 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1868 OP_ALG_AAI_HMAC_PRECOMP,
1869 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1870 },
1871 {
1872 .name = "authenc(hmac(sha512),cbc(des3_ede))",
1873 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1874 .blocksize = DES3_EDE_BLOCK_SIZE,
1875 .type = CRYPTO_ALG_TYPE_AEAD,
1876 .template_aead = {
1877 .setkey = aead_setkey,
1878 .setauthsize = aead_setauthsize,
1879 .encrypt = aead_encrypt,
1880 .decrypt = aead_decrypt,
1881 .givencrypt = aead_givencrypt,
1882 .geniv = "<built-in>",
1883 .ivsize = DES3_EDE_BLOCK_SIZE,
1884 .maxauthsize = SHA512_DIGEST_SIZE,
1885 },
1886 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1887 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1888 OP_ALG_AAI_HMAC_PRECOMP,
1889 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1890 },
1891 {
1892 .name = "authenc(hmac(md5),cbc(des))",
1893 .driver_name = "authenc-hmac-md5-cbc-des-caam",
1894 .blocksize = DES_BLOCK_SIZE,
1895 .type = CRYPTO_ALG_TYPE_AEAD,
1896 .template_aead = {
1897 .setkey = aead_setkey,
1898 .setauthsize = aead_setauthsize,
1899 .encrypt = aead_encrypt,
1900 .decrypt = aead_decrypt,
1901 .givencrypt = aead_givencrypt,
1902 .geniv = "<built-in>",
1903 .ivsize = DES_BLOCK_SIZE,
1904 .maxauthsize = MD5_DIGEST_SIZE,
1905 },
1906 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1907 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1908 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1909 },
1910 {
1911 .name = "authenc(hmac(sha1),cbc(des))",
1912 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1913 .blocksize = DES_BLOCK_SIZE,
1914 .type = CRYPTO_ALG_TYPE_AEAD,
1915 .template_aead = {
1916 .setkey = aead_setkey,
1917 .setauthsize = aead_setauthsize,
1918 .encrypt = aead_encrypt,
1919 .decrypt = aead_decrypt,
1920 .givencrypt = aead_givencrypt,
1921 .geniv = "<built-in>",
1922 .ivsize = DES_BLOCK_SIZE,
1923 .maxauthsize = SHA1_DIGEST_SIZE,
1924 },
1925 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1926 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1927 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1928 },
1929 {
1930 .name = "authenc(hmac(sha224),cbc(des))",
1931 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1932 .blocksize = DES_BLOCK_SIZE,
1933 .type = CRYPTO_ALG_TYPE_AEAD,
1934 .template_aead = {
1935 .setkey = aead_setkey,
1936 .setauthsize = aead_setauthsize,
1937 .encrypt = aead_encrypt,
1938 .decrypt = aead_decrypt,
1939 .givencrypt = aead_givencrypt,
1940 .geniv = "<built-in>",
1941 .ivsize = DES_BLOCK_SIZE,
1942 .maxauthsize = SHA224_DIGEST_SIZE,
1943 },
1944 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1945 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1946 OP_ALG_AAI_HMAC_PRECOMP,
1947 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1948 },
1949 {
1950 .name = "authenc(hmac(sha256),cbc(des))",
1951 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1952 .blocksize = DES_BLOCK_SIZE,
1953 .type = CRYPTO_ALG_TYPE_AEAD,
1954 .template_aead = {
1955 .setkey = aead_setkey,
1956 .setauthsize = aead_setauthsize,
1957 .encrypt = aead_encrypt,
1958 .decrypt = aead_decrypt,
1959 .givencrypt = aead_givencrypt,
1960 .geniv = "<built-in>",
1961 .ivsize = DES_BLOCK_SIZE,
1962 .maxauthsize = SHA256_DIGEST_SIZE,
1963 },
1964 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1965 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1966 OP_ALG_AAI_HMAC_PRECOMP,
1967 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1968 },
1969 {
1970 .name = "authenc(hmac(sha384),cbc(des))",
1971 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1972 .blocksize = DES_BLOCK_SIZE,
1973 .type = CRYPTO_ALG_TYPE_AEAD,
1974 .template_aead = {
1975 .setkey = aead_setkey,
1976 .setauthsize = aead_setauthsize,
1977 .encrypt = aead_encrypt,
1978 .decrypt = aead_decrypt,
1979 .givencrypt = aead_givencrypt,
1980 .geniv = "<built-in>",
1981 .ivsize = DES_BLOCK_SIZE,
1982 .maxauthsize = SHA384_DIGEST_SIZE,
1983 },
1984 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1985 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1986 OP_ALG_AAI_HMAC_PRECOMP,
1987 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1988 },
1989 {
1990 .name = "authenc(hmac(sha512),cbc(des))",
1991 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1992 .blocksize = DES_BLOCK_SIZE,
1993 .type = CRYPTO_ALG_TYPE_AEAD,
1994 .template_aead = {
1995 .setkey = aead_setkey,
1996 .setauthsize = aead_setauthsize,
1997 .encrypt = aead_encrypt,
1998 .decrypt = aead_decrypt,
1999 .givencrypt = aead_givencrypt,
2000 .geniv = "<built-in>",
2001 .ivsize = DES_BLOCK_SIZE,
2002 .maxauthsize = SHA512_DIGEST_SIZE,
2003 },
2004 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2005 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2006 OP_ALG_AAI_HMAC_PRECOMP,
2007 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2008 },
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
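
/*
 * Illustrative usage sketch (not part of this driver): a kernel-side
 * consumer reaches the templates above through the generic crypto API
 * by algorithm name, e.g.:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * Because these algorithms register with CAAM_CRA_PRIORITY (3000), the
 * crypto core will normally select them over the generic software
 * authenc() template when both are available.
 */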

struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
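
/*
 * Worked example of the ring selection above (illustrative): with
 * priv->total_jobrs == 2, successive tfm initializations observe
 * tgt_jr = 0, 1, 2, 3, 4, 5, ... (tfm_count starts at -1), and
 * (tgt_jr / 2) % 2 yields 0, 0, 1, 1, 0, 0, ... so tfms are spread
 * across job rings in pairs while every request for a given tfm stays
 * on a single ring, preserving per-tfm ordering.
 */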

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}

static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);	/* drop the node ref on all paths below */
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
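
/*
 * Example of the resulting registration (illustrative; the exact set of
 * fields shown in /proc/crypto varies by kernel version):
 *
 *	name         : cbc(aes)
 *	driver       : cbc-aes-caam
 *	priority     : 3000
 */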

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	INIT_LIST_HEAD(&priv->alg_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->alg_list);
	}
	if (!list_empty(&priv->alg_list))
		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(dev_node, "compatible", NULL));

	/* hold the node ref until its "compatible" property was read above */
	of_node_put(dev_node);

	return err;
}
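
/*
 * On success the dev_info() above logs a line built from the node's
 * "compatible" property, e.g. (illustrative):
 *
 *	fsl,sec-v4.0 algorithms registered in /proc/crypto
 */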

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");