drivers/crypto/geode-aes.c
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

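/*
 * The Geode LX has a single AES engine, so the MMIO base and the lock that
 * serializes access to it are module-global; every hardware operation below
 * runs with this spinlock held.
 */
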
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
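
/*
 * Both helpers above transfer a 128-bit key or IV to or from the engine's
 * register file as four 32-bit MMIO accesses; callers pass a 16-byte buffer
 * and one of the AES_WRITEKEY0_REG/AES_WRITEIV0_REG offsets.
 */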

static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}
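
/*
 * do_crypt() hands the engine *physical* addresses, so src/dst must be
 * physically contiguous (the cra_alignmask of 15 below also keeps them
 * 16-byte aligned), and then busy-waits on the completion interrupt bit
 * with a bounded spin. Returns 0 on success and 1 if the operation timed
 * out before AES_INTRA_PENDING was raised.
 */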

static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination are the same, the coherent
	 * flags must be set; we simply set them unconditionally rather
	 * than checking.
	 */
	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */
	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}

/* CRYPTO-API Functions */

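/*
 * The LX engine only handles 128-bit AES keys. 192- and 256-bit keys are
 * accepted here but routed to a software fallback transform, which is
 * allocated in cra_init via CRYPTO_ALG_NEED_FALLBACK.
 */
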
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

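/*
 * For the blkcipher fallback we temporarily swap desc->tfm to point at the
 * software transform, run the request through it, and restore the original
 * tfm before returning, so the caller never sees the substitution.
 */
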
static int fallback_blk_dec(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

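/*
 * The plain-cipher entry points above process exactly one 16-byte block as
 * a single-block ECB operation on the engine; anything but a 128-bit key
 * is handed to the software fallback cipher instead.
 */
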
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
	.cra_name = "aes",
	.cra_driver_name = "geode-aes",
	.cra_priority = 300,
	.cra_alignmask = 15,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_blocksize = AES_MIN_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = geode_setkey_cip,
			.cia_encrypt = geode_encrypt,
			.cia_decrypt = geode_decrypt
		}
	}
};
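
/*
 * Registered as "aes" with priority 300, so a plain
 * crypto_alloc_cipher("aes", 0, 0) will prefer this engine over the generic
 * software implementation (aes-generic registers at a lower priority). The
 * alignmask of 15 makes the crypto layer hand us 16-byte-aligned buffers,
 * which do_crypt() needs for its physical-address DMA.
 */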

static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
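
/*
 * CBC chaining across walk chunks works because op->iv points at walk.iv:
 * geode_aes_crypt() loads that IV into the engine before each chunk and
 * reads the updated IV back afterwards. op->len is rounded down to a whole
 * number of blocks, and the leftover is returned to blkcipher_walk_done().
 */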

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_MIN_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_cbc_encrypt,
			.decrypt = geode_cbc_decrypt,
			.ivsize = AES_IV_LENGTH,
		}
	}
};

static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_MIN_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_ecb_encrypt,
			.decrypt = geode_ecb_decrypt,
		}
	}
};

static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

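/*
 * Probe order: enable the PCI device, claim its regions, map BAR 0, clear
 * any pending interrupt state, then register the three algorithms. Each
 * error label below unwinds exactly the steps that succeeded, in reverse.
 */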
static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

eecb:
	crypto_unregister_alg(&geode_ecb_alg);

ealg:
	crypto_unregister_alg(&geode_alg);

eiomap:
	pci_iounmap(dev, _iobase);

erequest:
	pci_release_regions(dev);

eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = __devexit_p(geode_aes_remove)
};

static int __init
geode_aes_init(void)
{
	return pci_register_driver(&geode_aes_driver);
}

static void __exit
geode_aes_exit(void)
{
	pci_unregister_driver(&geode_aes_driver);
}

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);
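
/*
 * Once the module is loaded and the LX AES PCI device is bound, the three
 * algorithms should show up in /proc/crypto under the driver names
 * "geode-aes", "ecb-aes-geode" and "cbc-aes-geode"; a quick check is
 * something like:
 *
 *	grep -B1 -A2 geode /proc/crypto
 *
 * (an illustrative sanity check, not part of the driver itself).
 */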