crypto: vmx - Use skcipher for ctr fallback
author Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
Mon, 16 Oct 2017 22:54:19 +0000 (20:54 -0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 5 Sep 2018 07:26:23 +0000 (09:26 +0200)
commit e666d4e9ceec94c0a88c94b7db31d56474da43b3 upstream.

Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/crypto/vmx/aes_ctr.c

index 02ba5f2aa0e6e8d09b431d9ad0019fa4c1549f94..cd777c75291dfb72f6df5c2967909da694627b78 100644 (file)
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+
 #include "aesp8-ppc.h"
 
 struct p8_aes_ctr_ctx {
-       struct crypto_blkcipher *fallback;
+       struct crypto_skcipher *fallback;
        struct aes_key enc_key;
 };
 
 static int p8_aes_ctr_init(struct crypto_tfm *tfm)
 {
        const char *alg = crypto_tfm_alg_name(tfm);
-       struct crypto_blkcipher *fallback;
+       struct crypto_skcipher *fallback;
        struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       fallback =
-           crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+       fallback = crypto_alloc_skcipher(alg, 0,
+                       CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback)) {
                printk(KERN_ERR
                       "Failed to allocate transformation for '%s': %ld\n",
@@ -49,9 +51,9 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
                return PTR_ERR(fallback);
        }
 
-       crypto_blkcipher_set_flags(
+       crypto_skcipher_set_flags(
                fallback,
-               crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+               crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
        ctx->fallback = fallback;
 
        return 0;
@@ -62,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
        struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (ctx->fallback) {
-               crypto_free_blkcipher(ctx->fallback);
+               crypto_free_skcipher(ctx->fallback);
                ctx->fallback = NULL;
        }
 }
@@ -81,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
        pagefault_enable();
        preempt_enable();
 
-       ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+       ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
        return ret;
 }
 
@@ -115,15 +117,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
        struct blkcipher_walk walk;
        struct p8_aes_ctr_ctx *ctx =
                crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
-       struct blkcipher_desc fallback_desc = {
-               .tfm = ctx->fallback,
-               .info = desc->info,
-               .flags = desc->flags
-       };
 
        if (in_interrupt()) {
-               ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
-                                              nbytes);
+               SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+               skcipher_request_set_tfm(req, ctx->fallback);
+               skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+               skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+               ret = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
        } else {
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);