From: Anton Blanchard
Date: Thu, 29 Oct 2015 00:44:03 +0000 (+1100)
Subject: crypto: vmx: Only call enable_kernel_vsx()
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=1552cd703cf5a07caeb17ccd82f80e20a23b1707;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

crypto: vmx: Only call enable_kernel_vsx()

With the recent change to enable_kernel_vsx(), we no longer need to
call enable_kernel_fp() and enable_kernel_altivec().

Signed-off-by: Anton Blanchard
Signed-off-by: Michael Ellerman
---

diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 263af709e536..20539fb7e975 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -83,7 +83,6 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
@@ -103,7 +102,6 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_encrypt(src, dst, &ctx->enc_key);
 		pagefault_enable();
@@ -120,7 +118,6 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_decrypt(src, dst, &ctx->dec_key);
 		pagefault_enable();
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 0b8fe2ec5315..8847b92e9ff0 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -84,7 +84,6 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
@@ -115,7 +114,6 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -156,7 +154,6 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index ee1306cd8f59..80958660c31a 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -81,7 +81,6 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	pagefault_enable();
@@ -100,7 +99,6 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 	unsigned int nbytes = walk->nbytes;
 
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
 	pagefault_enable();
@@ -133,7 +131,6 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 	ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
 					    walk.dst.virt.addr,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 2183a2e77641..1f4586c2fd25 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -118,9 +118,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
-	enable_kernel_fp();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
 	pagefault_enable();
 	preempt_enable();
@@ -149,9 +147,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 			       GHASH_DIGEST_SIZE - dctx->bytes);
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
 			pagefault_enable();
@@ -164,9 +160,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 		if (len) {
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 			pagefault_enable();
 			preempt_enable();
@@ -195,9 +189,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 				dctx->buffer[i] = 0;
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
 			pagefault_enable();
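
For reference, a minimal sketch (not part of the patch) of the call pattern all of
these hunks converge on, assuming the revised enable_kernel_vsx() also gives the
thread FP and VMX/AltiVec register state, which is the premise of the commit
message. vsx_do_work() is a hypothetical stand-in for the aes_p8_* / gcm_*_p8
assembly helpers, and the header choices are approximate for this kernel era.

	#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
	#include <linux/uaccess.h>	/* pagefault_disable()/pagefault_enable() */
	#include <asm/switch_to.h>	/* enable_kernel_vsx() on powerpc */

	/* Hypothetical VSX-using routine standing in for aes_p8_* / gcm_*_p8. */
	void vsx_do_work(void);

	static void p8_vsx_region_sketch(void)
	{
		preempt_disable();
		pagefault_disable();
		/*
		 * Previously this point also needed enable_kernel_altivec()
		 * and, in ghash.c, enable_kernel_fp(); after the change a
		 * single enable_kernel_vsx() call suffices.
		 */
		enable_kernel_vsx();
		vsx_do_work();
		pagefault_enable();
		preempt_enable();
	}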