crypto: vmx - disable preemption to enable vsx in aes_ctr.c
authorLi Zhong <zhong@linux.vnet.ibm.com>
Fri, 20 Jan 2017 08:35:33 +0000 (16:35 +0800)
committerHerbert Xu <herbert@gondor.apana.org.au>
Mon, 23 Jan 2017 14:50:34 +0000 (22:50 +0800)
Some preemptible-check warnings were reported from enable_kernel_vsx(). This
patch disables preemption in aes_ctr.c before enabling VSX, making it
consistent with the other files in the same directory.

Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/vmx/aes_ctr.c

index 38ed10d761d006eb2f9e24e1c318f98a09a4a323..7cf6d31c1123a117d55dc3de87ed543a3c6279a6 100644 (file)
@@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
        int ret;
        struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
+       preempt_disable();
        pagefault_disable();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        disable_kernel_vsx();
        pagefault_enable();
+       preempt_enable();
 
        ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
        return ret;
@@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;
 
+       preempt_disable();
        pagefault_disable();
        enable_kernel_vsx();
        aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
        disable_kernel_vsx();
        pagefault_enable();
+       preempt_enable();
 
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
@@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
                while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+                       preempt_disable();
                        pagefault_disable();
                        enable_kernel_vsx();
                        aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
@@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                                                    walk.iv);
                        disable_kernel_vsx();
                        pagefault_enable();
+                       preempt_enable();
 
                        /* We need to update IV mostly for last bytes/round */
                        inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;