crypto: inside-secure - fix hash when length is a multiple of a block
author		Antoine Tenart <antoine.tenart@free-electrons.com>
		Tue, 26 Dec 2017 16:21:17 +0000 (17:21 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Sat, 3 Feb 2018 16:38:50 +0000 (17:38 +0100)
commit 809778e02cd45d0625439fee67688f655627bb3c upstream.

This patch fixes the hash support in the SafeXcel driver when the update
size is a multiple of the block size and a final call with a size of 0
is made just after. In such cases the driver must cache the last block
from the update, so that the final call never has to handle zero-length
data, which the hardware cannot process.

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
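
To make the failing sequence concrete, below is a minimal, hypothetical
test sketch (not part of this patch; the module, its name and its flow
are illustrative assumptions) built on the kernel ahash API. It issues
an update of exactly one sha256 block (64 bytes) followed by a final
call carrying no new data, the pattern that, before this fix, made the
driver hand a zero-length operation to the engine:

/*
 * Hypothetical test module: update() with a block-multiple length,
 * then final() with no new data. With this fix, the SafeXcel driver
 * caches the last block on update and processes it on final.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/sha.h>

static int __init blockmult_test_init(void)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	static u8 buf[SHA256_BLOCK_SIZE];	/* exactly one block */
	static u8 digest[SHA256_DIGEST_SIZE];
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, buf, sizeof(buf));

	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (ret)
		goto out;

	/* update() with a block-multiple length: no partial spill-over */
	ahash_request_set_crypt(req, &sg, digest, sizeof(buf));
	ret = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (ret)
		goto out;

	/* final() with 0 bytes: the case the driver must now cache for */
	ahash_request_set_crypt(req, NULL, digest, 0);
	ret = crypto_wait_req(crypto_ahash_final(req), &wait);
out:
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

static void __exit blockmult_test_exit(void) { }

module_init(blockmult_test_init);
module_exit(blockmult_test_exit);
MODULE_LICENSE("GPL");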
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3980f946874fa08b64288b5d8b04bcac9134a0ad..ebf1388e1e100ccceec6039a9a279e7e24fa3fef 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -185,17 +185,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
        else
                cache_len = queued - areq->nbytes;
 
-       /*
-        * If this is not the last request and the queued data does not fit
-        * into full blocks, cache it for the next send() call.
-        */
-       extra = queued & (crypto_ahash_blocksize(ahash) - 1);
-       if (!req->last_req && extra) {
-               sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
-                                  req->cache_next, extra, areq->nbytes - extra);
-
-               queued -= extra;
-               len -= extra;
+       if (!req->last_req) {
+               /* If this is not the last request and the queued data does not
+                * fit into full blocks, cache it for the next send() call.
+                */
+               extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+               if (!extra)
+                       /* If this is not the last request and the queued data
+                        * is a multiple of a block, cache the last one for now.
+                        */
+                       extra = queued - crypto_ahash_blocksize(ahash);
+
+               if (extra) {
+                       sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+                                          req->cache_next, extra,
+                                          areq->nbytes - extra);
+
+                       queued -= extra;
+                       len -= extra;
+
+                       if (!queued) {
+                               *commands = 0;
+                               *results = 0;
+                               return 0;
+                       }
+               }
        }
 
        spin_lock_bh(&priv->ring[ring].egress_lock);
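
For reference, the caching decision the hunk adds can be modeled in
isolation. The userspace sketch below uses hypothetical names
(extra_to_cache, BLOCKSIZE) and a fixed 64-byte block size; it mirrors
this commit's arithmetic exactly, including the "queued - blocksize"
computation for the block-aligned case, without asserting that this is
the minimal amount to cache. Note that in the driver this path is only
entered with more than one block queued:

#include <stdio.h>

/*
 * Model of the patch's caching decision: how many trailing bytes to
 * stash in cache_next instead of sending them to the engine now.
 */
#define BLOCKSIZE 64u

static unsigned int extra_to_cache(unsigned int queued, int last_req)
{
	unsigned int extra;

	if (last_req)
		return 0;	/* final call: flush everything */

	/* Trailing bytes that do not fill a whole block are cached. */
	extra = queued & (BLOCKSIZE - 1);

	/*
	 * Block-aligned data: hold data back so a later zero-length
	 * final call still has a block to feed the hardware. With this
	 * commit's arithmetic, exactly one block is sent now and the
	 * remainder (queued - BLOCKSIZE) is cached.
	 */
	if (!extra)
		extra = queued - BLOCKSIZE;

	return extra;
}

int main(void)
{
	/* 128 block-aligned bytes, not the last request:
	 * 64 are sent now, 64 are cached for the next call. */
	printf("cache %u of 128\n", extra_to_cache(128, 0));	/* 64 */

	/* 100 queued bytes: the 36 past the block boundary are
	 * cached, 64 are sent. */
	printf("cache %u of 100\n", extra_to_cache(100, 0));	/* 36 */

	/* Last request: nothing is cached, even if block-aligned. */
	printf("cache %u of 128\n", extra_to_cache(128, 1));	/* 0 */
	return 0;
}

The hunk also adds an early return reporting zero commands and results
when caching leaves nothing queued, so the engine is never invoked with
an empty operation.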