ltq-deu: fix cryptomgr test errors for aes
author     Daniel Kestrel <kestrel1974@t-online.de>
Mon, 31 May 2021 13:17:16 +0000 (15:17 +0200)
committer  Hauke Mehrtens <hauke@hauke-m.de>
Wed, 5 Jan 2022 23:22:42 +0000 (00:22 +0100)
When running cryptomgr tests against the driver, there are several
occurrences of different errors for both evenly and unevenly split data in
the underlying scatterlists for the ctr and ctr_rfc3686 algorithms; these
are now fixed.
Also fix an error in the ctr_rfc3686_aes_decrypt function, introduced by
the previous commit, which used CRYPTO_DIR_ENCRYPT in the decrypt path.

Signed-off-by: Daniel Kestrel <kestrel1974@t-online.de>
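
For reference, a condensed sketch of the pattern the corrected CTR loops
follow (illustrative only; taken from the encrypt hunk below with names
simplified and error handling omitted, not the literal driver code):

    err = skcipher_walk_virt(&walk, req, false);

    /* Process only whole AES blocks per walk step; hand the sub-block
     * remainder back to the walk via skcipher_walk_done(). */
    while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
        unsigned int blk_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);

        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                        walk.iv, blk_bytes, CRYPTO_DIR_ENCRYPT, 0);
        err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
    }

    /* CTR is a stream mode, so a final partial block (< AES_BLOCK_SIZE)
     * is legal and is consumed once after the loop. */
    if (walk.nbytes) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                        walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }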
package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c

index bd5256469277c7a2087d50cf75aae421450c65c2..952f08f314081ca975c4891a43540605d0b5227a 100644
@@ -657,16 +657,26 @@ int ctr_basic_aes_encrypt(struct skcipher_request *req)
 {
     struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
     struct skcipher_walk walk;
-    unsigned int nbytes;
     int err;
+    unsigned int enc_bytes, nbytes;
 
     err = skcipher_walk_virt(&walk, req, false);
 
-    while ((nbytes = walk.nbytes)) {
+    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+            enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+            ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, 
+                       walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+        nbytes &= AES_BLOCK_SIZE - 1;
+        err = skcipher_walk_done(&walk, nbytes);
+    }
+
+    /* to handle remaining bytes < AES_BLOCK_SIZE */
+    if (walk.nbytes) {
         ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                        walk.iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+                       walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
         err = skcipher_walk_done(&walk, 0);
     }
+
     return err;
 }
 
@@ -680,14 +690,23 @@ int ctr_basic_aes_decrypt(struct skcipher_request *req)
 {
     struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
     struct skcipher_walk walk;
-    unsigned int nbytes;
     int err;
+    unsigned int dec_bytes, nbytes;
 
     err = skcipher_walk_virt(&walk, req, false);
 
-    while ((nbytes = walk.nbytes)) {
+    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+            dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+            ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, 
+                       walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+        nbytes &= AES_BLOCK_SIZE - 1;
+        err = skcipher_walk_done(&walk, nbytes);
+    }
+
+    /* to handle remaining bytes < AES_BLOCK_SIZE */
+    if (walk.nbytes) {
         ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                        walk.iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
+                       walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
         err = skcipher_walk_done(&walk, 0);
     }
 
@@ -709,6 +728,7 @@ struct skcipher_alg ifxdeu_ctr_basic_aes_alg = {
     .min_keysize             =   AES_MIN_KEY_SIZE,
     .max_keysize             =   AES_MAX_KEY_SIZE,
     .ivsize                  =   AES_BLOCK_SIZE,
+    .walksize                =   AES_BLOCK_SIZE,
     .setkey                  =   aes_set_key_skcipher,
     .encrypt                 =   ctr_basic_aes_encrypt,
     .decrypt                 =   ctr_basic_aes_decrypt,
@@ -725,13 +745,12 @@ int ctr_rfc3686_aes_encrypt(struct skcipher_request *req)
 {
     struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
     struct skcipher_walk walk;
-    unsigned int nbytes;
-    int err, bsize;
+    unsigned int nbytes, enc_bytes;
+    int err;
     u8 rfc3686_iv[16];
 
     err = skcipher_walk_virt(&walk, req, false);
     nbytes = walk.nbytes;
-    bsize = nbytes;
 
     /* set up counter block */
     memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); 
@@ -741,22 +760,12 @@ int ctr_rfc3686_aes_encrypt(struct skcipher_request *req)
     *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
         cpu_to_be32(1);
 
-    /* scatterlist source is the same size as request size, just process once */
-    if (nbytes == walk.nbytes) {
-       ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                       rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
-       nbytes -= walk.nbytes;
-       err = skcipher_walk_done(&walk, nbytes);
-       return err;
-    }
-
-    while ((nbytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
-       ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                       rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
-
-       nbytes -= walk.nbytes;
-       bsize -= walk.nbytes;
-       err = skcipher_walk_done(&walk, nbytes);
+    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+            enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+            ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, 
+                       rfc3686_iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+        nbytes &= AES_BLOCK_SIZE - 1;
+        err = skcipher_walk_done(&walk, nbytes);
     }
 
     /* to handle remaining bytes < AES_BLOCK_SIZE */
@@ -779,13 +788,12 @@ int ctr_rfc3686_aes_decrypt(struct skcipher_request *req)
 {
     struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
     struct skcipher_walk walk;
-    unsigned int nbytes;
-    int err, bsize;
+    unsigned int nbytes, dec_bytes;
+    int err;
     u8 rfc3686_iv[16];
 
     err = skcipher_walk_virt(&walk, req, false);
     nbytes = walk.nbytes;
-    bsize = nbytes;
 
     /* set up counter block */
     memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); 
@@ -795,28 +803,18 @@ int ctr_rfc3686_aes_decrypt(struct skcipher_request *req)
     *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
         cpu_to_be32(1);
 
-    /* scatterlist source is the same size as request size, just process once */
-    if (nbytes == walk.nbytes) {
-       ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                       rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
-       nbytes -= walk.nbytes;
-       err = skcipher_walk_done(&walk, nbytes);
-       return err;
-    }
-
-    while ((nbytes = walk.nbytes) % (walk.nbytes >= AES_BLOCK_SIZE)) {
-       ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                       rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
-
-       nbytes -= walk.nbytes;
-       bsize -= walk.nbytes;
-       err = skcipher_walk_done(&walk, nbytes);
+    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+            dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+            ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, 
+                       rfc3686_iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+        nbytes &= AES_BLOCK_SIZE - 1;
+        err = skcipher_walk_done(&walk, nbytes);
     }
 
     /* to handle remaining bytes < AES_BLOCK_SIZE */
     if (walk.nbytes) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                       rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+                       rfc3686_iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
     }
 
@@ -838,6 +836,7 @@ struct skcipher_alg ifxdeu_ctr_rfc3686_aes_alg = {
     .min_keysize             =   AES_MIN_KEY_SIZE,
     .max_keysize             =   CTR_RFC3686_MAX_KEY_SIZE,
     .ivsize                  =   CTR_RFC3686_IV_SIZE,
+    .walksize                =   AES_BLOCK_SIZE,
     .setkey                  =   ctr_rfc3686_aes_set_key_skcipher,
     .encrypt                 =   ctr_rfc3686_aes_encrypt,
     .decrypt                 =   ctr_rfc3686_aes_decrypt,
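
For reference, the rfc3686 counter block that both rfc3686 functions pass to
the hardware follows the RFC 3686 layout. The IV copy falls between the hunks
shown above, so the following setup is a reconstructed sketch (assuming the
kernel's standard CTR_RFC3686_* sizes: 4-byte nonce, 8-byte IV), not a
verbatim quote of the driver:

    u8 rfc3686_iv[16];

    /* | nonce (4 bytes) | per-request IV (8 bytes) | counter = 1 (be32) | */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, CTR_RFC3686_IV_SIZE);
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);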