ramips: 6.1: copy config and patches
[openwrt/staging/jow.git] / target / linux / ramips / patches-6.1 / 860-ramips-add-eip93-driver.patch
1 --- /dev/null
2 +++ b/drivers/crypto/mtk-eip93/Kconfig
3 @@ -0,0 +1,64 @@
4 +# SPDX-License-Identifier: GPL-2.0
5 +config CRYPTO_DEV_EIP93_SKCIPHER
6 + tristate
7 +
8 +config CRYPTO_DEV_EIP93_HMAC
9 + tristate
10 +
11 +config CRYPTO_DEV_EIP93
12 + tristate "Support for EIP93 crypto HW accelerators"
13 + depends on SOC_MT7621 || COMPILE_TEST
14 + help
15 +	  The EIP93 provides various crypto HW accelerators. Select this if
16 + you want to use the EIP93 modules for any of the crypto algorithms.
17 +
18 +if CRYPTO_DEV_EIP93
19 +
20 +config CRYPTO_DEV_EIP93_AES
21 + bool "Register AES algorithm implementations with the Crypto API"
22 + default y
23 + select CRYPTO_DEV_EIP93_SKCIPHER
24 + select CRYPTO_LIB_AES
25 + select CRYPTO_SKCIPHER
26 + help
27 +	  Selecting this will offload AES ECB, CBC and CTR crypto
28 + to the EIP-93 crypto engine.
29 +
30 +config CRYPTO_DEV_EIP93_DES
31 + bool "Register legacy DES / 3DES algorithm with the Crypto API"
32 + default y
33 + select CRYPTO_DEV_EIP93_SKCIPHER
34 + select CRYPTO_LIB_DES
35 + select CRYPTO_SKCIPHER
36 + help
37 + Selecting this will offload DES and 3DES ECB and CBC
38 + crypto to the EIP-93 crypto engine.
39 +
40 +config CRYPTO_DEV_EIP93_AEAD
41 + bool "Register AEAD algorithm with the Crypto API"
42 + default y
43 + select CRYPTO_DEV_EIP93_HMAC
44 + select CRYPTO_AEAD
45 + select CRYPTO_AUTHENC
46 + select CRYPTO_MD5
47 + select CRYPTO_SHA1
48 + select CRYPTO_SHA256
49 + help
50 + Selecting this will offload AEAD authenc(hmac(x), cipher(y))
51 + crypto to the EIP-93 crypto engine.
52 +
53 +config CRYPTO_DEV_EIP93_GENERIC_SW_MAX_LEN
54 + int "Max skcipher software fallback length"
55 + default 256
56 + help
57 +	  Maximum length of a crypt request that will fall back to
58 +	  software crypto for any skcipher *except* AES-128.
59 +
60 +config CRYPTO_DEV_EIP93_AES_128_SW_MAX_LEN
61 + int "Max AES-128 skcipher software fallback length"
62 + default 512
63 + help
64 +	  Maximum length of a crypt request that will fall back to
65 +	  software crypto for the AES-128 skcipher.
66 +
67 +endif
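The two length thresholds above gate the software-fallback path in the skcipher code further down in this patch. A minimal sketch of how such a check could look (hypothetical helper, not part of the patch; the real test lives in mtk_skcipher_crypt()):

	/* Sketch: pick the fallback threshold based on the AES key size. */
	static bool eip93_want_sw_fallback(unsigned int cryptlen,
					   unsigned int keylen)
	{
		unsigned int max = (keylen == AES_KEYSIZE_128) ?
				   CONFIG_CRYPTO_DEV_EIP93_AES_128_SW_MAX_LEN :
				   CONFIG_CRYPTO_DEV_EIP93_GENERIC_SW_MAX_LEN;

		return cryptlen <= max;
	}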
68 --- /dev/null
69 +++ b/drivers/crypto/mtk-eip93/Makefile
70 @@ -0,0 +1,7 @@
71 +obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o
72 +
73 +crypto-hw-eip93-y += eip93-main.o eip93-common.o
74 +
75 +crypto-hw-eip93-$(CONFIG_CRYPTO_DEV_EIP93_SKCIPHER) += eip93-cipher.o
76 +crypto-hw-eip93-$(CONFIG_CRYPTO_DEV_EIP93_AEAD) += eip93-aead.o
77 +
78 --- /dev/null
79 +++ b/drivers/crypto/mtk-eip93/eip93-aead.c
80 @@ -0,0 +1,768 @@
81 +// SPDX-License-Identifier: GPL-2.0
82 +/*
83 + * Copyright (C) 2019 - 2021
84 + *
85 + * Richard van Schagen <vschagen@icloud.com>
86 + */
87 +
88 +#include <crypto/aead.h>
89 +#include <crypto/aes.h>
90 +#include <crypto/authenc.h>
91 +#include <crypto/ctr.h>
92 +#include <crypto/hmac.h>
93 +#include <crypto/internal/aead.h>
94 +#include <crypto/md5.h>
95 +#include <crypto/null.h>
96 +#include <crypto/sha1.h>
97 +#include <crypto/sha2.h>
98 +
99 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
100 +#include <crypto/internal/des.h>
101 +#endif
102 +
103 +#include <linux/crypto.h>
104 +#include <linux/dma-mapping.h>
105 +
106 +#include "eip93-aead.h"
107 +#include "eip93-cipher.h"
108 +#include "eip93-common.h"
109 +#include "eip93-regs.h"
110 +
111 +void mtk_aead_handle_result(struct crypto_async_request *async, int err)
112 +{
113 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
114 + struct mtk_device *mtk = ctx->mtk;
115 + struct aead_request *req = aead_request_cast(async);
116 + struct mtk_cipher_reqctx *rctx = aead_request_ctx(req);
117 +
118 + mtk_unmap_dma(mtk, rctx, req->src, req->dst);
119 + mtk_handle_result(mtk, rctx, req->iv);
120 +
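+	/* engine status 1 signals an authentication (ICV) failure */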
121 + if (err == 1)
122 + err = -EBADMSG;
123 + /* let software handle anti-replay errors */
124 + if (err == 4)
125 + err = 0;
126 +
127 + aead_request_complete(req, err);
128 +}
129 +
130 +static int mtk_aead_send_req(struct crypto_async_request *async)
131 +{
132 + struct aead_request *req = aead_request_cast(async);
133 + struct mtk_cipher_reqctx *rctx = aead_request_ctx(req);
134 + int err;
135 +
136 + err = check_valid_request(rctx);
137 + if (err) {
138 + aead_request_complete(req, err);
139 + return err;
140 + }
141 +
142 + return mtk_send_req(async, req->iv, rctx);
143 +}
144 +
145 +/* Crypto aead API functions */
146 +static int mtk_aead_cra_init(struct crypto_tfm *tfm)
147 +{
148 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
149 + struct mtk_alg_template *tmpl = container_of(tfm->__crt_alg,
150 + struct mtk_alg_template, alg.aead.base);
151 + u32 flags = tmpl->flags;
152 + char *alg_base;
153 +
154 + crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
155 + sizeof(struct mtk_cipher_reqctx));
156 +
157 + ctx->mtk = tmpl->mtk;
158 + ctx->in_first = true;
159 + ctx->out_first = true;
160 +
161 + ctx->sa_in = kzalloc(sizeof(struct saRecord_s), GFP_KERNEL);
162 + if (!ctx->sa_in)
163 + return -ENOMEM;
164 +
165 + ctx->sa_base_in = dma_map_single(ctx->mtk->dev, ctx->sa_in,
166 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
167 +
168 + ctx->sa_out = kzalloc(sizeof(struct saRecord_s), GFP_KERNEL);
169 + if (!ctx->sa_out)
170 + return -ENOMEM;
171 +
172 + ctx->sa_base_out = dma_map_single(ctx->mtk->dev, ctx->sa_out,
173 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
174 +
175 +	/* HMAC ipad/opad precompute is done with a software shash for now */
176 +	if (IS_HASH_MD5(flags))
177 +		alg_base = "md5";
178 +	else if (IS_HASH_SHA1(flags))
179 +		alg_base = "sha1";
180 +	else if (IS_HASH_SHA224(flags))
181 +		alg_base = "sha224";
182 +	else if (IS_HASH_SHA256(flags))
183 +		alg_base = "sha256";
+	else
+		return -EINVAL;
184 +
185 + ctx->shash = crypto_alloc_shash(alg_base, 0, CRYPTO_ALG_NEED_FALLBACK);
186 +
187 + if (IS_ERR(ctx->shash)) {
188 + dev_err(ctx->mtk->dev, "base driver %s could not be loaded.\n",
189 + alg_base);
190 + return PTR_ERR(ctx->shash);
191 + }
192 +
193 + return 0;
194 +}
195 +
196 +static void mtk_aead_cra_exit(struct crypto_tfm *tfm)
197 +{
198 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
199 +
200 + if (ctx->shash)
201 + crypto_free_shash(ctx->shash);
202 +
203 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_in,
204 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
205 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_out,
206 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
207 + kfree(ctx->sa_in);
208 + kfree(ctx->sa_out);
209 +}
210 +
211 +static int mtk_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
212 + unsigned int len)
213 +{
214 + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
215 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
216 + struct mtk_alg_template *tmpl = container_of(tfm->__crt_alg,
217 +				struct mtk_alg_template, alg.aead.base);
218 + u32 flags = tmpl->flags;
219 + u32 nonce = 0;
220 + struct crypto_authenc_keys keys;
221 + struct crypto_aes_ctx aes;
222 + struct saRecord_s *saRecord = ctx->sa_out;
223 + int sa_size = sizeof(struct saRecord_s);
224 + int err = -EINVAL;
225 +
227 + if (crypto_authenc_extractkeys(&keys, key, len))
228 + return err;
229 +
230 + if (IS_RFC3686(flags)) {
231 + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
232 + return err;
233 +
234 + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
235 + memcpy(&nonce, keys.enckey + keys.enckeylen,
236 + CTR_RFC3686_NONCE_SIZE);
237 + }
238 +
239 + switch ((flags & MTK_ALG_MASK)) {
240 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
241 + case MTK_ALG_DES:
242 + err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
243 + break;
244 + case MTK_ALG_3DES:
245 + if (keys.enckeylen != DES3_EDE_KEY_SIZE)
246 + return -EINVAL;
247 +
248 + err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
249 + break;
250 +#endif
251 + case MTK_ALG_AES:
252 + err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
253 + }
254 + if (err)
255 + return err;
256 +
257 + ctx->blksize = crypto_aead_blocksize(ctfm);
258 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_in, sa_size,
259 + DMA_TO_DEVICE);
260 +
261 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_out, sa_size,
262 + DMA_TO_DEVICE);
263 + /* Encryption key */
264 + mtk_set_saRecord(saRecord, keys.enckeylen, flags);
265 + saRecord->saCmd0.bits.opCode = 1;
266 + saRecord->saCmd0.bits.digestLength = ctx->authsize >> 2;
267 +
268 + memcpy(saRecord->saKey, keys.enckey, keys.enckeylen);
269 + ctx->saNonce = nonce;
270 + saRecord->saNonce = nonce;
271 +
272 + /* authentication key */
273 + err = mtk_authenc_setkey(ctx->shash, saRecord, keys.authkey,
274 + keys.authkeylen);
275 +
276 + saRecord->saCmd0.bits.direction = 0;
277 + memcpy(ctx->sa_in, saRecord, sa_size);
278 + ctx->sa_in->saCmd0.bits.direction = 1;
279 + ctx->sa_in->saCmd1.bits.copyDigest = 0;
280 +
281 + ctx->sa_base_out = dma_map_single(ctx->mtk->dev, ctx->sa_out, sa_size,
282 + DMA_TO_DEVICE);
283 + ctx->sa_base_in = dma_map_single(ctx->mtk->dev, ctx->sa_in, sa_size,
284 + DMA_TO_DEVICE);
285 + ctx->in_first = true;
286 + ctx->out_first = true;
287 +
288 + return err;
289 +}
290 +
291 +static int mtk_aead_setauthsize(struct crypto_aead *ctfm,
292 + unsigned int authsize)
293 +{
294 + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
295 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
296 +
297 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_in,
298 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
299 +
300 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_out,
301 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
302 +
303 + ctx->authsize = authsize;
304 + ctx->sa_in->saCmd0.bits.digestLength = ctx->authsize >> 2;
305 + ctx->sa_out->saCmd0.bits.digestLength = ctx->authsize >> 2;
306 +
307 + ctx->sa_base_out = dma_map_single(ctx->mtk->dev, ctx->sa_out,
308 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
309 + ctx->sa_base_in = dma_map_single(ctx->mtk->dev, ctx->sa_in,
310 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
311 + return 0;
312 +}
313 +
314 +static void mtk_aead_setassoc(struct mtk_crypto_ctx *ctx,
315 + struct aead_request *req, bool in)
316 +{
317 + struct saRecord_s *saRecord;
318 +
319 + if (in) {
320 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_in,
321 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
322 + saRecord = ctx->sa_in;
323 + saRecord->saCmd1.bits.hashCryptOffset = req->assoclen >> 2;
324 +
325 + ctx->sa_base_in = dma_map_single(ctx->mtk->dev, ctx->sa_in,
326 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
327 + ctx->assoclen_in = req->assoclen;
328 + } else {
329 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_out,
330 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
331 + saRecord = ctx->sa_out;
332 + saRecord->saCmd1.bits.hashCryptOffset = req->assoclen >> 2;
333 +
334 + ctx->sa_base_out = dma_map_single(ctx->mtk->dev, ctx->sa_out,
335 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
336 + ctx->assoclen_out = req->assoclen;
337 + }
338 +}
339 +
340 +static int mtk_aead_crypt(struct aead_request *req)
341 +{
342 + struct mtk_cipher_reqctx *rctx = aead_request_ctx(req);
343 + struct crypto_async_request *async = &req->base;
344 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
345 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
346 +
347 + rctx->textsize = req->cryptlen;
348 + rctx->blksize = ctx->blksize;
349 + rctx->assoclen = req->assoclen;
350 + rctx->authsize = ctx->authsize;
351 + rctx->sg_src = req->src;
352 + rctx->sg_dst = req->dst;
353 + rctx->ivsize = crypto_aead_ivsize(aead);
354 + rctx->flags |= MTK_DESC_AEAD;
355 +
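+	/* on decryption the trailing ICV is not part of the plaintext */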
356 +	if (IS_DECRYPT(rctx->flags))
357 + rctx->textsize -= rctx->authsize;
358 +
359 + return mtk_aead_send_req(async);
360 +}
361 +
362 +static int mtk_aead_encrypt(struct aead_request *req)
363 +{
364 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
365 + struct mtk_cipher_reqctx *rctx = aead_request_ctx(req);
366 + struct mtk_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
367 + struct mtk_alg_template, alg.aead.base);
368 +
369 + rctx->flags = tmpl->flags;
370 + rctx->flags |= MTK_ENCRYPT;
371 + if (ctx->out_first) {
372 + mtk_aead_setassoc(ctx, req, false);
373 + ctx->out_first = false;
374 + }
375 +
376 + if (req->assoclen != ctx->assoclen_out) {
377 + dev_err(ctx->mtk->dev, "Request AAD length error\n");
378 + return -EINVAL;
379 + }
380 +
381 + rctx->saRecord_base = ctx->sa_base_out;
382 +
383 + return mtk_aead_crypt(req);
384 +}
385 +
386 +static int mtk_aead_decrypt(struct aead_request *req)
387 +{
388 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
389 + struct mtk_cipher_reqctx *rctx = aead_request_ctx(req);
390 + struct mtk_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
391 + struct mtk_alg_template, alg.aead.base);
392 +
393 + rctx->flags = tmpl->flags;
394 + rctx->flags |= MTK_DECRYPT;
395 + if (ctx->in_first) {
396 + mtk_aead_setassoc(ctx, req, true);
397 + ctx->in_first = false;
398 + }
399 +
400 + if (req->assoclen != ctx->assoclen_in) {
401 + dev_err(ctx->mtk->dev, "Request AAD length error\n");
402 + return -EINVAL;
403 + }
404 +
405 + rctx->saRecord_base = ctx->sa_base_in;
406 +
407 + return mtk_aead_crypt(req);
408 +}
409 +
410 +/* Available authenc algorithms in this module */
411 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
412 +struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_aes = {
413 + .type = MTK_ALG_TYPE_AEAD,
414 + .flags = MTK_HASH_HMAC | MTK_HASH_MD5 | MTK_MODE_CBC | MTK_ALG_AES,
415 + .alg.aead = {
416 + .setkey = mtk_aead_setkey,
417 + .encrypt = mtk_aead_encrypt,
418 + .decrypt = mtk_aead_decrypt,
419 + .ivsize = AES_BLOCK_SIZE,
420 + .setauthsize = mtk_aead_setauthsize,
421 + .maxauthsize = MD5_DIGEST_SIZE,
422 + .base = {
423 + .cra_name = "authenc(hmac(md5),cbc(aes))",
424 + .cra_driver_name =
425 +				"authenc(hmac(md5-eip93),cbc(aes-eip93))",
426 + .cra_priority = MTK_CRA_PRIORITY,
427 + .cra_flags = CRYPTO_ALG_ASYNC |
428 + CRYPTO_ALG_KERN_DRIVER_ONLY,
429 + .cra_blocksize = AES_BLOCK_SIZE,
430 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
431 + .cra_alignmask = 0,
432 + .cra_init = mtk_aead_cra_init,
433 + .cra_exit = mtk_aead_cra_exit,
434 + .cra_module = THIS_MODULE,
435 + },
436 + },
437 +};
438 +
439 +struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_aes = {
440 + .type = MTK_ALG_TYPE_AEAD,
441 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA1 | MTK_MODE_CBC | MTK_ALG_AES,
442 + .alg.aead = {
443 + .setkey = mtk_aead_setkey,
444 + .encrypt = mtk_aead_encrypt,
445 + .decrypt = mtk_aead_decrypt,
446 + .ivsize = AES_BLOCK_SIZE,
447 + .setauthsize = mtk_aead_setauthsize,
448 + .maxauthsize = SHA1_DIGEST_SIZE,
449 + .base = {
450 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
451 + .cra_driver_name =
452 + "authenc(hmac(sha1-eip93),cbc(aes-eip93))",
453 + .cra_priority = MTK_CRA_PRIORITY,
454 + .cra_flags = CRYPTO_ALG_ASYNC |
455 + CRYPTO_ALG_KERN_DRIVER_ONLY,
456 + .cra_blocksize = AES_BLOCK_SIZE,
457 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
458 + .cra_alignmask = 0,
459 + .cra_init = mtk_aead_cra_init,
460 + .cra_exit = mtk_aead_cra_exit,
461 + .cra_module = THIS_MODULE,
462 + },
463 + },
464 +};
465 +
466 +struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_aes = {
467 + .type = MTK_ALG_TYPE_AEAD,
468 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA224 | MTK_MODE_CBC | MTK_ALG_AES,
469 + .alg.aead = {
470 + .setkey = mtk_aead_setkey,
471 + .encrypt = mtk_aead_encrypt,
472 + .decrypt = mtk_aead_decrypt,
473 + .ivsize = AES_BLOCK_SIZE,
474 + .setauthsize = mtk_aead_setauthsize,
475 + .maxauthsize = SHA224_DIGEST_SIZE,
476 + .base = {
477 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
478 + .cra_driver_name =
479 + "authenc(hmac(sha224-eip93),cbc(aes-eip93))",
480 + .cra_priority = MTK_CRA_PRIORITY,
481 + .cra_flags = CRYPTO_ALG_ASYNC |
482 + CRYPTO_ALG_KERN_DRIVER_ONLY,
483 + .cra_blocksize = AES_BLOCK_SIZE,
484 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
485 + .cra_alignmask = 0,
486 + .cra_init = mtk_aead_cra_init,
487 + .cra_exit = mtk_aead_cra_exit,
488 + .cra_module = THIS_MODULE,
489 + },
490 + },
491 +};
492 +
493 +struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_aes = {
494 + .type = MTK_ALG_TYPE_AEAD,
495 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA256 | MTK_MODE_CBC | MTK_ALG_AES,
496 + .alg.aead = {
497 + .setkey = mtk_aead_setkey,
498 + .encrypt = mtk_aead_encrypt,
499 + .decrypt = mtk_aead_decrypt,
500 + .ivsize = AES_BLOCK_SIZE,
501 + .setauthsize = mtk_aead_setauthsize,
502 + .maxauthsize = SHA256_DIGEST_SIZE,
503 + .base = {
504 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
505 + .cra_driver_name =
506 + "authenc(hmac(sha256-eip93),cbc(aes-eip93))",
507 + .cra_priority = MTK_CRA_PRIORITY,
508 + .cra_flags = CRYPTO_ALG_ASYNC |
509 + CRYPTO_ALG_KERN_DRIVER_ONLY,
510 + .cra_blocksize = AES_BLOCK_SIZE,
511 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
512 + .cra_alignmask = 0,
513 + .cra_init = mtk_aead_cra_init,
514 + .cra_exit = mtk_aead_cra_exit,
515 + .cra_module = THIS_MODULE,
516 + },
517 + },
518 +};
519 +
520 +struct mtk_alg_template mtk_alg_authenc_hmac_md5_rfc3686_aes = {
521 + .type = MTK_ALG_TYPE_AEAD,
522 + .flags = MTK_HASH_HMAC | MTK_HASH_MD5 |
523 + MTK_MODE_CTR | MTK_MODE_RFC3686 | MTK_ALG_AES,
524 + .alg.aead = {
525 + .setkey = mtk_aead_setkey,
526 + .encrypt = mtk_aead_encrypt,
527 + .decrypt = mtk_aead_decrypt,
528 + .ivsize = CTR_RFC3686_IV_SIZE,
529 + .setauthsize = mtk_aead_setauthsize,
530 + .maxauthsize = MD5_DIGEST_SIZE,
531 + .base = {
532 + .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
533 + .cra_driver_name =
534 + "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))",
535 + .cra_priority = MTK_CRA_PRIORITY,
536 + .cra_flags = CRYPTO_ALG_ASYNC |
537 + CRYPTO_ALG_KERN_DRIVER_ONLY,
538 + .cra_blocksize = 1,
539 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
540 + .cra_alignmask = 0,
541 + .cra_init = mtk_aead_cra_init,
542 + .cra_exit = mtk_aead_cra_exit,
543 + .cra_module = THIS_MODULE,
544 + },
545 + },
546 +};
547 +
548 +struct mtk_alg_template mtk_alg_authenc_hmac_sha1_rfc3686_aes = {
549 + .type = MTK_ALG_TYPE_AEAD,
550 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA1 |
551 + MTK_MODE_CTR | MTK_MODE_RFC3686 | MTK_ALG_AES,
552 + .alg.aead = {
553 + .setkey = mtk_aead_setkey,
554 + .encrypt = mtk_aead_encrypt,
555 + .decrypt = mtk_aead_decrypt,
556 + .ivsize = CTR_RFC3686_IV_SIZE,
557 + .setauthsize = mtk_aead_setauthsize,
558 + .maxauthsize = SHA1_DIGEST_SIZE,
559 + .base = {
560 + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
561 + .cra_driver_name =
562 + "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))",
563 + .cra_priority = MTK_CRA_PRIORITY,
564 + .cra_flags = CRYPTO_ALG_ASYNC |
565 + CRYPTO_ALG_KERN_DRIVER_ONLY,
566 + .cra_blocksize = 1,
567 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
568 + .cra_alignmask = 0,
569 + .cra_init = mtk_aead_cra_init,
570 + .cra_exit = mtk_aead_cra_exit,
571 + .cra_module = THIS_MODULE,
572 + },
573 + },
574 +};
575 +
576 +struct mtk_alg_template mtk_alg_authenc_hmac_sha224_rfc3686_aes = {
577 + .type = MTK_ALG_TYPE_AEAD,
578 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA224 |
579 + MTK_MODE_CTR | MTK_MODE_RFC3686 | MTK_ALG_AES,
580 + .alg.aead = {
581 + .setkey = mtk_aead_setkey,
582 + .encrypt = mtk_aead_encrypt,
583 + .decrypt = mtk_aead_decrypt,
584 + .ivsize = CTR_RFC3686_IV_SIZE,
585 + .setauthsize = mtk_aead_setauthsize,
586 + .maxauthsize = SHA224_DIGEST_SIZE,
587 + .base = {
588 + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
589 + .cra_driver_name =
590 + "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))",
591 + .cra_priority = MTK_CRA_PRIORITY,
592 + .cra_flags = CRYPTO_ALG_ASYNC |
593 + CRYPTO_ALG_KERN_DRIVER_ONLY,
594 + .cra_blocksize = 1,
595 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
596 + .cra_alignmask = 0,
597 + .cra_init = mtk_aead_cra_init,
598 + .cra_exit = mtk_aead_cra_exit,
599 + .cra_module = THIS_MODULE,
600 + },
601 + },
602 +};
603 +
604 +struct mtk_alg_template mtk_alg_authenc_hmac_sha256_rfc3686_aes = {
605 + .type = MTK_ALG_TYPE_AEAD,
606 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA256 |
607 + MTK_MODE_CTR | MTK_MODE_RFC3686 | MTK_ALG_AES,
608 + .alg.aead = {
609 + .setkey = mtk_aead_setkey,
610 + .encrypt = mtk_aead_encrypt,
611 + .decrypt = mtk_aead_decrypt,
612 + .ivsize = CTR_RFC3686_IV_SIZE,
613 + .setauthsize = mtk_aead_setauthsize,
614 + .maxauthsize = SHA256_DIGEST_SIZE,
615 + .base = {
616 + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
617 + .cra_driver_name =
618 + "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))",
619 + .cra_priority = MTK_CRA_PRIORITY,
620 + .cra_flags = CRYPTO_ALG_ASYNC |
621 + CRYPTO_ALG_KERN_DRIVER_ONLY,
622 + .cra_blocksize = 1,
623 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
624 + .cra_alignmask = 0,
625 + .cra_init = mtk_aead_cra_init,
626 + .cra_exit = mtk_aead_cra_exit,
627 + .cra_module = THIS_MODULE,
628 + },
629 + },
630 +};
631 +#endif
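A minimal usage sketch for one of the AEAD templates above (hypothetical caller code, not part of the patch; the key blob must be packed in the authenc() rtattr format that crypto_authenc_extractkeys() parses):

	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	/* crypto_aead_setkey(tfm, authenc_key_blob, blob_len); */
	crypto_free_aead(tfm);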
632 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
633 +struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_des = {
634 + .type = MTK_ALG_TYPE_AEAD,
635 + .flags = MTK_HASH_HMAC | MTK_HASH_MD5 | MTK_MODE_CBC | MTK_ALG_DES,
636 + .alg.aead = {
637 + .setkey = mtk_aead_setkey,
638 + .encrypt = mtk_aead_encrypt,
639 + .decrypt = mtk_aead_decrypt,
640 + .ivsize = DES_BLOCK_SIZE,
641 + .setauthsize = mtk_aead_setauthsize,
642 + .maxauthsize = MD5_DIGEST_SIZE,
643 + .base = {
644 + .cra_name = "authenc(hmac(md5),cbc(des))",
645 + .cra_driver_name =
646 + "authenc(hmac(md5-eip93),cbc(des-eip93))",
647 + .cra_priority = MTK_CRA_PRIORITY,
648 + .cra_flags = CRYPTO_ALG_ASYNC |
649 + CRYPTO_ALG_KERN_DRIVER_ONLY,
650 + .cra_blocksize = DES_BLOCK_SIZE,
651 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
652 + .cra_alignmask = 0,
653 + .cra_init = mtk_aead_cra_init,
654 + .cra_exit = mtk_aead_cra_exit,
655 + .cra_module = THIS_MODULE,
656 + },
657 + },
658 +};
659 +
660 +struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_des = {
661 + .type = MTK_ALG_TYPE_AEAD,
662 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA1 | MTK_MODE_CBC | MTK_ALG_DES,
663 + .alg.aead = {
664 + .setkey = mtk_aead_setkey,
665 + .encrypt = mtk_aead_encrypt,
666 + .decrypt = mtk_aead_decrypt,
667 + .ivsize = DES_BLOCK_SIZE,
668 + .setauthsize = mtk_aead_setauthsize,
669 + .maxauthsize = SHA1_DIGEST_SIZE,
670 + .base = {
671 + .cra_name = "authenc(hmac(sha1),cbc(des))",
672 + .cra_driver_name =
673 + "authenc(hmac(sha1-eip93),cbc(des-eip93))",
674 + .cra_priority = MTK_CRA_PRIORITY,
675 + .cra_flags = CRYPTO_ALG_ASYNC |
676 + CRYPTO_ALG_KERN_DRIVER_ONLY,
677 + .cra_blocksize = DES_BLOCK_SIZE,
678 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
679 + .cra_alignmask = 0,
680 + .cra_init = mtk_aead_cra_init,
681 + .cra_exit = mtk_aead_cra_exit,
682 + .cra_module = THIS_MODULE,
683 + },
684 + },
685 +};
686 +
687 +struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_des = {
688 + .type = MTK_ALG_TYPE_AEAD,
689 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA224 | MTK_MODE_CBC | MTK_ALG_DES,
690 + .alg.aead = {
691 + .setkey = mtk_aead_setkey,
692 + .encrypt = mtk_aead_encrypt,
693 + .decrypt = mtk_aead_decrypt,
694 + .ivsize = DES_BLOCK_SIZE,
695 + .setauthsize = mtk_aead_setauthsize,
696 + .maxauthsize = SHA224_DIGEST_SIZE,
697 + .base = {
698 + .cra_name = "authenc(hmac(sha224),cbc(des))",
699 + .cra_driver_name =
700 + "authenc(hmac(sha224-eip93),cbc(des-eip93))",
701 + .cra_priority = MTK_CRA_PRIORITY,
702 + .cra_flags = CRYPTO_ALG_ASYNC |
703 + CRYPTO_ALG_KERN_DRIVER_ONLY,
704 + .cra_blocksize = DES_BLOCK_SIZE,
705 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
706 + .cra_alignmask = 0,
707 + .cra_init = mtk_aead_cra_init,
708 + .cra_exit = mtk_aead_cra_exit,
709 + .cra_module = THIS_MODULE,
710 + },
711 + },
712 +};
713 +
714 +struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_des = {
715 + .type = MTK_ALG_TYPE_AEAD,
716 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA256 | MTK_MODE_CBC | MTK_ALG_DES,
717 + .alg.aead = {
718 + .setkey = mtk_aead_setkey,
719 + .encrypt = mtk_aead_encrypt,
720 + .decrypt = mtk_aead_decrypt,
721 + .ivsize = DES_BLOCK_SIZE,
722 + .setauthsize = mtk_aead_setauthsize,
723 + .maxauthsize = SHA256_DIGEST_SIZE,
724 + .base = {
725 + .cra_name = "authenc(hmac(sha256),cbc(des))",
726 + .cra_driver_name =
727 + "authenc(hmac(sha256-eip93),cbc(des-eip93))",
728 + .cra_priority = MTK_CRA_PRIORITY,
729 + .cra_flags = CRYPTO_ALG_ASYNC |
730 + CRYPTO_ALG_KERN_DRIVER_ONLY,
731 + .cra_blocksize = DES_BLOCK_SIZE,
732 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
733 + .cra_alignmask = 0,
734 + .cra_init = mtk_aead_cra_init,
735 + .cra_exit = mtk_aead_cra_exit,
736 + .cra_module = THIS_MODULE,
737 + },
738 + },
739 +};
740 +
741 +struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_des3_ede = {
742 + .type = MTK_ALG_TYPE_AEAD,
743 + .flags = MTK_HASH_HMAC | MTK_HASH_MD5 | MTK_MODE_CBC | MTK_ALG_3DES,
744 + .alg.aead = {
745 + .setkey = mtk_aead_setkey,
746 + .encrypt = mtk_aead_encrypt,
747 + .decrypt = mtk_aead_decrypt,
748 + .ivsize = DES3_EDE_BLOCK_SIZE,
749 + .setauthsize = mtk_aead_setauthsize,
750 + .maxauthsize = MD5_DIGEST_SIZE,
751 + .base = {
752 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
753 + .cra_driver_name =
754 + "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))",
755 + .cra_priority = MTK_CRA_PRIORITY,
756 + .cra_flags = CRYPTO_ALG_ASYNC |
757 + CRYPTO_ALG_KERN_DRIVER_ONLY,
758 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
759 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
760 + .cra_alignmask = 0x0,
761 + .cra_init = mtk_aead_cra_init,
762 + .cra_exit = mtk_aead_cra_exit,
763 + .cra_module = THIS_MODULE,
764 + },
765 + },
766 +};
767 +
768 +struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_des3_ede = {
769 + .type = MTK_ALG_TYPE_AEAD,
770 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA1 | MTK_MODE_CBC | MTK_ALG_3DES,
771 + .alg.aead = {
772 + .setkey = mtk_aead_setkey,
773 + .encrypt = mtk_aead_encrypt,
774 + .decrypt = mtk_aead_decrypt,
775 + .ivsize = DES3_EDE_BLOCK_SIZE,
776 + .setauthsize = mtk_aead_setauthsize,
777 + .maxauthsize = SHA1_DIGEST_SIZE,
778 + .base = {
779 + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
780 + .cra_driver_name =
781 + "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))",
782 + .cra_priority = MTK_CRA_PRIORITY,
783 + .cra_flags = CRYPTO_ALG_ASYNC |
784 + CRYPTO_ALG_KERN_DRIVER_ONLY,
785 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
786 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
787 + .cra_alignmask = 0x0,
788 + .cra_init = mtk_aead_cra_init,
789 + .cra_exit = mtk_aead_cra_exit,
790 + .cra_module = THIS_MODULE,
791 + },
792 + },
793 +};
794 +
795 +struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_des3_ede = {
796 + .type = MTK_ALG_TYPE_AEAD,
797 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA224 | MTK_MODE_CBC | MTK_ALG_3DES,
798 + .alg.aead = {
799 + .setkey = mtk_aead_setkey,
800 + .encrypt = mtk_aead_encrypt,
801 + .decrypt = mtk_aead_decrypt,
802 + .ivsize = DES3_EDE_BLOCK_SIZE,
803 + .setauthsize = mtk_aead_setauthsize,
804 + .maxauthsize = SHA224_DIGEST_SIZE,
805 + .base = {
806 + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
807 + .cra_driver_name =
808 + "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))",
809 + .cra_priority = MTK_CRA_PRIORITY,
810 + .cra_flags = CRYPTO_ALG_ASYNC |
811 + CRYPTO_ALG_KERN_DRIVER_ONLY,
812 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
813 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
814 + .cra_alignmask = 0x0,
815 + .cra_init = mtk_aead_cra_init,
816 + .cra_exit = mtk_aead_cra_exit,
817 + .cra_module = THIS_MODULE,
818 + },
819 + },
820 +};
821 +
822 +struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_des3_ede = {
823 + .type = MTK_ALG_TYPE_AEAD,
824 + .flags = MTK_HASH_HMAC | MTK_HASH_SHA256 | MTK_MODE_CBC | MTK_ALG_3DES,
825 + .alg.aead = {
826 + .setkey = mtk_aead_setkey,
827 + .encrypt = mtk_aead_encrypt,
828 + .decrypt = mtk_aead_decrypt,
829 + .ivsize = DES3_EDE_BLOCK_SIZE,
830 + .setauthsize = mtk_aead_setauthsize,
831 + .maxauthsize = SHA256_DIGEST_SIZE,
832 + .base = {
833 + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
834 + .cra_driver_name =
835 + "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))",
836 + .cra_priority = MTK_CRA_PRIORITY,
837 + .cra_flags = CRYPTO_ALG_ASYNC |
838 + CRYPTO_ALG_KERN_DRIVER_ONLY,
839 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
840 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
841 + .cra_alignmask = 0x0,
842 + .cra_init = mtk_aead_cra_init,
843 + .cra_exit = mtk_aead_cra_exit,
844 + .cra_module = THIS_MODULE,
845 + },
846 + },
847 +};
848 +#endif
849 --- /dev/null
850 +++ b/drivers/crypto/mtk-eip93/eip93-aead.h
851 @@ -0,0 +1,31 @@
852 +/* SPDX-License-Identifier: GPL-2.0
853 + *
854 + * Copyright (C) 2019 - 2021
855 + *
856 + * Richard van Schagen <vschagen@icloud.com>
857 + */
858 +#ifndef _EIP93_AEAD_H_
859 +#define _EIP93_AEAD_H_
860 +
861 +extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_aes;
862 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_aes;
863 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_aes;
864 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_aes;
865 +extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_rfc3686_aes;
866 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_rfc3686_aes;
867 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_rfc3686_aes;
868 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_rfc3686_aes;
869 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
870 +extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_des;
871 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_des;
872 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_des;
873 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_des;
874 +extern struct mtk_alg_template mtk_alg_authenc_hmac_md5_cbc_des3_ede;
875 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha1_cbc_des3_ede;
876 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha224_cbc_des3_ede;
877 +extern struct mtk_alg_template mtk_alg_authenc_hmac_sha256_cbc_des3_ede;
878 +#endif
879 +
880 +void mtk_aead_handle_result(struct crypto_async_request *async, int err);
881 +
882 +#endif /* _EIP93_AEAD_H_ */
883 --- /dev/null
884 +++ b/drivers/crypto/mtk-eip93/eip93-aes.h
885 @@ -0,0 +1,15 @@
886 +/* SPDX-License-Identifier: GPL-2.0
887 + *
888 + * Copyright (C) 2019 - 2021
889 + *
890 + * Richard van Schagen <vschagen@icloud.com>
891 + */
892 +#ifndef _EIP93_AES_H_
893 +#define _EIP93_AES_H_
894 +
895 +extern struct mtk_alg_template mtk_alg_ecb_aes;
896 +extern struct mtk_alg_template mtk_alg_cbc_aes;
897 +extern struct mtk_alg_template mtk_alg_ctr_aes;
898 +extern struct mtk_alg_template mtk_alg_rfc3686_aes;
899 +
900 +#endif /* _EIP93_AES_H_ */
901 --- /dev/null
902 +++ b/drivers/crypto/mtk-eip93/eip93-cipher.c
903 @@ -0,0 +1,483 @@
904 +// SPDX-License-Identifier: GPL-2.0
905 +/*
906 + * Copyright (C) 2019 - 2021
907 + *
908 + * Richard van Schagen <vschagen@icloud.com>
909 + */
910 +
911 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
912 +#include <crypto/aes.h>
913 +#include <crypto/ctr.h>
914 +#endif
915 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
916 +#include <crypto/internal/des.h>
917 +#endif
918 +#include <linux/dma-mapping.h>
919 +
920 +#include "eip93-cipher.h"
921 +#include "eip93-common.h"
922 +#include "eip93-regs.h"
923 +
924 +void mtk_skcipher_handle_result(struct crypto_async_request *async, int err)
925 +{
926 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
927 + struct mtk_device *mtk = ctx->mtk;
928 + struct skcipher_request *req = skcipher_request_cast(async);
929 + struct mtk_cipher_reqctx *rctx = skcipher_request_ctx(req);
930 +
931 + mtk_unmap_dma(mtk, rctx, req->src, req->dst);
932 + mtk_handle_result(mtk, rctx, req->iv);
933 +
934 + skcipher_request_complete(req, err);
935 +}
936 +
937 +static inline bool mtk_skcipher_is_fallback(const struct crypto_tfm *tfm,
938 + u32 flags)
939 +{
940 + return (tfm->__crt_alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) &&
941 + !IS_RFC3686(flags);
942 +}
943 +
944 +static int mtk_skcipher_send_req(struct crypto_async_request *async)
945 +{
946 + struct skcipher_request *req = skcipher_request_cast(async);
947 + struct mtk_cipher_reqctx *rctx = skcipher_request_ctx(req);
948 + int err;
949 +
950 + err = check_valid_request(rctx);
951 +
952 + if (err) {
953 + skcipher_request_complete(req, err);
954 + return err;
955 + }
956 +
957 + return mtk_send_req(async, req->iv, rctx);
958 +}
959 +
960 +/* Crypto skcipher API functions */
961 +static int mtk_skcipher_cra_init(struct crypto_tfm *tfm)
962 +{
963 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
964 + struct mtk_alg_template *tmpl = container_of(tfm->__crt_alg,
965 + struct mtk_alg_template, alg.skcipher.base);
966 + bool fallback = mtk_skcipher_is_fallback(tfm, tmpl->flags);
967 +
968 + if (fallback) {
969 + ctx->fallback = crypto_alloc_skcipher(
970 + crypto_tfm_alg_name(tfm), 0, CRYPTO_ALG_NEED_FALLBACK);
971 + if (IS_ERR(ctx->fallback))
972 + return PTR_ERR(ctx->fallback);
973 + }
974 +
975 + crypto_skcipher_set_reqsize(
976 + __crypto_skcipher_cast(tfm),
977 + sizeof(struct mtk_cipher_reqctx) +
978 + (fallback ? crypto_skcipher_reqsize(ctx->fallback) :
979 + 0));
980 +
981 + ctx->mtk = tmpl->mtk;
982 +
983 + ctx->sa_in = kzalloc(sizeof(struct saRecord_s), GFP_KERNEL);
984 + if (!ctx->sa_in)
985 + return -ENOMEM;
986 +
987 + ctx->sa_base_in = dma_map_single(ctx->mtk->dev, ctx->sa_in,
988 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
989 +
990 + ctx->sa_out = kzalloc(sizeof(struct saRecord_s), GFP_KERNEL);
991 + if (!ctx->sa_out)
992 + return -ENOMEM;
993 +
994 + ctx->sa_base_out = dma_map_single(ctx->mtk->dev, ctx->sa_out,
995 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
996 + return 0;
997 +}
998 +
999 +static void mtk_skcipher_cra_exit(struct crypto_tfm *tfm)
1000 +{
1001 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
1002 +
1003 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_in,
1004 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
1005 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_out,
1006 + sizeof(struct saRecord_s), DMA_TO_DEVICE);
1007 + kfree(ctx->sa_in);
1008 + kfree(ctx->sa_out);
1009 +
1010 + crypto_free_skcipher(ctx->fallback);
1011 +}
1012 +
1013 +static int mtk_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key,
1014 + unsigned int len)
1015 +{
1016 + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
1017 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
1018 + struct mtk_alg_template *tmpl = container_of(tfm->__crt_alg,
1019 + struct mtk_alg_template, alg.skcipher.base);
1020 + struct saRecord_s *saRecord = ctx->sa_out;
1021 + u32 flags = tmpl->flags;
1022 + u32 nonce = 0;
1023 + unsigned int keylen = len;
1024 + int sa_size = sizeof(struct saRecord_s);
1025 + int err = -EINVAL;
1026 +
1027 + if (!key || !keylen)
1028 + return err;
1029 +
1030 + ctx->keylen = keylen;
1031 +
1032 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
1033 + if (IS_RFC3686(flags)) {
1034 + if (len < CTR_RFC3686_NONCE_SIZE)
1035 + return err;
1036 +
1037 + keylen = len - CTR_RFC3686_NONCE_SIZE;
1038 + memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE);
1039 + }
1040 +#endif
1041 +
1042 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
1043 + if (flags & MTK_ALG_DES) {
1044 + ctx->blksize = DES_BLOCK_SIZE;
1045 + err = verify_skcipher_des_key(ctfm, key);
1046 + }
1047 + if (flags & MTK_ALG_3DES) {
1048 + ctx->blksize = DES3_EDE_BLOCK_SIZE;
1049 + err = verify_skcipher_des3_key(ctfm, key);
1050 + }
1051 +#endif
1052 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
1053 + if (flags & MTK_ALG_AES) {
1054 + struct crypto_aes_ctx aes;
1055 + bool fallback = mtk_skcipher_is_fallback(tfm, flags);
1056 +
1057 + if (fallback && !IS_RFC3686(flags)) {
1058 + err = crypto_skcipher_setkey(ctx->fallback, key,
1059 + keylen);
1060 + if (err)
1061 + return err;
1062 + }
1063 +
1064 + ctx->blksize = AES_BLOCK_SIZE;
1065 + err = aes_expandkey(&aes, key, keylen);
1066 + }
1067 +#endif
1068 + if (err)
1069 + return err;
1070 +
1071 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_in, sa_size,
1072 + DMA_TO_DEVICE);
1073 +
1074 + dma_unmap_single(ctx->mtk->dev, ctx->sa_base_out, sa_size,
1075 + DMA_TO_DEVICE);
1076 +
1077 + mtk_set_saRecord(saRecord, keylen, flags);
1078 +
1079 + memcpy(saRecord->saKey, key, keylen);
1080 + ctx->saNonce = nonce;
1081 + saRecord->saNonce = nonce;
1082 + saRecord->saCmd0.bits.direction = 0;
1083 +
1084 + memcpy(ctx->sa_in, saRecord, sa_size);
1085 + ctx->sa_in->saCmd0.bits.direction = 1;
1086 +
1087 + ctx->sa_base_out = dma_map_single(ctx->mtk->dev, ctx->sa_out, sa_size,
1088 + DMA_TO_DEVICE);
1089 +
1090 + ctx->sa_base_in = dma_map_single(ctx->mtk->dev, ctx->sa_in, sa_size,
1091 + DMA_TO_DEVICE);
1092 + return err;
1093 +}
1094 +
1095 +static int mtk_skcipher_crypt(struct skcipher_request *req, bool encrypt)
1096 +{
1097 + struct mtk_cipher_reqctx *rctx = skcipher_request_ctx(req);
1098 + struct crypto_async_request *async = &req->base;
1099 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1100 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1101 + bool fallback = mtk_skcipher_is_fallback(req->base.tfm, rctx->flags);
1102 +
1103 + if (!req->cryptlen)
1104 + return 0;
1105 +
1106 + /*
1107 + * ECB and CBC algorithms require message lengths to be
1108 + * multiples of block size.
1109 + */
1110 + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
1111 + if (!IS_ALIGNED(req->cryptlen,
1112 + crypto_skcipher_blocksize(skcipher)))
1113 + return -EINVAL;
1114 +
1115 + if (fallback &&
1116 +	    req->cryptlen <= (ctx->keylen == AES_KEYSIZE_128 ?
1117 + CONFIG_CRYPTO_DEV_EIP93_AES_128_SW_MAX_LEN :
1118 + CONFIG_CRYPTO_DEV_EIP93_GENERIC_SW_MAX_LEN)) {
1119 + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
1120 + skcipher_request_set_callback(&rctx->fallback_req,
1121 + req->base.flags,
1122 + req->base.complete,
1123 + req->base.data);
1124 + skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1125 + req->dst, req->cryptlen, req->iv);
1126 + return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1127 + crypto_skcipher_decrypt(&rctx->fallback_req);
1128 + }
1129 +
1130 + rctx->assoclen = 0;
1131 + rctx->textsize = req->cryptlen;
1132 + rctx->authsize = 0;
1133 + rctx->sg_src = req->src;
1134 + rctx->sg_dst = req->dst;
1135 + rctx->ivsize = crypto_skcipher_ivsize(skcipher);
1136 + rctx->blksize = ctx->blksize;
1137 + rctx->flags |= MTK_DESC_SKCIPHER;
1138 + if (!IS_ECB(rctx->flags))
1139 + rctx->flags |= MTK_DESC_DMA_IV;
1140 +
1141 + return mtk_skcipher_send_req(async);
1142 +}
1143 +
1144 +static int mtk_skcipher_encrypt(struct skcipher_request *req)
1145 +{
1146 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1147 + struct mtk_cipher_reqctx *rctx = skcipher_request_ctx(req);
1148 + struct mtk_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
1149 + struct mtk_alg_template, alg.skcipher.base);
1150 +
1151 + rctx->flags = tmpl->flags;
1152 + rctx->flags |= MTK_ENCRYPT;
1153 + rctx->saRecord_base = ctx->sa_base_out;
1154 +
1155 + return mtk_skcipher_crypt(req, true);
1156 +}
1157 +
1158 +static int mtk_skcipher_decrypt(struct skcipher_request *req)
1159 +{
1160 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1161 + struct mtk_cipher_reqctx *rctx = skcipher_request_ctx(req);
1162 + struct mtk_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
1163 + struct mtk_alg_template, alg.skcipher.base);
1164 +
1165 + rctx->flags = tmpl->flags;
1166 + rctx->flags |= MTK_DECRYPT;
1167 + rctx->saRecord_base = ctx->sa_base_in;
1168 +
1169 + return mtk_skcipher_crypt(req, false);
1170 +}
1171 +
1172 +/* Available algorithms in this module */
1173 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
1174 +struct mtk_alg_template mtk_alg_ecb_aes = {
1175 + .type = MTK_ALG_TYPE_SKCIPHER,
1176 + .flags = MTK_MODE_ECB | MTK_ALG_AES,
1177 + .alg.skcipher = {
1178 + .setkey = mtk_skcipher_setkey,
1179 + .encrypt = mtk_skcipher_encrypt,
1180 + .decrypt = mtk_skcipher_decrypt,
1181 + .min_keysize = AES_MIN_KEY_SIZE,
1182 + .max_keysize = AES_MAX_KEY_SIZE,
1183 + .ivsize = 0,
1184 + .base = {
1185 + .cra_name = "ecb(aes)",
1186 + .cra_driver_name = "ecb(aes-eip93)",
1187 + .cra_priority = MTK_CRA_PRIORITY,
1188 + .cra_flags = CRYPTO_ALG_ASYNC |
1189 + CRYPTO_ALG_NEED_FALLBACK |
1190 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1191 + .cra_blocksize = AES_BLOCK_SIZE,
1192 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1193 + .cra_alignmask = 0xf,
1194 + .cra_init = mtk_skcipher_cra_init,
1195 + .cra_exit = mtk_skcipher_cra_exit,
1196 + .cra_module = THIS_MODULE,
1197 + },
1198 + },
1199 +};
1200 +
1201 +struct mtk_alg_template mtk_alg_cbc_aes = {
1202 + .type = MTK_ALG_TYPE_SKCIPHER,
1203 + .flags = MTK_MODE_CBC | MTK_ALG_AES,
1204 + .alg.skcipher = {
1205 + .setkey = mtk_skcipher_setkey,
1206 + .encrypt = mtk_skcipher_encrypt,
1207 + .decrypt = mtk_skcipher_decrypt,
1208 + .min_keysize = AES_MIN_KEY_SIZE,
1209 + .max_keysize = AES_MAX_KEY_SIZE,
1210 + .ivsize = AES_BLOCK_SIZE,
1211 + .base = {
1212 + .cra_name = "cbc(aes)",
1213 + .cra_driver_name = "cbc(aes-eip93)",
1214 + .cra_priority = MTK_CRA_PRIORITY,
1215 + .cra_flags = CRYPTO_ALG_ASYNC |
1216 + CRYPTO_ALG_NEED_FALLBACK |
1217 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1218 + .cra_blocksize = AES_BLOCK_SIZE,
1219 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1220 + .cra_alignmask = 0xf,
1221 + .cra_init = mtk_skcipher_cra_init,
1222 + .cra_exit = mtk_skcipher_cra_exit,
1223 + .cra_module = THIS_MODULE,
1224 + },
1225 + },
1226 +};
1227 +
1228 +struct mtk_alg_template mtk_alg_ctr_aes = {
1229 + .type = MTK_ALG_TYPE_SKCIPHER,
1230 + .flags = MTK_MODE_CTR | MTK_ALG_AES,
1231 + .alg.skcipher = {
1232 + .setkey = mtk_skcipher_setkey,
1233 + .encrypt = mtk_skcipher_encrypt,
1234 + .decrypt = mtk_skcipher_decrypt,
1235 + .min_keysize = AES_MIN_KEY_SIZE,
1236 + .max_keysize = AES_MAX_KEY_SIZE,
1237 + .ivsize = AES_BLOCK_SIZE,
1238 + .base = {
1239 + .cra_name = "ctr(aes)",
1240 + .cra_driver_name = "ctr(aes-eip93)",
1241 + .cra_priority = MTK_CRA_PRIORITY,
1242 + .cra_flags = CRYPTO_ALG_ASYNC |
1243 + CRYPTO_ALG_NEED_FALLBACK |
1244 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1245 + .cra_blocksize = 1,
1246 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1247 + .cra_alignmask = 0xf,
1248 + .cra_init = mtk_skcipher_cra_init,
1249 + .cra_exit = mtk_skcipher_cra_exit,
1250 + .cra_module = THIS_MODULE,
1251 + },
1252 + },
1253 +};
1254 +
1255 +struct mtk_alg_template mtk_alg_rfc3686_aes = {
1256 + .type = MTK_ALG_TYPE_SKCIPHER,
1257 + .flags = MTK_MODE_CTR | MTK_MODE_RFC3686 | MTK_ALG_AES,
1258 + .alg.skcipher = {
1259 + .setkey = mtk_skcipher_setkey,
1260 + .encrypt = mtk_skcipher_encrypt,
1261 + .decrypt = mtk_skcipher_decrypt,
1262 + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1263 + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1264 + .ivsize = CTR_RFC3686_IV_SIZE,
1265 + .base = {
1266 + .cra_name = "rfc3686(ctr(aes))",
1267 + .cra_driver_name = "rfc3686(ctr(aes-eip93))",
1268 + .cra_priority = MTK_CRA_PRIORITY,
1269 + .cra_flags = CRYPTO_ALG_ASYNC |
1270 + CRYPTO_ALG_NEED_FALLBACK |
1271 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1272 + .cra_blocksize = 1,
1273 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1274 + .cra_alignmask = 0xf,
1275 + .cra_init = mtk_skcipher_cra_init,
1276 + .cra_exit = mtk_skcipher_cra_exit,
1277 + .cra_module = THIS_MODULE,
1278 + },
1279 + },
1280 +};
1281 +#endif
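A minimal usage sketch for the AES skcipher templates above (hypothetical caller code, not part of the patch; key, src_sg, dst_sg, len, iv and err are assumed to be set up by the caller):

	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);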
1282 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
1283 +struct mtk_alg_template mtk_alg_ecb_des = {
1284 + .type = MTK_ALG_TYPE_SKCIPHER,
1285 + .flags = MTK_MODE_ECB | MTK_ALG_DES,
1286 + .alg.skcipher = {
1287 + .setkey = mtk_skcipher_setkey,
1288 + .encrypt = mtk_skcipher_encrypt,
1289 + .decrypt = mtk_skcipher_decrypt,
1290 + .min_keysize = DES_KEY_SIZE,
1291 + .max_keysize = DES_KEY_SIZE,
1292 + .ivsize = 0,
1293 + .base = {
1294 + .cra_name = "ecb(des)",
1295 +			.cra_driver_name = "ecb(des-eip93)",
1296 + .cra_priority = MTK_CRA_PRIORITY,
1297 + .cra_flags = CRYPTO_ALG_ASYNC |
1298 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1299 + .cra_blocksize = DES_BLOCK_SIZE,
1300 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1301 + .cra_alignmask = 0,
1302 + .cra_init = mtk_skcipher_cra_init,
1303 + .cra_exit = mtk_skcipher_cra_exit,
1304 + .cra_module = THIS_MODULE,
1305 + },
1306 + },
1307 +};
1308 +
1309 +struct mtk_alg_template mtk_alg_cbc_des = {
1310 + .type = MTK_ALG_TYPE_SKCIPHER,
1311 + .flags = MTK_MODE_CBC | MTK_ALG_DES,
1312 + .alg.skcipher = {
1313 + .setkey = mtk_skcipher_setkey,
1314 + .encrypt = mtk_skcipher_encrypt,
1315 + .decrypt = mtk_skcipher_decrypt,
1316 + .min_keysize = DES_KEY_SIZE,
1317 + .max_keysize = DES_KEY_SIZE,
1318 + .ivsize = DES_BLOCK_SIZE,
1319 + .base = {
1320 + .cra_name = "cbc(des)",
1321 + .cra_driver_name = "cbc(des-eip93)",
1322 + .cra_priority = MTK_CRA_PRIORITY,
1323 + .cra_flags = CRYPTO_ALG_ASYNC |
1324 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1325 + .cra_blocksize = DES_BLOCK_SIZE,
1326 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1327 + .cra_alignmask = 0,
1328 + .cra_init = mtk_skcipher_cra_init,
1329 + .cra_exit = mtk_skcipher_cra_exit,
1330 + .cra_module = THIS_MODULE,
1331 + },
1332 + },
1333 +};
1334 +
1335 +struct mtk_alg_template mtk_alg_ecb_des3_ede = {
1336 + .type = MTK_ALG_TYPE_SKCIPHER,
1337 + .flags = MTK_MODE_ECB | MTK_ALG_3DES,
1338 + .alg.skcipher = {
1339 + .setkey = mtk_skcipher_setkey,
1340 + .encrypt = mtk_skcipher_encrypt,
1341 + .decrypt = mtk_skcipher_decrypt,
1342 + .min_keysize = DES3_EDE_KEY_SIZE,
1343 + .max_keysize = DES3_EDE_KEY_SIZE,
1344 + .ivsize = 0,
1345 + .base = {
1346 + .cra_name = "ecb(des3_ede)",
1347 + .cra_driver_name = "ecb(des3_ede-eip93)",
1348 + .cra_priority = MTK_CRA_PRIORITY,
1349 + .cra_flags = CRYPTO_ALG_ASYNC |
1350 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1351 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1352 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1353 + .cra_alignmask = 0,
1354 + .cra_init = mtk_skcipher_cra_init,
1355 + .cra_exit = mtk_skcipher_cra_exit,
1356 + .cra_module = THIS_MODULE,
1357 + },
1358 + },
1359 +};
1360 +
1361 +struct mtk_alg_template mtk_alg_cbc_des3_ede = {
1362 + .type = MTK_ALG_TYPE_SKCIPHER,
1363 + .flags = MTK_MODE_CBC | MTK_ALG_3DES,
1364 + .alg.skcipher = {
1365 + .setkey = mtk_skcipher_setkey,
1366 + .encrypt = mtk_skcipher_encrypt,
1367 + .decrypt = mtk_skcipher_decrypt,
1368 + .min_keysize = DES3_EDE_KEY_SIZE,
1369 + .max_keysize = DES3_EDE_KEY_SIZE,
1370 + .ivsize = DES3_EDE_BLOCK_SIZE,
1371 + .base = {
1372 + .cra_name = "cbc(des3_ede)",
1373 + .cra_driver_name = "cbc(des3_ede-eip93)",
1374 + .cra_priority = MTK_CRA_PRIORITY,
1375 + .cra_flags = CRYPTO_ALG_ASYNC |
1376 + CRYPTO_ALG_KERN_DRIVER_ONLY,
1377 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1378 + .cra_ctxsize = sizeof(struct mtk_crypto_ctx),
1379 + .cra_alignmask = 0,
1380 + .cra_init = mtk_skcipher_cra_init,
1381 + .cra_exit = mtk_skcipher_cra_exit,
1382 + .cra_module = THIS_MODULE,
1383 + },
1384 + },
1385 +};
1386 +#endif
1387 --- /dev/null
1388 +++ b/drivers/crypto/mtk-eip93/eip93-cipher.h
1389 @@ -0,0 +1,66 @@
1390 +/* SPDX-License-Identifier: GPL-2.0
1391 + *
1392 + * Copyright (C) 2019 - 2021
1393 + *
1394 + * Richard van Schagen <vschagen@icloud.com>
1395 + */
1396 +#ifndef _EIP93_CIPHER_H_
1397 +#define _EIP93_CIPHER_H_
1398 +
1399 +#include "eip93-main.h"
1400 +
1401 +struct mtk_crypto_ctx {
1402 + struct mtk_device *mtk;
1403 + struct saRecord_s *sa_in;
1404 + dma_addr_t sa_base_in;
1405 + struct saRecord_s *sa_out;
1406 + dma_addr_t sa_base_out;
1407 + uint32_t saNonce;
1408 + int blksize;
1409 + /* AEAD specific */
1410 + unsigned int authsize;
1411 + unsigned int assoclen_in;
1412 + unsigned int assoclen_out;
1413 + bool in_first;
1414 + bool out_first;
1415 + struct crypto_shash *shash;
1416 + unsigned int keylen;
1417 + struct crypto_skcipher *fallback;
1418 +};
1419 +
1420 +struct mtk_cipher_reqctx {
1421 + unsigned long flags;
1422 + unsigned int blksize;
1423 + unsigned int ivsize;
1424 + unsigned int textsize;
1425 + unsigned int assoclen;
1426 + unsigned int authsize;
1427 + dma_addr_t saRecord_base;
1428 + struct saState_s *saState;
1429 + dma_addr_t saState_base;
1430 + uint32_t saState_idx;
1431 + struct eip93_descriptor_s *cdesc;
1432 + struct scatterlist *sg_src;
1433 + struct scatterlist *sg_dst;
1434 + int src_nents;
1435 + int dst_nents;
1436 + struct saState_s *saState_ctr;
1437 + dma_addr_t saState_base_ctr;
1438 + uint32_t saState_ctr_idx;
1439 +	struct skcipher_request fallback_req;	/* must be last: fallback reqsize is appended */
1440 +};
1441 +
1442 +int check_valid_request(struct mtk_cipher_reqctx *rctx);
1443 +
1444 +void mtk_unmap_dma(struct mtk_device *mtk, struct mtk_cipher_reqctx *rctx,
1445 + struct scatterlist *reqsrc, struct scatterlist *reqdst);
1446 +
1447 +void mtk_skcipher_handle_result(struct crypto_async_request *async, int err);
1448 +
1449 +int mtk_send_req(struct crypto_async_request *async,
1450 + const u8 *reqiv, struct mtk_cipher_reqctx *rctx);
1451 +
1452 +void mtk_handle_result(struct mtk_device *mtk, struct mtk_cipher_reqctx *rctx,
1453 + u8 *reqiv);
1454 +
1455 +#endif /* _EIP93_CIPHER_H_ */
1456 --- /dev/null
1457 +++ b/drivers/crypto/mtk-eip93/eip93-common.c
1458 @@ -0,0 +1,749 @@
1459 +// SPDX-License-Identifier: GPL-2.0
1460 +/*
1461 + * Copyright (C) 2019 - 2021
1462 + *
1463 + * Richard van Schagen <vschagen@icloud.com>
1464 + */
1465 +
1466 +#include <crypto/aes.h>
1467 +#include <crypto/ctr.h>
1468 +#include <crypto/hmac.h>
1469 +#include <crypto/sha1.h>
1470 +#include <crypto/sha2.h>
1471 +#include <linux/delay.h>
1472 +#include <linux/dma-mapping.h>
1473 +#include <linux/scatterlist.h>
1474 +
1475 +#include "eip93-cipher.h"
1476 +#include "eip93-common.h"
1477 +#include "eip93-main.h"
1478 +#include "eip93-regs.h"
1479 +
1480 +inline void *mtk_ring_next_wptr(struct mtk_device *mtk,
1481 + struct mtk_desc_ring *ring)
1482 +{
1483 + void *ptr = ring->write;
1484 +
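+	/* full: the write pointer would catch up with the read pointer,
+	 * either directly or across the wrap-around at base_end
+	 */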
1485 + if ((ring->write == ring->read - ring->offset) ||
1486 + (ring->read == ring->base && ring->write == ring->base_end))
1487 + return ERR_PTR(-ENOMEM);
1488 +
1489 + if (ring->write == ring->base_end)
1490 + ring->write = ring->base;
1491 + else
1492 + ring->write += ring->offset;
1493 +
1494 + return ptr;
1495 +}
1496 +
1497 +inline void *mtk_ring_next_rptr(struct mtk_device *mtk,
1498 + struct mtk_desc_ring *ring)
1499 +{
1500 + void *ptr = ring->read;
1501 +
1502 + if (ring->write == ring->read)
1503 + return ERR_PTR(-ENOENT);
1504 +
1505 + if (ring->read == ring->base_end)
1506 + ring->read = ring->base;
1507 + else
1508 + ring->read += ring->offset;
1509 +
1510 + return ptr;
1511 +}
1512 +
1513 +inline int mtk_put_descriptor(struct mtk_device *mtk,
1514 + struct eip93_descriptor_s *desc)
1515 +{
1516 + struct eip93_descriptor_s *cdesc;
1517 + struct eip93_descriptor_s *rdesc;
1518 + unsigned long irqflags;
1519 +
1520 + spin_lock_irqsave(&mtk->ring->write_lock, irqflags);
1521 +
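+	/* reserve a result-ring slot to pair with the command descriptor */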
1522 + rdesc = mtk_ring_next_wptr(mtk, &mtk->ring->rdr);
1523 +
1524 + if (IS_ERR(rdesc)) {
1525 + spin_unlock_irqrestore(&mtk->ring->write_lock, irqflags);
1526 + return -ENOENT;
1527 + }
1528 +
1529 + cdesc = mtk_ring_next_wptr(mtk, &mtk->ring->cdr);
1530 +
1531 + if (IS_ERR(cdesc)) {
1532 + spin_unlock_irqrestore(&mtk->ring->write_lock, irqflags);
1533 + return -ENOENT;
1534 + }
1535 +
1536 + memset(rdesc, 0, sizeof(struct eip93_descriptor_s));
1537 + memcpy(cdesc, desc, sizeof(struct eip93_descriptor_s));
1538 +
1539 + atomic_dec(&mtk->ring->free);
1540 + spin_unlock_irqrestore(&mtk->ring->write_lock, irqflags);
1541 +
1542 + return 0;
1543 +}
1544 +
1545 +inline void *mtk_get_descriptor(struct mtk_device *mtk)
1546 +{
1547 + struct eip93_descriptor_s *cdesc;
1548 + void *ptr;
1549 + unsigned long irqflags;
1550 +
1551 + spin_lock_irqsave(&mtk->ring->read_lock, irqflags);
1552 +
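+	/* retire the command descriptor, then return its paired result */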
1553 + cdesc = mtk_ring_next_rptr(mtk, &mtk->ring->cdr);
1554 +
1555 + if (IS_ERR(cdesc)) {
1556 + spin_unlock_irqrestore(&mtk->ring->read_lock, irqflags);
1557 + return ERR_PTR(-ENOENT);
1558 + }
1559 +
1560 + memset(cdesc, 0, sizeof(struct eip93_descriptor_s));
1561 +
1562 + ptr = mtk_ring_next_rptr(mtk, &mtk->ring->rdr);
1563 + if (IS_ERR(ptr)) {
1564 + spin_unlock_irqrestore(&mtk->ring->read_lock, irqflags);
1565 + return ERR_PTR(-ENOENT);
1566 + }
1567 +
1568 + atomic_inc(&mtk->ring->free);
1569 + spin_unlock_irqrestore(&mtk->ring->read_lock, irqflags);
1570 +
1571 + return ptr;
1572 +}
1573 +
1574 +inline int mtk_get_free_saState(struct mtk_device *mtk)
1575 +{
1576 + struct mtk_state_pool *saState_pool;
1577 + int i;
1578 +
1579 + for (i = 0; i < MTK_RING_SIZE; i++) {
1580 + saState_pool = &mtk->ring->saState_pool[i];
1581 +		if (!saState_pool->in_use) {
1582 + saState_pool->in_use = true;
1583 + return i;
1584 + }
1585 +
1586 + }
1587 +
1588 + return -ENOENT;
1589 +}
1590 +
1591 +static inline void mtk_free_sg_copy(const int len, struct scatterlist **sg)
1592 +{
1593 + if (!*sg || !len)
1594 + return;
1595 +
1596 + free_pages((unsigned long)sg_virt(*sg), get_order(len));
1597 + kfree(*sg);
1598 + *sg = NULL;
1599 +}
1600 +
1601 +static inline int mtk_make_sg_copy(struct scatterlist *src,
1602 + struct scatterlist **dst,
1603 + const uint32_t len, const bool copy)
1604 +{
1605 + void *pages;
1606 +
1607 + *dst = kmalloc(sizeof(**dst), GFP_KERNEL);
1608 + if (!*dst)
1609 + return -ENOMEM;
1610 +
1611 +
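+	/* contiguous DMA-able bounce buffer for the unaligned SG data */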
1612 + pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
1613 + get_order(len));
1614 +
1615 + if (!pages) {
1616 + kfree(*dst);
1617 + *dst = NULL;
1618 + return -ENOMEM;
1619 + }
1620 +
1621 + sg_init_table(*dst, 1);
1622 + sg_set_buf(*dst, pages, len);
1623 +
1624 + /* copy only as requested */
1625 + if (copy)
1626 + sg_copy_to_buffer(src, sg_nents(src), pages, len);
1627 +
1628 + return 0;
1629 +}
1630 +
1631 +static inline bool mtk_is_sg_aligned(struct scatterlist *sg, u32 len,
1632 + const int blksize)
1633 +{
1634 + int nents;
1635 +
1636 + for (nents = 0; sg; sg = sg_next(sg), ++nents) {
1637 + if (!IS_ALIGNED(sg->offset, 4))
1638 + return false;
1639 +
1640 + if (len <= sg->length) {
1641 + if (!IS_ALIGNED(len, blksize))
1642 + return false;
1643 +
1644 + return true;
1645 + }
1646 +
1647 + if (!IS_ALIGNED(sg->length, blksize))
1648 + return false;
1649 +
1650 + len -= sg->length;
1651 + }
1652 + return false;
1653 +}
1654 +
1655 +int check_valid_request(struct mtk_cipher_reqctx *rctx)
1656 +{
1657 + struct scatterlist *src = rctx->sg_src;
1658 + struct scatterlist *dst = rctx->sg_dst;
1659 +	int src_nents, dst_nents;
1660 + u32 textsize = rctx->textsize;
1661 + u32 authsize = rctx->authsize;
1662 + u32 blksize = rctx->blksize;
1663 + u32 totlen_src = rctx->assoclen + rctx->textsize;
1664 + u32 totlen_dst = rctx->assoclen + rctx->textsize;
1665 + u32 copy_len;
1666 + bool src_align, dst_align;
1667 + int err = -EINVAL;
1668 +
1669 + if (!IS_CTR(rctx->flags)) {
1670 + if (!IS_ALIGNED(textsize, blksize))
1671 + return err;
1672 + }
1673 +
1674 + if (authsize) {
1675 + if (IS_ENCRYPT(rctx->flags))
1676 + totlen_dst += authsize;
1677 + else
1678 + totlen_src += authsize;
1679 + }
1680 +
1681 + src_nents = sg_nents_for_len(src, totlen_src);
1682 + dst_nents = sg_nents_for_len(dst, totlen_dst);
1683 +
1684 + if (src == dst) {
1685 + src_nents = max(src_nents, dst_nents);
1686 + dst_nents = src_nents;
1687 + if (unlikely((totlen_src || totlen_dst) && (src_nents <= 0)))
1688 + return err;
1689 +
1690 + } else {
1691 + if (unlikely(totlen_src && (src_nents <= 0)))
1692 + return err;
1693 +
1694 + if (unlikely(totlen_dst && (dst_nents <= 0)))
1695 + return err;
1696 + }
1697 +
1698 + if (authsize) {
1699 + if (dst_nents == 1 && src_nents == 1) {
1700 + src_align = mtk_is_sg_aligned(src, totlen_src, blksize);
1701 + if (src == dst)
1702 + dst_align = src_align;
1703 + else
1704 + dst_align = mtk_is_sg_aligned(dst,
1705 + totlen_dst, blksize);
1706 + } else {
1707 + src_align = false;
1708 + dst_align = false;
1709 + }
1710 + } else {
1711 + src_align = mtk_is_sg_aligned(src, totlen_src, blksize);
1712 + if (src == dst)
1713 + dst_align = src_align;
1714 + else
1715 + dst_align = mtk_is_sg_aligned(dst, totlen_dst, blksize);
1716 + }
1717 +
1718 + copy_len = max(totlen_src, totlen_dst);
1719 + if (!src_align) {
1720 + err = mtk_make_sg_copy(src, &rctx->sg_src, copy_len, true);
1721 + if (err)
1722 + return err;
1723 + }
1724 +
1725 + if (!dst_align) {
1726 + err = mtk_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
1727 + if (err)
1728 + return err;
1729 + }
1730 +
1731 + rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
1732 + rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
1733 +
1734 + return 0;
1735 +}
1736 +/*
1737 + * Set saRecord function:
1738 + * Even though the saRecord is zero-initialized, keep the explicit " = 0" assignments for readability.
1739 + */
1740 +void mtk_set_saRecord(struct saRecord_s *saRecord, const unsigned int keylen,
1741 + const u32 flags)
1742 +{
1743 + saRecord->saCmd0.bits.ivSource = 2;
1744 + if (IS_ECB(flags))
1745 + saRecord->saCmd0.bits.saveIv = 0;
1746 + else
1747 + saRecord->saCmd0.bits.saveIv = 1;
1748 +
1749 + saRecord->saCmd0.bits.opGroup = 0;
1750 + saRecord->saCmd0.bits.opCode = 0;
1751 +
1752 + switch ((flags & MTK_ALG_MASK)) {
1753 + case MTK_ALG_AES:
1754 + saRecord->saCmd0.bits.cipher = 3;
1755 + saRecord->saCmd1.bits.aesKeyLen = keylen >> 3;
1756 + break;
1757 + case MTK_ALG_3DES:
1758 + saRecord->saCmd0.bits.cipher = 1;
1759 + break;
1760 + case MTK_ALG_DES:
1761 + saRecord->saCmd0.bits.cipher = 0;
1762 + break;
1763 + default:
1764 + saRecord->saCmd0.bits.cipher = 15;
1765 + }
1766 +
1767 + switch ((flags & MTK_HASH_MASK)) {
1768 + case MTK_HASH_SHA256:
1769 + saRecord->saCmd0.bits.hash = 3;
1770 + break;
1771 + case MTK_HASH_SHA224:
1772 + saRecord->saCmd0.bits.hash = 2;
1773 + break;
1774 + case MTK_HASH_SHA1:
1775 + saRecord->saCmd0.bits.hash = 1;
1776 + break;
1777 + case MTK_HASH_MD5:
1778 + saRecord->saCmd0.bits.hash = 0;
1779 + break;
1780 + default:
1781 + saRecord->saCmd0.bits.hash = 15;
1782 + }
1783 +
1784 + saRecord->saCmd0.bits.hdrProc = 0;
1785 + saRecord->saCmd0.bits.padType = 3;
1786 + saRecord->saCmd0.bits.extPad = 0;
1787 + saRecord->saCmd0.bits.scPad = 0;
1788 +
1789 + switch ((flags & MTK_MODE_MASK)) {
1790 + case MTK_MODE_CBC:
1791 + saRecord->saCmd1.bits.cipherMode = 1;
1792 + break;
1793 + case MTK_MODE_CTR:
1794 + saRecord->saCmd1.bits.cipherMode = 2;
1795 + break;
1796 + case MTK_MODE_ECB:
1797 + saRecord->saCmd1.bits.cipherMode = 0;
1798 + break;
1799 + }
1800 +
1801 + saRecord->saCmd1.bits.byteOffset = 0;
1802 + saRecord->saCmd1.bits.hashCryptOffset = 0;
1803 + saRecord->saCmd0.bits.digestLength = 0;
1804 + saRecord->saCmd1.bits.copyPayload = 0;
1805 +
1806 + if (IS_HMAC(flags)) {
1807 + saRecord->saCmd1.bits.hmac = 1;
1808 + saRecord->saCmd1.bits.copyDigest = 1;
1809 + saRecord->saCmd1.bits.copyHeader = 1;
1810 + } else {
1811 + saRecord->saCmd1.bits.hmac = 0;
1812 + saRecord->saCmd1.bits.copyDigest = 0;
1813 + saRecord->saCmd1.bits.copyHeader = 0;
1814 + }
1815 +
1816 + saRecord->saCmd1.bits.seqNumCheck = 0;
1817 + saRecord->saSpi = 0x0;
1818 + saRecord->saSeqNumMask[0] = 0xFFFFFFFF;
1819 + saRecord->saSeqNumMask[1] = 0x0;
1820 +}
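+
+/*
+ * Example (informative): for authenc(hmac(sha256),cbc(aes)) with a
+ * 256-bit AES key the above yields saCmd0.cipher = 3 (AES),
+ * saCmd0.hash = 3 (SHA256), saCmd1.cipherMode = 1 (CBC),
+ * saCmd1.aesKeyLen = 4 (keylen >> 3) and the HMAC copy bits set,
+ * i.e. a single SA record fully describes the transform.
+ */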
1821 +
1822 +/*
1823 + * Poor man's scatter/gather function:
1824 + * Create a descriptor for every segment to avoid copying buffers.
1825 + * For performance it is better to let the hardware perform the
1826 + * multiple DMA transfers than to linearize the data in software.
1827 + */
1828 +static inline int mtk_scatter_combine(struct mtk_device *mtk,
1829 + struct mtk_cipher_reqctx *rctx,
1830 + u32 datalen, u32 split, int offsetin)
1831 +{
1832 + struct eip93_descriptor_s *cdesc = rctx->cdesc;
1833 + struct scatterlist *sgsrc = rctx->sg_src;
1834 + struct scatterlist *sgdst = rctx->sg_dst;
1835 + unsigned int remainin = sg_dma_len(sgsrc);
1836 + unsigned int remainout = sg_dma_len(sgdst);
1837 + dma_addr_t saddr = sg_dma_address(sgsrc);
1838 + dma_addr_t daddr = sg_dma_address(sgdst);
1839 + dma_addr_t stateAddr;
1840 + u32 srcAddr, dstAddr, len, n;
1841 + bool nextin = false;
1842 + bool nextout = false;
1843 + int offsetout = 0;
1844 + int ndesc_cdr = 0, err;
1845 +
1846 + if (IS_ECB(rctx->flags))
1847 + rctx->saState_base = 0;
1848 +
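+	/*
+	 * "split" marks the byte at which a 32-bit CTR counter would
+	 * overflow: the first chunk is processed with the original IV
+	 * (saState_base_ctr), the remainder with the manually
+	 * incremented IV (saState_base).
+	 */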
1849 + if (split < datalen) {
1850 + stateAddr = rctx->saState_base_ctr;
1851 + n = split;
1852 + } else {
1853 + stateAddr = rctx->saState_base;
1854 + n = datalen;
1855 + }
1856 +
1857 + do {
1858 + if (nextin) {
1859 + sgsrc = sg_next(sgsrc);
1860 + remainin = sg_dma_len(sgsrc);
1861 + if (remainin == 0)
1862 + continue;
1863 +
1864 + saddr = sg_dma_address(sgsrc);
1865 + offsetin = 0;
1866 + nextin = false;
1867 + }
1868 +
1869 + if (nextout) {
1870 + sgdst = sg_next(sgdst);
1871 + remainout = sg_dma_len(sgdst);
1872 + if (remainout == 0)
1873 + continue;
1874 +
1875 + daddr = sg_dma_address(sgdst);
1876 + offsetout = 0;
1877 + nextout = false;
1878 + }
1879 + srcAddr = saddr + offsetin;
1880 + dstAddr = daddr + offsetout;
1881 +
1882 + if (remainin == remainout) {
1883 + len = remainin;
1884 + if (len > n) {
1885 + len = n;
1886 + remainin -= n;
1887 + remainout -= n;
1888 + offsetin += n;
1889 + offsetout += n;
1890 + } else {
1891 + nextin = true;
1892 + nextout = true;
1893 + }
1894 + } else if (remainin < remainout) {
1895 + len = remainin;
1896 + if (len > n) {
1897 + len = n;
1898 + remainin -= n;
1899 + remainout -= n;
1900 + offsetin += n;
1901 + offsetout += n;
1902 + } else {
1903 + offsetout += len;
1904 + remainout -= len;
1905 + nextin = true;
1906 + }
1907 + } else {
1908 + len = remainout;
1909 + if (len > n) {
1910 + len = n;
1911 + remainin -= n;
1912 + remainout -= n;
1913 + offsetin += n;
1914 + offsetout += n;
1915 + } else {
1916 + offsetin += len;
1917 + remainin -= len;
1918 + nextout = true;
1919 + }
1920 + }
1921 + n -= len;
1922 +
1923 + cdesc->srcAddr = srcAddr;
1924 + cdesc->dstAddr = dstAddr;
1925 + cdesc->stateAddr = stateAddr;
1926 + cdesc->peLength.bits.peReady = 0;
1927 + cdesc->peLength.bits.byPass = 0;
1928 + cdesc->peLength.bits.length = len;
1929 + cdesc->peLength.bits.hostReady = 1;
1930 +
1931 + if (n == 0) {
1932 + n = datalen - split;
1933 + split = datalen;
1934 + stateAddr = rctx->saState_base;
1935 + }
1936 +
1937 + if (n == 0)
1938 + cdesc->userId |= MTK_DESC_LAST;
1939 +
1940 +	/* The ring may be full: retry after a short delay. No rollback
1941 +	 * is needed; this could be refined by throttling at MTK_RING_BUSY.
1942 +	 */
1943 +again:
1944 + err = mtk_put_descriptor(mtk, cdesc);
1945 + if (err) {
1946 + udelay(500);
1947 + goto again;
1948 + }
1949 + /* Writing new descriptor count starts DMA action */
1950 + writel(1, mtk->base + EIP93_REG_PE_CD_COUNT);
1951 +
1952 + ndesc_cdr++;
1953 + } while (n);
1954 +
1955 + return -EINPROGRESS;
1956 +}
1957 +
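+/*
+ * Prepare the saState for this request (IV, RFC3686 nonce and CTR
+ * overflow handling), map the scatterlists for DMA and queue the
+ * command descriptors via mtk_scatter_combine(). Completion is
+ * reported through the result ring, hence -EINPROGRESS.
+ */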
1958 +int mtk_send_req(struct crypto_async_request *async,
1959 + const u8 *reqiv, struct mtk_cipher_reqctx *rctx)
1960 +{
1961 + struct mtk_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
1962 + struct mtk_device *mtk = ctx->mtk;
1963 + struct scatterlist *src = rctx->sg_src;
1964 + struct scatterlist *dst = rctx->sg_dst;
1965 + struct saState_s *saState;
1966 + struct mtk_state_pool *saState_pool;
1967 + struct eip93_descriptor_s cdesc;
1968 + u32 flags = rctx->flags;
1969 + int idx;
1970 + int offsetin = 0, err = -ENOMEM;
1971 + u32 datalen = rctx->assoclen + rctx->textsize;
1972 + u32 split = datalen;
1973 + u32 start, end, ctr, blocks;
1974 + u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
1975 +
1976 + rctx->saState_ctr = NULL;
1977 + rctx->saState = NULL;
1978 +
1979 + if (IS_ECB(flags))
1980 + goto skip_iv;
1981 +
1982 + memcpy(iv, reqiv, rctx->ivsize);
1983 +
1984 + if (!IS_ALIGNED((u32)reqiv, rctx->ivsize) || IS_RFC3686(flags)) {
1985 + rctx->flags &= ~MTK_DESC_DMA_IV;
1986 + flags = rctx->flags;
1987 + }
1988 +
1989 + if (IS_DMA_IV(flags)) {
1990 + rctx->saState = (void *)reqiv;
1991 + } else {
1992 + idx = mtk_get_free_saState(mtk);
1993 + if (idx < 0)
1994 + goto send_err;
1995 + saState_pool = &mtk->ring->saState_pool[idx];
1996 + rctx->saState_idx = idx;
1997 + rctx->saState = saState_pool->base;
1998 + rctx->saState_base = saState_pool->base_dma;
1999 + memcpy(rctx->saState->stateIv, iv, rctx->ivsize);
2000 + }
2001 +
2002 + saState = rctx->saState;
2003 +
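+	/*
+	 * RFC3686 counter block layout: 32-bit nonce | 64-bit IV |
+	 * 32-bit block counter starting at 1 (big endian).
+	 */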
2004 + if (IS_RFC3686(flags)) {
2005 + saState->stateIv[0] = ctx->saNonce;
2006 + saState->stateIv[1] = iv[0];
2007 + saState->stateIv[2] = iv[1];
2008 + saState->stateIv[3] = cpu_to_be32(1);
2009 + } else if (!IS_HMAC(flags) && IS_CTR(flags)) {
2010 + /* Compute data length. */
2011 + blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
2012 + ctr = be32_to_cpu(iv[3]);
2013 + /* Check 32bit counter overflow. */
2014 + start = ctr;
2015 + end = start + blocks - 1;
2016 + if (end < start) {
2017 + split = AES_BLOCK_SIZE * -start;
2018 + /*
2019 + * Increment the counter manually to cope with
2020 + * the hardware counter overflow.
2021 + */
2022 + iv[3] = 0xffffffff;
2023 + crypto_inc((u8 *)iv, AES_BLOCK_SIZE);
2024 + idx = mtk_get_free_saState(mtk);
2025 + if (idx < 0)
2026 + goto free_state;
2027 + saState_pool = &mtk->ring->saState_pool[idx];
2028 + rctx->saState_ctr_idx = idx;
2029 + rctx->saState_ctr = saState_pool->base;
2030 + rctx->saState_base_ctr = saState_pool->base_dma;
2031 +
2032 + memcpy(rctx->saState_ctr->stateIv, reqiv, rctx->ivsize);
2033 + memcpy(saState->stateIv, iv, rctx->ivsize);
2034 + }
2035 + }
2036 +
2037 + if (IS_DMA_IV(flags)) {
2038 + rctx->saState_base = dma_map_single(mtk->dev, (void *)reqiv,
2039 + rctx->ivsize, DMA_TO_DEVICE);
2040 + if (dma_mapping_error(mtk->dev, rctx->saState_base))
2041 + goto free_state;
2042 + }
2043 +skip_iv:
2044 + cdesc.peCrtlStat.bits.hostReady = 1;
2045 + cdesc.peCrtlStat.bits.prngMode = 0;
2046 + cdesc.peCrtlStat.bits.hashFinal = 0;
2047 + cdesc.peCrtlStat.bits.padCrtlStat = 0;
2048 + cdesc.peCrtlStat.bits.peReady = 0;
2049 + cdesc.saAddr = rctx->saRecord_base;
2050 + cdesc.arc4Addr = (uint32_t)async;
2051 + cdesc.userId = flags;
2052 + rctx->cdesc = &cdesc;
2053 +
2054 +	/* Map the destination DMA_BIDIRECTIONAL so the cache lines are
2055 +	 * also invalidated (implies __dma_cache_wback_inv).
2056 +	 */
2057 + dma_map_sg(mtk->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
2058 + if (src != dst)
2059 + dma_map_sg(mtk->dev, src, rctx->src_nents, DMA_TO_DEVICE);
2060 +
2061 + err = mtk_scatter_combine(mtk, rctx, datalen, split, offsetin);
2062 +
2063 + return err;
2064 +
2065 +free_state:
2066 + if (rctx->saState) {
2067 + saState_pool = &mtk->ring->saState_pool[rctx->saState_idx];
2068 + saState_pool->in_use = false;
2069 + }
2070 +
2071 + if (rctx->saState_ctr) {
2072 + saState_pool = &mtk->ring->saState_pool[rctx->saState_ctr_idx];
2073 + saState_pool->in_use = false;
2074 + }
2075 +send_err:
2076 + return err;
2077 +}
2078 +
2079 +void mtk_unmap_dma(struct mtk_device *mtk, struct mtk_cipher_reqctx *rctx,
2080 + struct scatterlist *reqsrc, struct scatterlist *reqdst)
2081 +{
2082 + u32 len = rctx->assoclen + rctx->textsize;
2083 + u32 authsize = rctx->authsize;
2084 + u32 flags = rctx->flags;
2085 + u32 *otag;
2086 + int i;
2087 +
2088 + if (rctx->sg_src == rctx->sg_dst) {
2089 + dma_unmap_sg(mtk->dev, rctx->sg_dst, rctx->dst_nents,
2090 + DMA_BIDIRECTIONAL);
2091 + goto process_tag;
2092 + }
2093 +
2094 + dma_unmap_sg(mtk->dev, rctx->sg_src, rctx->src_nents,
2095 + DMA_TO_DEVICE);
2096 +
2097 + if (rctx->sg_src != reqsrc)
2098 + mtk_free_sg_copy(len + rctx->authsize, &rctx->sg_src);
2099 +
2100 + dma_unmap_sg(mtk->dev, rctx->sg_dst, rctx->dst_nents,
2101 + DMA_BIDIRECTIONAL);
2102 +
2103 + /* SHA tags need conversion from net-to-host */
2104 +process_tag:
2105 + if (IS_DECRYPT(flags))
2106 + authsize = 0;
2107 +
2108 + if (authsize) {
2109 + if (!IS_HASH_MD5(flags)) {
2110 + otag = sg_virt(rctx->sg_dst) + len;
2111 + for (i = 0; i < (authsize / 4); i++)
2112 + otag[i] = ntohl(otag[i]);
2113 + }
2114 + }
2115 +
2116 + if (rctx->sg_dst != reqdst) {
2117 + sg_copy_from_buffer(reqdst, sg_nents(reqdst),
2118 + sg_virt(rctx->sg_dst), len + authsize);
2119 + mtk_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
2120 + }
2121 +}
2122 +
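+/*
+ * Per-request cleanup: copy the chaining IV back to the caller and
+ * release any saState pool entries taken in mtk_send_req().
+ */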
2123 +void mtk_handle_result(struct mtk_device *mtk, struct mtk_cipher_reqctx *rctx,
2124 + u8 *reqiv)
2125 +{
2126 + struct mtk_state_pool *saState_pool;
2127 +
2128 + if (IS_DMA_IV(rctx->flags))
2129 + dma_unmap_single(mtk->dev, rctx->saState_base, rctx->ivsize,
2130 + DMA_TO_DEVICE);
2131 +
2132 + if (!IS_ECB(rctx->flags))
2133 + memcpy(reqiv, rctx->saState->stateIv, rctx->ivsize);
2134 +
2135 + if ((rctx->saState) && !(IS_DMA_IV(rctx->flags))) {
2136 + saState_pool = &mtk->ring->saState_pool[rctx->saState_idx];
2137 + saState_pool->in_use = false;
2138 + }
2139 +
2140 + if (rctx->saState_ctr) {
2141 + saState_pool = &mtk->ring->saState_pool[rctx->saState_ctr_idx];
2142 + saState_pool->in_use = false;
2143 + }
2144 +}
2145 +
2146 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_HMAC)
2147 +/* Precompute the HMAC ipad/opad digests for the authentication key */
2148 +int mtk_authenc_setkey(struct crypto_shash *cshash, struct saRecord_s *sa,
2149 + const u8 *authkey, unsigned int authkeylen)
2150 +{
2151 + int bs = crypto_shash_blocksize(cshash);
2152 + int ds = crypto_shash_digestsize(cshash);
2153 + int ss = crypto_shash_statesize(cshash);
2154 + u8 *ipad, *opad;
2155 +	int i, err;
2156 +
2157 + SHASH_DESC_ON_STACK(shash, cshash);
2158 +
2159 + shash->tfm = cshash;
2160 +
2161 +	/* auth key
2162 +	 *
2163 +	 * The EIP93 can only authenticate with a precomputed hash of the
2164 +	 * key, so derive it with a software shash here.
2165 +	 */
2166 + ipad = kcalloc(2, SHA256_BLOCK_SIZE + ss, GFP_KERNEL);
2167 + if (!ipad)
2168 + return -ENOMEM;
2169 +
2170 + opad = ipad + SHA256_BLOCK_SIZE + ss;
2171 +
2172 + if (authkeylen > bs) {
2173 + err = crypto_shash_digest(shash, authkey,
2174 + authkeylen, ipad);
2175 + if (err)
2176 +			goto exit;
2177 +
2178 + authkeylen = ds;
2179 +	} else {
2180 +		memcpy(ipad, authkey, authkeylen);
+	}
2181 +
2182 + memset(ipad + authkeylen, 0, bs - authkeylen);
2183 + memcpy(opad, ipad, bs);
2184 +
2185 + for (i = 0; i < bs; i++) {
2186 + ipad[i] ^= HMAC_IPAD_VALUE;
2187 + opad[i] ^= HMAC_OPAD_VALUE;
2188 + }
2189 +
2190 + err = crypto_shash_init(shash) ?:
2191 + crypto_shash_update(shash, ipad, bs) ?:
2192 + crypto_shash_export(shash, ipad) ?:
2193 + crypto_shash_init(shash) ?:
2194 + crypto_shash_update(shash, opad, bs) ?:
2195 + crypto_shash_export(shash, opad);
2196 +
2197 +	if (err)
2198 +		goto exit;
2199 +
2200 +	/* store the precomputed ipad/opad digests in the SA record */
2201 +	memcpy(&sa->saIDigest, ipad, SHA256_DIGEST_SIZE);
2202 +	memcpy(&sa->saODigest, opad, SHA256_DIGEST_SIZE);
2203 +exit:
2204 +	kfree(ipad);
2205 +	return err;
2206 +}
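+
+/*
+ * Example (informative, names illustrative): an AEAD setkey would
+ * split the key with crypto_authenc_extractkeys() and call
+ * mtk_authenc_setkey(ctx->shash, ctx->sa, keys.authkey,
+ * keys.authkeylen) to fill saIDigest/saODigest before copying the
+ * encryption key into saKey.
+ */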
2207 +#endif
2208 --- /dev/null
2209 +++ b/drivers/crypto/mtk-eip93/eip93-common.h
2210 @@ -0,0 +1,28 @@
2211 +/* SPDX-License-Identifier: GPL-2.0
2212 + *
2213 + * Copyright (C) 2019 - 2021
2214 + *
2215 + * Richard van Schagen <vschagen@icloud.com>
2216 + */
2217 +
2218 +#ifndef _EIP93_COMMON_H_
2219 +#define _EIP93_COMMON_H_
2220 +
2221 +#include "eip93-main.h"
2222 +
2223 +inline int mtk_put_descriptor(struct mtk_device *mtk,
2224 + struct eip93_descriptor_s *desc);
2225 +
2226 +inline void *mtk_get_descriptor(struct mtk_device *mtk);
2227 +
2228 +inline int mtk_get_free_saState(struct mtk_device *mtk);
2229 +
2230 +void mtk_set_saRecord(struct saRecord_s *saRecord, const unsigned int keylen,
2231 + const u32 flags);
2232 +
2233 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_HMAC)
2234 +int mtk_authenc_setkey(struct crypto_shash *cshash, struct saRecord_s *sa,
2235 + const u8 *authkey, unsigned int authkeylen);
2236 +#endif
2237 +
2238 +#endif /* _EIP93_COMMON_H_ */
2239 --- /dev/null
2240 +++ b/drivers/crypto/mtk-eip93/eip93-des.h
2241 @@ -0,0 +1,15 @@
2242 +/* SPDX-License-Identifier: GPL-2.0
2243 + *
2244 + * Copyright (C) 2019 - 2021
2245 + *
2246 + * Richard van Schagen <vschagen@icloud.com>
2247 + */
2248 +#ifndef _EIP93_DES_H_
2249 +#define _EIP93_DES_H_
2250 +
2251 +extern struct mtk_alg_template mtk_alg_ecb_des;
2252 +extern struct mtk_alg_template mtk_alg_cbc_des;
2253 +extern struct mtk_alg_template mtk_alg_ecb_des3_ede;
2254 +extern struct mtk_alg_template mtk_alg_cbc_des3_ede;
2255 +
2256 +#endif /* _EIP93_DES_H_ */
2257 --- /dev/null
2258 +++ b/drivers/crypto/mtk-eip93/eip93-main.c
2259 @@ -0,0 +1,467 @@
2260 +// SPDX-License-Identifier: GPL-2.0
2261 +/*
2262 + * Copyright (C) 2019 - 2021
2263 + *
2264 + * Richard van Schagen <vschagen@icloud.com>
2265 + */
2266 +
2267 +#include <linux/atomic.h>
2268 +#include <linux/clk.h>
2269 +#include <linux/delay.h>
2270 +#include <linux/dma-mapping.h>
2271 +#include <linux/interrupt.h>
2272 +#include <linux/module.h>
2273 +#include <linux/of_device.h>
2274 +#include <linux/platform_device.h>
2275 +#include <linux/spinlock.h>
2276 +
2277 +#include "eip93-main.h"
2278 +#include "eip93-regs.h"
2279 +#include "eip93-common.h"
2280 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_SKCIPHER)
2281 +#include "eip93-cipher.h"
2282 +#endif
2283 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
2284 +#include "eip93-aes.h"
2285 +#endif
2286 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
2287 +#include "eip93-des.h"
2288 +#endif
2289 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AEAD)
2290 +#include "eip93-aead.h"
2291 +#endif
2292 +
2293 +static struct mtk_alg_template *mtk_algs[] = {
2294 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
2295 + &mtk_alg_ecb_des,
2296 + &mtk_alg_cbc_des,
2297 + &mtk_alg_ecb_des3_ede,
2298 + &mtk_alg_cbc_des3_ede,
2299 +#endif
2300 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
2301 + &mtk_alg_ecb_aes,
2302 + &mtk_alg_cbc_aes,
2303 + &mtk_alg_ctr_aes,
2304 + &mtk_alg_rfc3686_aes,
2305 +#endif
2306 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AEAD)
2307 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
2308 + &mtk_alg_authenc_hmac_md5_cbc_des,
2309 + &mtk_alg_authenc_hmac_sha1_cbc_des,
2310 + &mtk_alg_authenc_hmac_sha224_cbc_des,
2311 + &mtk_alg_authenc_hmac_sha256_cbc_des,
2312 + &mtk_alg_authenc_hmac_md5_cbc_des3_ede,
2313 + &mtk_alg_authenc_hmac_sha1_cbc_des3_ede,
2314 + &mtk_alg_authenc_hmac_sha224_cbc_des3_ede,
2315 + &mtk_alg_authenc_hmac_sha256_cbc_des3_ede,
2316 +#endif
2317 + &mtk_alg_authenc_hmac_md5_cbc_aes,
2318 + &mtk_alg_authenc_hmac_sha1_cbc_aes,
2319 + &mtk_alg_authenc_hmac_sha224_cbc_aes,
2320 + &mtk_alg_authenc_hmac_sha256_cbc_aes,
2321 + &mtk_alg_authenc_hmac_md5_rfc3686_aes,
2322 + &mtk_alg_authenc_hmac_sha1_rfc3686_aes,
2323 + &mtk_alg_authenc_hmac_sha224_rfc3686_aes,
2324 + &mtk_alg_authenc_hmac_sha256_rfc3686_aes,
2325 +#endif
2326 +};
2327 +
2328 +inline void mtk_irq_disable(struct mtk_device *mtk, u32 mask)
2329 +{
2330 + __raw_writel(mask, mtk->base + EIP93_REG_MASK_DISABLE);
2331 +}
2332 +
2333 +inline void mtk_irq_enable(struct mtk_device *mtk, u32 mask)
2334 +{
2335 + __raw_writel(mask, mtk->base + EIP93_REG_MASK_ENABLE);
2336 +}
2337 +
2338 +inline void mtk_irq_clear(struct mtk_device *mtk, u32 mask)
2339 +{
2340 + __raw_writel(mask, mtk->base + EIP93_REG_INT_CLR);
2341 +}
2342 +
2343 +static void mtk_unregister_algs(unsigned int i)
2344 +{
2345 + unsigned int j;
2346 +
2347 + for (j = 0; j < i; j++) {
2348 + switch (mtk_algs[j]->type) {
2349 + case MTK_ALG_TYPE_SKCIPHER:
2350 + crypto_unregister_skcipher(&mtk_algs[j]->alg.skcipher);
2351 + break;
2352 + case MTK_ALG_TYPE_AEAD:
2353 + crypto_unregister_aead(&mtk_algs[j]->alg.aead);
2354 + break;
2355 + }
2356 + }
2357 +}
2358 +
2359 +static int mtk_register_algs(struct mtk_device *mtk)
2360 +{
2361 + unsigned int i;
2362 + int err = 0;
2363 +
2364 + for (i = 0; i < ARRAY_SIZE(mtk_algs); i++) {
2365 + mtk_algs[i]->mtk = mtk;
2366 +
2367 + switch (mtk_algs[i]->type) {
2368 + case MTK_ALG_TYPE_SKCIPHER:
2369 + err = crypto_register_skcipher(&mtk_algs[i]->alg.skcipher);
2370 + break;
2371 + case MTK_ALG_TYPE_AEAD:
2372 + err = crypto_register_aead(&mtk_algs[i]->alg.aead);
2373 + break;
2374 + }
2375 + if (err)
2376 + goto fail;
2377 + }
2378 +
2379 + return 0;
2380 +
2381 +fail:
2382 + mtk_unregister_algs(i);
2383 +
2384 + return err;
2385 +}
2386 +
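+/*
+ * Drain the result descriptor ring: wait for each descriptor to be
+ * marked ready by the engine, acknowledge it, and complete the
+ * request once the descriptor flagged MTK_DESC_LAST is seen.
+ */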
2387 +static void mtk_handle_result_descriptor(struct mtk_device *mtk)
2388 +{
2389 + struct crypto_async_request *async;
2390 + struct eip93_descriptor_s *rdesc;
2391 + bool last_entry;
2392 + u32 flags;
2393 + int handled, ready, err;
2394 + union peCrtlStat_w done1;
2395 + union peLength_w done2;
2396 +
2397 +get_more:
2398 + handled = 0;
2399 +
2400 + ready = readl(mtk->base + EIP93_REG_PE_RD_COUNT) & GENMASK(10, 0);
2401 +
2402 + if (!ready) {
2403 + mtk_irq_clear(mtk, EIP93_INT_PE_RDRTHRESH_REQ);
2404 + mtk_irq_enable(mtk, EIP93_INT_PE_RDRTHRESH_REQ);
2405 + return;
2406 + }
2407 +
2408 + last_entry = false;
2409 +
2410 + while (ready) {
2411 + rdesc = mtk_get_descriptor(mtk);
2412 + if (IS_ERR(rdesc)) {
2413 + dev_err(mtk->dev, "Ndesc: %d nreq: %d\n",
2414 + handled, ready);
2415 + err = -EIO;
2416 + break;
2417 + }
2418 + /* make sure DMA is finished writing */
2419 + do {
2420 + done1.word = READ_ONCE(rdesc->peCrtlStat.word);
2421 + done2.word = READ_ONCE(rdesc->peLength.word);
2422 + } while ((!done1.bits.peReady) || (!done2.bits.peReady));
2423 +
2424 + err = rdesc->peCrtlStat.bits.errStatus;
2425 +
2426 + flags = rdesc->userId;
2427 + async = (struct crypto_async_request *)rdesc->arc4Addr;
2428 +
2429 + writel(1, mtk->base + EIP93_REG_PE_RD_COUNT);
2430 + mtk_irq_clear(mtk, EIP93_INT_PE_RDRTHRESH_REQ);
2431 +
2432 + handled++;
2433 + ready--;
2434 +
2435 + if (flags & MTK_DESC_LAST) {
2436 + last_entry = true;
2437 + break;
2438 + }
2439 + }
2440 +
2441 + if (!last_entry)
2442 + goto get_more;
2443 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_SKCIPHER)
2444 + if (flags & MTK_DESC_SKCIPHER)
2445 + mtk_skcipher_handle_result(async, err);
2446 +#endif
2447 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AEAD)
2448 + if (flags & MTK_DESC_AEAD)
2449 + mtk_aead_handle_result(async, err);
2450 +#endif
2451 + goto get_more;
2452 +}
2453 +
2454 +static void mtk_done_task(unsigned long data)
2455 +{
2456 + struct mtk_device *mtk = (struct mtk_device *)data;
2457 +
2458 + mtk_handle_result_descriptor(mtk);
2459 +}
2460 +
2461 +static irqreturn_t mtk_irq_handler(int irq, void *dev_id)
2462 +{
2463 + struct mtk_device *mtk = (struct mtk_device *)dev_id;
2464 + u32 irq_status;
2465 +
2466 + irq_status = readl(mtk->base + EIP93_REG_INT_MASK_STAT);
2467 +
2468 + if (irq_status & EIP93_INT_PE_RDRTHRESH_REQ) {
2469 + mtk_irq_disable(mtk, EIP93_INT_PE_RDRTHRESH_REQ);
2470 + tasklet_schedule(&mtk->ring->done_task);
2471 + return IRQ_HANDLED;
2472 + }
2473 +
2474 + mtk_irq_clear(mtk, irq_status);
2475 + if (irq_status)
2476 + mtk_irq_disable(mtk, irq_status);
2477 +
2478 + return IRQ_NONE;
2479 +}
2480 +
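+/*
+ * One-time engine setup: reset the packet engine and rings, then
+ * program endianness, interrupts, clocks and DMA/ring thresholds.
+ */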
2481 +static void mtk_initialize(struct mtk_device *mtk)
2482 +{
2483 + union peConfig_w peConfig;
2484 + union peEndianCfg_w peEndianCfg;
2485 + union peIntCfg_w peIntCfg;
2486 + union peClockCfg_w peClockCfg;
2487 + union peBufThresh_w peBufThresh;
2488 + union peRingThresh_w peRingThresh;
2489 +
2490 + /* Reset Engine and setup Mode */
2491 + peConfig.word = 0;
2492 + peConfig.bits.resetPE = 1;
2493 + peConfig.bits.resetRing = 1;
2494 + peConfig.bits.peMode = 3;
2495 + peConfig.bits.enCDRupdate = 1;
2496 +
2497 + writel(peConfig.word, mtk->base + EIP93_REG_PE_CONFIG);
2498 +
2499 + udelay(10);
2500 +
2501 + peConfig.bits.resetPE = 0;
2502 + peConfig.bits.resetRing = 0;
2503 +
2504 + writel(peConfig.word, mtk->base + EIP93_REG_PE_CONFIG);
2505 +
2506 + /* Initialize the BYTE_ORDER_CFG register */
2507 + peEndianCfg.word = 0;
2508 + writel(peEndianCfg.word, mtk->base + EIP93_REG_PE_ENDIAN_CONFIG);
2509 +
2510 + /* Initialize the INT_CFG register */
2511 + peIntCfg.word = 0;
2512 + writel(peIntCfg.word, mtk->base + EIP93_REG_INT_CFG);
2513 +
2514 + /* Config Clocks */
2515 + peClockCfg.word = 0;
2516 + peClockCfg.bits.enPEclk = 1;
2517 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_DES)
2518 + peClockCfg.bits.enDESclk = 1;
2519 +#endif
2520 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_AES)
2521 + peClockCfg.bits.enAESclk = 1;
2522 +#endif
2523 +#if IS_ENABLED(CONFIG_CRYPTO_DEV_EIP93_HMAC)
2524 + peClockCfg.bits.enHASHclk = 1;
2525 +#endif
2526 + writel(peClockCfg.word, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
2527 +
2528 + /* Config DMA thresholds */
2529 + peBufThresh.word = 0;
2530 + peBufThresh.bits.inputBuffer = 128;
2531 + peBufThresh.bits.outputBuffer = 128;
2532 +
2533 + writel(peBufThresh.word, mtk->base + EIP93_REG_PE_BUF_THRESH);
2534 +
2535 + /* Clear/ack all interrupts before disable all */
2536 + mtk_irq_clear(mtk, 0xFFFFFFFF);
2537 + mtk_irq_disable(mtk, 0xFFFFFFFF);
2538 +
2539 + /* Config Ring Threshold */
2540 + peRingThresh.word = 0;
2541 + peRingThresh.bits.CDRThresh = MTK_RING_SIZE - MTK_RING_BUSY;
2542 + peRingThresh.bits.RDRThresh = 0;
2543 + peRingThresh.bits.RDTimeout = 5;
2544 + peRingThresh.bits.enTimeout = 1;
2545 +
2546 + writel(peRingThresh.word, mtk->base + EIP93_REG_PE_RING_THRESH);
2547 +}
2548 +
2549 +static void mtk_desc_free(struct mtk_device *mtk)
2550 +{
2551 + writel(0, mtk->base + EIP93_REG_PE_RING_CONFIG);
2552 + writel(0, mtk->base + EIP93_REG_PE_CDR_BASE);
2553 + writel(0, mtk->base + EIP93_REG_PE_RDR_BASE);
2554 +}
2555 +
2556 +static int mtk_set_ring(struct mtk_device *mtk, struct mtk_desc_ring *ring,
2557 + int Offset)
2558 +{
2559 + ring->offset = Offset;
2560 + ring->base = dmam_alloc_coherent(mtk->dev, Offset * MTK_RING_SIZE,
2561 + &ring->base_dma, GFP_KERNEL);
2562 + if (!ring->base)
2563 + return -ENOMEM;
2564 +
2565 + ring->write = ring->base;
2566 + ring->base_end = ring->base + Offset * (MTK_RING_SIZE - 1);
2567 + ring->read = ring->base;
2568 +
2569 + return 0;
2570 +}
2571 +
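+/*
+ * Allocate the DMA-coherent command and result rings (one
+ * eip93_descriptor_s per slot) and the saState record pool.
+ */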
2572 +static int mtk_desc_init(struct mtk_device *mtk)
2573 +{
2574 + struct mtk_state_pool *saState_pool;
2575 + struct mtk_desc_ring *cdr = &mtk->ring->cdr;
2576 + struct mtk_desc_ring *rdr = &mtk->ring->rdr;
2577 + union peRingCfg_w peRingCfg;
2578 + int RingOffset, err, i;
2579 +
2580 + RingOffset = sizeof(struct eip93_descriptor_s);
2581 +
2582 + err = mtk_set_ring(mtk, cdr, RingOffset);
2583 + if (err)
2584 + return err;
2585 +
2586 + err = mtk_set_ring(mtk, rdr, RingOffset);
2587 + if (err)
2588 + return err;
2589 +
2590 + writel((u32)cdr->base_dma, mtk->base + EIP93_REG_PE_CDR_BASE);
2591 + writel((u32)rdr->base_dma, mtk->base + EIP93_REG_PE_RDR_BASE);
2592 +
2593 + peRingCfg.word = 0;
2594 + peRingCfg.bits.ringSize = MTK_RING_SIZE - 1;
2595 + peRingCfg.bits.ringOffset = RingOffset / 4;
2596 +
2597 + writel(peRingCfg.word, mtk->base + EIP93_REG_PE_RING_CONFIG);
2598 +
2599 + atomic_set(&mtk->ring->free, MTK_RING_SIZE - 1);
2600 + /* Create State record DMA pool */
2601 + RingOffset = sizeof(struct saState_s);
2602 + mtk->ring->saState = dmam_alloc_coherent(mtk->dev,
2603 + RingOffset * MTK_RING_SIZE,
2604 + &mtk->ring->saState_dma, GFP_KERNEL);
2605 + if (!mtk->ring->saState)
2606 + return -ENOMEM;
2607 +
2608 +	mtk->ring->saState_pool = devm_kcalloc(mtk->dev, MTK_RING_SIZE,
2609 +		sizeof(struct mtk_state_pool),
2610 +		GFP_KERNEL);
+	if (!mtk->ring->saState_pool)
+		return -ENOMEM;
2611 +
2612 + for (i = 0; i < MTK_RING_SIZE; i++) {
2613 + saState_pool = &mtk->ring->saState_pool[i];
2614 + saState_pool->base = mtk->ring->saState + (i * RingOffset);
2615 + saState_pool->base_dma = mtk->ring->saState_dma + (i * RingOffset);
2616 + saState_pool->in_use = false;
2617 + }
2618 +
2619 + return 0;
2620 +}
2621 +
2622 +static void mtk_cleanup(struct mtk_device *mtk)
2623 +{
2624 + tasklet_kill(&mtk->ring->done_task);
2625 +
2626 + /* Clear/ack all interrupts before disable all */
2627 + mtk_irq_clear(mtk, 0xFFFFFFFF);
2628 + mtk_irq_disable(mtk, 0xFFFFFFFF);
2629 +
2630 + writel(0, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
2631 +
2632 + mtk_desc_free(mtk);
2633 +}
2634 +
2635 +static int mtk_crypto_probe(struct platform_device *pdev)
2636 +{
2637 + struct device *dev = &pdev->dev;
2638 + struct mtk_device *mtk;
2639 + struct resource *res;
2640 + int err;
2641 +
2642 + mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
2643 + if (!mtk)
2644 + return -ENOMEM;
2645 +
2646 + mtk->dev = dev;
2647 + platform_set_drvdata(pdev, mtk);
2648 +
2649 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2650 + mtk->base = devm_ioremap_resource(&pdev->dev, res);
2651 +
2652 + if (IS_ERR(mtk->base))
2653 + return PTR_ERR(mtk->base);
2654 +
2655 + mtk->irq = platform_get_irq(pdev, 0);
2656 +
2657 + if (mtk->irq < 0)
2658 + return mtk->irq;
2659 +
2660 +	err = devm_request_threaded_irq(mtk->dev, mtk->irq, mtk_irq_handler,
2661 +					NULL, IRQF_ONESHOT,
2662 +					dev_name(mtk->dev), mtk);
+	if (err)
+		return err;
2663 +
2664 +	mtk->ring = devm_kzalloc(mtk->dev, sizeof(*mtk->ring), GFP_KERNEL);
2665 +
2666 + if (!mtk->ring)
2667 + return -ENOMEM;
2668 +
2669 + err = mtk_desc_init(mtk);
2670 + if (err)
2671 + return err;
2672 +
2673 + tasklet_init(&mtk->ring->done_task, mtk_done_task, (unsigned long)mtk);
2674 +
2675 + spin_lock_init(&mtk->ring->read_lock);
2676 + spin_lock_init(&mtk->ring->write_lock);
2677 +
2678 + mtk_initialize(mtk);
2679 +
2680 +	/* Init finished, enable RDR interrupt */
2681 + mtk_irq_enable(mtk, EIP93_INT_PE_RDRTHRESH_REQ);
2682 +
2683 + err = mtk_register_algs(mtk);
2684 + if (err) {
2685 + mtk_cleanup(mtk);
2686 + return err;
2687 + }
2688 +
2689 +	dev_info(mtk->dev, "EIP93 Crypto Engine Initialized.\n");
2690 +
2691 + return 0;
2692 +}
2693 +
2694 +static int mtk_crypto_remove(struct platform_device *pdev)
2695 +{
2696 + struct mtk_device *mtk = platform_get_drvdata(pdev);
2697 +
2698 + mtk_unregister_algs(ARRAY_SIZE(mtk_algs));
2699 + mtk_cleanup(mtk);
2700 + dev_info(mtk->dev, "EIP93 removed.\n");
2701 +
2702 + return 0;
2703 +}
2704 +
2705 +#if defined(CONFIG_OF)
2706 +static const struct of_device_id mtk_crypto_of_match[] = {
2707 + { .compatible = "mediatek,mtk-eip93", },
2708 + {}
2709 +};
2710 +MODULE_DEVICE_TABLE(of, mtk_crypto_of_match);
2711 +#endif
2712 +
2713 +static struct platform_driver mtk_crypto_driver = {
2714 + .probe = mtk_crypto_probe,
2715 + .remove = mtk_crypto_remove,
2716 + .driver = {
2717 + .name = "mtk-eip93",
2718 + .of_match_table = of_match_ptr(mtk_crypto_of_match),
2719 + },
2720 +};
2721 +module_platform_driver(mtk_crypto_driver);
2722 +
2723 +MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
2724 +MODULE_ALIAS("platform:" KBUILD_MODNAME);
2725 +MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
2726 +MODULE_LICENSE("GPL v2");
2727 --- /dev/null
2728 +++ b/drivers/crypto/mtk-eip93/eip93-main.h
2729 @@ -0,0 +1,146 @@
2730 +/* SPDX-License-Identifier: GPL-2.0
2731 + *
2732 + * Copyright (C) 2019 - 2021
2733 + *
2734 + * Richard van Schagen <vschagen@icloud.com>
2735 + */
2736 +#ifndef _EIP93_MAIN_H_
2737 +#define _EIP93_MAIN_H_
2738 +
2739 +#include <crypto/internal/aead.h>
2740 +#include <crypto/internal/hash.h>
2741 +#include <crypto/internal/rng.h>
2742 +#include <crypto/internal/skcipher.h>
2743 +#include <linux/device.h>
2744 +#include <linux/interrupt.h>
2745 +
2746 +#define MTK_RING_SIZE 512
2747 +#define MTK_RING_BUSY 32
2748 +#define MTK_CRA_PRIORITY 1500
2749 +
2750 +/* cipher algorithms */
2751 +#define MTK_ALG_DES BIT(0)
2752 +#define MTK_ALG_3DES BIT(1)
2753 +#define MTK_ALG_AES BIT(2)
2754 +#define MTK_ALG_MASK GENMASK(2, 0)
2755 +/* hash and hmac algorithms */
2756 +#define MTK_HASH_MD5 BIT(3)
2757 +#define MTK_HASH_SHA1 BIT(4)
2758 +#define MTK_HASH_SHA224 BIT(5)
2759 +#define MTK_HASH_SHA256 BIT(6)
2760 +#define MTK_HASH_HMAC BIT(7)
2761 +#define MTK_HASH_MASK GENMASK(6, 3)
2762 +/* cipher modes */
2763 +#define MTK_MODE_CBC BIT(8)
2764 +#define MTK_MODE_ECB BIT(9)
2765 +#define MTK_MODE_CTR BIT(10)
2766 +#define MTK_MODE_RFC3686 BIT(11)
2767 +#define MTK_MODE_MASK GENMASK(10, 8)
2768 +
2769 +/* cipher encryption/decryption operations */
2770 +#define MTK_ENCRYPT BIT(12)
2771 +#define MTK_DECRYPT BIT(13)
2772 +
2773 +#define MTK_BUSY BIT(14)
2774 +
2775 +/* descriptor flags */
2776 +#define MTK_DESC_ASYNC BIT(31)
2777 +#define MTK_DESC_SKCIPHER BIT(30)
2778 +#define MTK_DESC_AEAD BIT(29)
2779 +#define MTK_DESC_AHASH BIT(28)
2780 +#define MTK_DESC_PRNG BIT(27)
2781 +#define MTK_DESC_FAKE_HMAC BIT(26)
2782 +#define MTK_DESC_LAST BIT(25)
2783 +#define MTK_DESC_FINISH BIT(24)
2784 +#define MTK_DESC_IPSEC BIT(23)
2785 +#define MTK_DESC_DMA_IV BIT(22)
2786 +
2787 +#define IS_DES(flags) (flags & MTK_ALG_DES)
2788 +#define IS_3DES(flags) (flags & MTK_ALG_3DES)
2789 +#define IS_AES(flags) (flags & MTK_ALG_AES)
2790 +
2791 +#define IS_HASH_MD5(flags) (flags & MTK_HASH_MD5)
2792 +#define IS_HASH_SHA1(flags) (flags & MTK_HASH_SHA1)
2793 +#define IS_HASH_SHA224(flags) (flags & MTK_HASH_SHA224)
2794 +#define IS_HASH_SHA256(flags) (flags & MTK_HASH_SHA256)
2795 +#define IS_HMAC(flags) (flags & MTK_HASH_HMAC)
2796 +
2797 +#define IS_CBC(mode) (mode & MTK_MODE_CBC)
2798 +#define IS_ECB(mode) (mode & MTK_MODE_ECB)
2799 +#define IS_CTR(mode) (mode & MTK_MODE_CTR)
2800 +#define IS_RFC3686(mode) (mode & MTK_MODE_RFC3686)
2801 +
2802 +#define IS_BUSY(flags) (flags & MTK_BUSY)
2803 +#define IS_DMA_IV(flags) (flags & MTK_DESC_DMA_IV)
2804 +
2805 +#define IS_ENCRYPT(dir) (dir & MTK_ENCRYPT)
2806 +#define IS_DECRYPT(dir) (dir & MTK_DECRYPT)
2807 +
2808 +#define IS_CIPHER(flags) (flags & (MTK_ALG_DES | \
2809 +				   MTK_ALG_3DES | \
2810 +				   MTK_ALG_AES))
2811 +
2812 +#define IS_HASH(flags) (flags & (MTK_HASH_MD5 | \
2813 +				 MTK_HASH_SHA1 | \
2814 +				 MTK_HASH_SHA224 | \
2815 +				 MTK_HASH_SHA256))
2816 +
2817 +/**
2818 + * struct mtk_device - crypto engine device structure
2819 + */
2820 +struct mtk_device {
2821 + void __iomem *base;
2822 + struct device *dev;
2823 + struct clk *clk;
2824 + int irq;
2825 + struct mtk_ring *ring;
2826 + struct mtk_state_pool *saState_pool;
2827 +};
2828 +
2829 +struct mtk_desc_ring {
2830 + void *base;
2831 + void *base_end;
2832 + dma_addr_t base_dma;
2833 + /* write and read pointers */
2834 + void *read;
2835 + void *write;
2836 + /* descriptor element offset */
2837 + u32 offset;
2838 +};
2839 +
2840 +struct mtk_state_pool {
2841 + void *base;
2842 + dma_addr_t base_dma;
2843 + bool in_use;
2844 +};
2845 +
2846 +struct mtk_ring {
2847 + struct tasklet_struct done_task;
2848 + /* command/result rings */
2849 + struct mtk_desc_ring cdr;
2850 + struct mtk_desc_ring rdr;
2851 + spinlock_t write_lock;
2852 + spinlock_t read_lock;
2853 + atomic_t free;
2854 + /* saState */
2855 + struct mtk_state_pool *saState_pool;
2856 + void *saState;
2857 + dma_addr_t saState_dma;
2858 +};
2859 +
2860 +enum mtk_alg_type {
2861 + MTK_ALG_TYPE_AEAD,
2862 + MTK_ALG_TYPE_SKCIPHER,
2863 +};
2864 +
2865 +struct mtk_alg_template {
2866 + struct mtk_device *mtk;
2867 + enum mtk_alg_type type;
2868 + u32 flags;
2869 + union {
2870 + struct aead_alg aead;
2871 + struct skcipher_alg skcipher;
2872 + } alg;
2873 +};
2874 +
2875 +#endif /* _EIP93_MAIN_H_ */
2876 --- /dev/null
2877 +++ b/drivers/crypto/mtk-eip93/eip93-regs.h
2878 @@ -0,0 +1,382 @@
2879 +/* SPDX-License-Identifier: GPL-2.0 */
2880 +/*
2881 + * Copyright (C) 2019 - 2021
2882 + *
2883 + * Richard van Schagen <vschagen@icloud.com>
2884 + */
2885 +#ifndef REG_EIP93_H
2886 +#define REG_EIP93_H
2887 +
2888 +#define EIP93_REG_WIDTH 4
2889 +/*-----------------------------------------------------------------------------
2890 + * Register Map
2891 + */
2892 +#define DESP_BASE 0x0000000
2893 +#define EIP93_REG_PE_CTRL_STAT ((DESP_BASE)+(0x00 * EIP93_REG_WIDTH))
2894 +#define EIP93_REG_PE_SOURCE_ADDR ((DESP_BASE)+(0x01 * EIP93_REG_WIDTH))
2895 +#define EIP93_REG_PE_DEST_ADDR ((DESP_BASE)+(0x02 * EIP93_REG_WIDTH))
2896 +#define EIP93_REG_PE_SA_ADDR ((DESP_BASE)+(0x03 * EIP93_REG_WIDTH))
2897 +#define EIP93_REG_PE_ADDR ((DESP_BASE)+(0x04 * EIP93_REG_WIDTH))
2898 +#define EIP93_REG_PE_USER_ID ((DESP_BASE)+(0x06 * EIP93_REG_WIDTH))
2899 +#define EIP93_REG_PE_LENGTH ((DESP_BASE)+(0x07 * EIP93_REG_WIDTH))
2900 +
2901 +//PACKET ENGINE RING configuration registers
2902 +#define PE_RNG_BASE 0x0000080
2903 +
2904 +#define EIP93_REG_PE_CDR_BASE ((PE_RNG_BASE)+(0x00 * EIP93_REG_WIDTH))
2905 +#define EIP93_REG_PE_RDR_BASE ((PE_RNG_BASE)+(0x01 * EIP93_REG_WIDTH))
2906 +#define EIP93_REG_PE_RING_CONFIG ((PE_RNG_BASE)+(0x02 * EIP93_REG_WIDTH))
2907 +#define EIP93_REG_PE_RING_THRESH ((PE_RNG_BASE)+(0x03 * EIP93_REG_WIDTH))
2908 +#define EIP93_REG_PE_CD_COUNT ((PE_RNG_BASE)+(0x04 * EIP93_REG_WIDTH))
2909 +#define EIP93_REG_PE_RD_COUNT ((PE_RNG_BASE)+(0x05 * EIP93_REG_WIDTH))
2910 +#define EIP93_REG_PE_RING_RW_PNTR ((PE_RNG_BASE)+(0x06 * EIP93_REG_WIDTH))
2911 +
2912 +//PACKET ENGINE configuration registers
2913 +#define PE_CFG_BASE 0x0000100
2914 +#define EIP93_REG_PE_CONFIG ((PE_CFG_BASE)+(0x00 * EIP93_REG_WIDTH))
2915 +#define EIP93_REG_PE_STATUS ((PE_CFG_BASE)+(0x01 * EIP93_REG_WIDTH))
2916 +#define EIP93_REG_PE_BUF_THRESH ((PE_CFG_BASE)+(0x03 * EIP93_REG_WIDTH))
2917 +#define EIP93_REG_PE_INBUF_COUNT ((PE_CFG_BASE)+(0x04 * EIP93_REG_WIDTH))
2918 +#define EIP93_REG_PE_OUTBUF_COUNT ((PE_CFG_BASE)+(0x05 * EIP93_REG_WIDTH))
2919 +#define EIP93_REG_PE_BUF_RW_PNTR ((PE_CFG_BASE)+(0x06 * EIP93_REG_WIDTH))
2920 +
2921 +//PACKET ENGINE endian config
2922 +#define EN_CFG_BASE 0x00001CC
2923 +#define EIP93_REG_PE_ENDIAN_CONFIG ((EN_CFG_BASE)+(0x00 * EIP93_REG_WIDTH))
2924 +
2925 +//EIP93 CLOCK control registers
2926 +#define CLOCK_BASE 0x01E8
2927 +#define EIP93_REG_PE_CLOCK_CTRL ((CLOCK_BASE)+(0x00 * EIP93_REG_WIDTH))
2928 +
2929 +//EIP93 Device Option and Revision Register
2930 +#define REV_BASE 0x01F4
2931 +#define EIP93_REG_PE_OPTION_1 ((REV_BASE)+(0x00 * EIP93_REG_WIDTH))
2932 +#define EIP93_REG_PE_OPTION_0 ((REV_BASE)+(0x01 * EIP93_REG_WIDTH))
2933 +#define EIP93_REG_PE_REVISION ((REV_BASE)+(0x02 * EIP93_REG_WIDTH))
2934 +
2935 +//EIP93 Interrupt Control Register
2936 +#define INT_BASE 0x0200
2937 +#define EIP93_REG_INT_UNMASK_STAT ((INT_BASE)+(0x00 * EIP93_REG_WIDTH))
2938 +#define EIP93_REG_INT_MASK_STAT ((INT_BASE)+(0x01 * EIP93_REG_WIDTH))
2939 +#define EIP93_REG_INT_CLR ((INT_BASE)+(0x01 * EIP93_REG_WIDTH))
2940 +#define EIP93_REG_INT_MASK ((INT_BASE)+(0x02 * EIP93_REG_WIDTH))
2941 +#define EIP93_REG_INT_CFG ((INT_BASE)+(0x03 * EIP93_REG_WIDTH))
2942 +#define EIP93_REG_MASK_ENABLE ((INT_BASE)+(0X04 * EIP93_REG_WIDTH))
2943 +#define EIP93_REG_MASK_DISABLE ((INT_BASE)+(0X05 * EIP93_REG_WIDTH))
2944 +
2945 +//EIP93 SA Record register
2946 +#define SA_BASE 0x0400
2947 +#define EIP93_REG_SA_CMD_0 ((SA_BASE)+(0x00 * EIP93_REG_WIDTH))
2948 +#define EIP93_REG_SA_CMD_1 ((SA_BASE)+(0x01 * EIP93_REG_WIDTH))
2949 +
2950 +//#define EIP93_REG_SA_READY ((SA_BASE)+(31 * EIP93_REG_WIDTH))
2951 +
2952 +//State save register
2953 +#define STATE_BASE 0x0500
2954 +#define EIP93_REG_STATE_IV_0 ((STATE_BASE)+(0x00 * EIP93_REG_WIDTH))
2955 +#define EIP93_REG_STATE_IV_1 ((STATE_BASE)+(0x01 * EIP93_REG_WIDTH))
2956 +
2957 +#define EIP93_PE_ARC4STATE_BASEADDR_REG 0x0700
2958 +
2959 +//RAM buffer start address
2960 +#define EIP93_INPUT_BUFFER 0x0800
2961 +#define EIP93_OUTPUT_BUFFER 0x0800
2962 +
2963 +//EIP93 PRNG Configuration Register
2964 +#define PRNG_BASE 0x0300
2965 +#define EIP93_REG_PRNG_STAT ((PRNG_BASE)+(0x00 * EIP93_REG_WIDTH))
2966 +#define EIP93_REG_PRNG_CTRL ((PRNG_BASE)+(0x01 * EIP93_REG_WIDTH))
2967 +#define EIP93_REG_PRNG_SEED_0 ((PRNG_BASE)+(0x02 * EIP93_REG_WIDTH))
2968 +#define EIP93_REG_PRNG_SEED_1 ((PRNG_BASE)+(0x03 * EIP93_REG_WIDTH))
2969 +#define EIP93_REG_PRNG_SEED_2 ((PRNG_BASE)+(0x04 * EIP93_REG_WIDTH))
2970 +#define EIP93_REG_PRNG_SEED_3 ((PRNG_BASE)+(0x05 * EIP93_REG_WIDTH))
2971 +#define EIP93_REG_PRNG_KEY_0 ((PRNG_BASE)+(0x06 * EIP93_REG_WIDTH))
2972 +#define EIP93_REG_PRNG_KEY_1 ((PRNG_BASE)+(0x07 * EIP93_REG_WIDTH))
2973 +#define EIP93_REG_PRNG_KEY_2 ((PRNG_BASE)+(0x08 * EIP93_REG_WIDTH))
2974 +#define EIP93_REG_PRNG_KEY_3 ((PRNG_BASE)+(0x09 * EIP93_REG_WIDTH))
2975 +#define EIP93_REG_PRNG_RES_0 ((PRNG_BASE)+(0x0A * EIP93_REG_WIDTH))
2976 +#define EIP93_REG_PRNG_RES_1 ((PRNG_BASE)+(0x0B * EIP93_REG_WIDTH))
2977 +#define EIP93_REG_PRNG_RES_2 ((PRNG_BASE)+(0x0C * EIP93_REG_WIDTH))
2978 +#define EIP93_REG_PRNG_RES_3 ((PRNG_BASE)+(0x0D * EIP93_REG_WIDTH))
2979 +#define EIP93_REG_PRNG_LFSR_0 ((PRNG_BASE)+(0x0E * EIP93_REG_WIDTH))
2980 +#define EIP93_REG_PRNG_LFSR_1 ((PRNG_BASE)+(0x0F * EIP93_REG_WIDTH))
2981 +
2982 +/*-----------------------------------------------------------------------------
2983 + * Constants & masks
2984 + */
2985 +
2986 +#define EIP93_SUPPORTED_INTERRUPTS_MASK 0xffff7f00
2987 +#define EIP93_PRNG_DT_TEXT_LOWERHALF 0xDEAD
2988 +#define EIP93_PRNG_DT_TEXT_UPPERHALF 0xC0DE
2989 +#define EIP93_10BITS_MASK 0X3FF
2990 +#define EIP93_12BITS_MASK 0XFFF
2991 +#define EIP93_4BITS_MASK 0X04
2992 +#define EIP93_20BITS_MASK 0xFFFFF
2993 +
2994 +#define EIP93_MIN_DESC_DONE_COUNT 0
2995 +#define EIP93_MAX_DESC_DONE_COUNT 15
2996 +
2997 +#define EIP93_MIN_DESC_PENDING_COUNT 0
2998 +#define EIP93_MAX_DESC_PENDING_COUNT 1023
2999 +
3000 +#define EIP93_MIN_TIMEOUT_COUNT 0
3001 +#define EIP93_MAX_TIMEOUT_COUNT 15
3002 +
3003 +#define EIP93_MIN_PE_INPUT_THRESHOLD 1
3004 +#define EIP93_MAX_PE_INPUT_THRESHOLD 511
3005 +
3006 +#define EIP93_MIN_PE_OUTPUT_THRESHOLD 1
3007 +#define EIP93_MAX_PE_OUTPUT_THRESHOLD 432
3008 +
3009 +#define EIP93_MIN_PE_RING_SIZE 1
3010 +#define EIP93_MAX_PE_RING_SIZE 1023
3011 +
3012 +#define EIP93_MIN_PE_DESCRIPTOR_SIZE 7
3013 +#define EIP93_MAX_PE_DESCRIPTOR_SIZE 15
3014 +
3015 +//3DES keys,seed,known data and its result
3016 +#define EIP93_KEY_0 0x133b3454
3017 +#define EIP93_KEY_1 0x5e5b890b
3018 +#define EIP93_KEY_2 0x5eb30757
3019 +#define EIP93_KEY_3 0x93ab15f7
3020 +#define EIP93_SEED_0 0x62c4bf5e
3021 +#define EIP93_SEED_1 0x972667c8
3022 +#define EIP93_SEED_2 0x6345bf67
3023 +#define EIP93_SEED_3 0xcb3482bf
3024 +#define EIP93_LFSR_0 0xDEADC0DE
3025 +#define EIP93_LFSR_1 0xBEEFF00D
3026 +
3027 +/*-----------------------------------------------------------------------------
3028 + * EIP93 device initialization specifics
3029 + */
3030 +
3031 +/*----------------------------------------------------------------------------
3032 + * Byte Order Reversal Mechanisms Supported in EIP93
3033 + * EIP93_BO_REVERSE_HALF_WORD : reverse the byte order within a half-word
3034 + * EIP93_BO_REVERSE_WORD : reverse the byte order within a word
3035 + * EIP93_BO_REVERSE_DUAL_WORD : reverse the byte order within a dual-word
3036 + * EIP93_BO_REVERSE_QUAD_WORD : reverse the byte order within a quad-word
3037 + */
3038 +enum EIP93_Byte_Order_Value_t {
3039 + EIP93_BO_REVERSE_HALF_WORD = 1,
3040 + EIP93_BO_REVERSE_WORD = 2,
3041 + EIP93_BO_REVERSE_DUAL_WORD = 4,
3042 + EIP93_BO_REVERSE_QUAD_WORD = 8,
3043 +};
3044 +
3045 +/*----------------------------------------------------------------------------
3046 + * Byte Order Reversal Mechanisms Supported in EIP93 for Target Data
3047 + * EIP93_BO_REVERSE_HALF_WORD : reverse the byte order within a half-word
3048 + * EIP93_BO_REVERSE_WORD : reverse the byte order within a word
3049 + */
3050 +enum EIP93_Byte_Order_Value_TD_t {
3051 + EIP93_BO_REVERSE_HALF_WORD_TD = 1,
3052 + EIP93_BO_REVERSE_WORD_TD = 2,
3053 +};
3054 +
3055 +// BYTE_ORDER_CFG register values
3056 +#define EIP93_BYTE_ORDER_PD EIP93_BO_REVERSE_WORD
3057 +#define EIP93_BYTE_ORDER_SA EIP93_BO_REVERSE_WORD
3058 +#define EIP93_BYTE_ORDER_DATA EIP93_BO_REVERSE_WORD
3059 +#define EIP93_BYTE_ORDER_TD EIP93_BO_REVERSE_WORD_TD
3060 +
3061 +// INT_CFG register values
3062 +#define EIP93_INT_HOST_OUTPUT_TYPE 0
3063 +#define EIP93_INT_PULSE_CLEAR 0
3064 +
3065 +/*
3066 + * Interrupts of EIP93
3067 + */
3068 +
3069 +enum EIP93_InterruptSource_t {
3070 + EIP93_INT_PE_CDRTHRESH_REQ = BIT(0),
3071 + EIP93_INT_PE_RDRTHRESH_REQ = BIT(1),
3072 + EIP93_INT_PE_OPERATION_DONE = BIT(9),
3073 + EIP93_INT_PE_INBUFTHRESH_REQ = BIT(10),
3074 + EIP93_INT_PE_OUTBURTHRSH_REQ = BIT(11),
3075 + EIP93_INT_PE_PRNG_IRQ = BIT(12),
3076 + EIP93_INT_PE_ERR_REG = BIT(13),
3077 + EIP93_INT_PE_RD_DONE_IRQ = BIT(16),
3078 +};
3079 +
3080 +union peConfig_w {
3081 + u32 word;
3082 + struct {
3083 + u32 resetPE :1;
3084 + u32 resetRing :1;
3085 + u32 reserved :6;
3086 + u32 peMode :2;
3087 + u32 enCDRupdate :1;
3088 + u32 reserved2 :5;
3089 + u32 swapCDRD :1;
3090 + u32 swapSA :1;
3091 + u32 swapData :1;
3092 + u32 reserved3 :13;
3093 + } bits;
3094 +} __packed;
3095 +
3096 +union peEndianCfg_w {
3097 + u32 word;
3098 + struct {
3099 + u32 masterByteSwap :8;
3100 + u32 reserved :8;
3101 + u32 targetByteSwap :8;
3102 + u32 reserved2 :8;
3103 + } bits;
3104 +} __packed;
3105 +
3106 +union peIntCfg_w {
3107 + u32 word;
3108 + struct {
3109 + u32 PulseClear :1;
3110 + u32 IntType :1;
3111 + u32 reserved :30;
3112 + } bits;
3113 +} __packed;
3114 +
3115 +union peClockCfg_w {
3116 + u32 word;
3117 + struct {
3118 + u32 enPEclk :1;
3119 + u32 enDESclk :1;
3120 + u32 enAESclk :1;
3121 + u32 reserved :1;
3122 + u32 enHASHclk :1;
3123 + u32 reserved2 :27;
3124 + } bits;
3125 +} __packed;
3126 +
3127 +union peBufThresh_w {
3128 + u32 word;
3129 + struct {
3130 + u32 inputBuffer :8;
3131 + u32 reserved :8;
3132 + u32 outputBuffer :8;
3133 + u32 reserved2 :8;
3134 + } bits;
3135 +} __packed;
3136 +
3137 +union peRingThresh_w {
3138 + u32 word;
3139 + struct {
3140 + u32 CDRThresh :10;
3141 + u32 reserved :6;
3142 + u32 RDRThresh :10;
3143 + u32 RDTimeout :4;
3144 + u32 reserved2 :1;
3145 + u32 enTimeout :1;
3146 + } bits;
3147 +} __packed;
3148 +
3149 +union peRingCfg_w {
3150 + u32 word;
3151 + struct {
3152 + u32 ringSize :10;
3153 + u32 reserved :6;
3154 + u32 ringOffset :8;
3155 + u32 reserved2 :8;
3156 + } bits;
3157 +} __packed;
3158 +
3159 +union saCmd0 {
3160 + u32 word;
3161 + struct {
3162 + u32 opCode :3;
3163 + u32 direction :1;
3164 + u32 opGroup :2;
3165 + u32 padType :2;
3166 + u32 cipher :4;
3167 + u32 hash :4;
3168 + u32 reserved2 :1;
3169 + u32 scPad :1;
3170 + u32 extPad :1;
3171 + u32 hdrProc :1;
3172 + u32 digestLength :4;
3173 + u32 ivSource :2;
3174 + u32 hashSource :2;
3175 + u32 saveIv :1;
3176 + u32 saveHash :1;
3177 + u32 reserved1 :2;
3178 + } bits;
3179 +} __packed;
3180 +
3181 +union saCmd1 {
3182 + u32 word;
3183 + struct {
3184 + u32 copyDigest :1;
3185 + u32 copyHeader :1;
3186 + u32 copyPayload :1;
3187 + u32 copyPad :1;
3188 + u32 reserved4 :4;
3189 + u32 cipherMode :2;
3190 + u32 reserved3 :1;
3191 + u32 sslMac :1;
3192 + u32 hmac :1;
3193 + u32 byteOffset :1;
3194 + u32 reserved2 :2;
3195 + u32 hashCryptOffset :8;
3196 + u32 aesKeyLen :3;
3197 + u32 reserved1 :1;
3198 + u32 aesDecKey :1;
3199 + u32 seqNumCheck :1;
3200 + u32 reserved0 :2;
3201 + } bits;
3202 +} __packed;
3203 +
3204 +struct saRecord_s {
3205 + union saCmd0 saCmd0;
3206 + union saCmd1 saCmd1;
3207 + u32 saKey[8];
3208 + u32 saIDigest[8];
3209 + u32 saODigest[8];
3210 + u32 saSpi;
3211 + u32 saSeqNum[2];
3212 + u32 saSeqNumMask[2];
3213 + u32 saNonce;
3214 +} __packed;
3215 +
3216 +struct saState_s {
3217 + u32 stateIv[4];
3218 + u32 stateByteCnt[2];
3219 + u32 stateIDigest[8];
3220 +} __packed;
3221 +
3222 +union peCrtlStat_w {
3223 + u32 word;
3224 + struct {
3225 + u32 hostReady :1;
3226 + u32 peReady :1;
3227 + u32 reserved :1;
3228 + u32 initArc4 :1;
3229 + u32 hashFinal :1;
3230 + u32 haltMode :1;
3231 + u32 prngMode :2;
3232 + u32 padValue :8;
3233 + u32 errStatus :8;
3234 + u32 padCrtlStat :8;
3235 + } bits;
3236 +} __packed;
3237 +
3238 +union peLength_w {
3239 + u32 word;
3240 + struct {
3241 + u32 length :20;
3242 + u32 reserved :2;
3243 + u32 hostReady :1;
3244 + u32 peReady :1;
3245 + u32 byPass :8;
3246 + } bits;
3247 +} __packed;
3248 +
3249 +struct eip93_descriptor_s {
3250 + union peCrtlStat_w peCrtlStat;
3251 + u32 srcAddr;
3252 + u32 dstAddr;
3253 + u32 saAddr;
3254 + u32 stateAddr;
3255 + u32 arc4Addr;
3256 + u32 userId;
3257 + union peLength_w peLength;
3258 +} __packed;
3259 +
3260 +#endif
3261 --- a/drivers/crypto/Kconfig
3262 +++ b/drivers/crypto/Kconfig
3263 @@ -918,4 +918,6 @@ config CRYPTO_DEV_SA2UL
3264
3265 source "drivers/crypto/keembay/Kconfig"
3266
3267 +source "drivers/crypto/mtk-eip93/Kconfig"
3268 +
3269 endif # CRYPTO_HW
3270 --- a/drivers/crypto/Makefile
3271 +++ b/drivers/crypto/Makefile
3272 @@ -51,3 +51,4 @@ obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += x
3273 obj-y += hisilicon/
3274 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
3275 obj-y += keembay/
3276 +obj-$(CONFIG_CRYPTO_DEV_EIP93) += mtk-eip93/