openwrt/staging/luka.git: target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
1 From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 23 Apr 2019 17:41:43 +0800
4 Subject: [PATCH] sec: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch adding SEC (CAAM) support for Layerscape
10
11 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
12 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
15 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
18 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
19 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
20 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
24 ---
25 crypto/Kconfig | 20 +
26 crypto/Makefile | 1 +
27 crypto/chacha20poly1305.c | 2 -
28 crypto/tcrypt.c | 27 +-
29 crypto/testmgr.c | 244 ++
30 crypto/testmgr.h | 219 ++
31 crypto/tls.c | 607 ++++
32 drivers/crypto/Makefile | 2 +-
33 drivers/crypto/caam/Kconfig | 85 +-
34 drivers/crypto/caam/Makefile | 26 +-
35 drivers/crypto/caam/caamalg.c | 468 +++-
36 drivers/crypto/caam/caamalg_desc.c | 903 +++++-
37 drivers/crypto/caam/caamalg_desc.h | 52 +-
38 drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
39 drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
40 drivers/crypto/caam/caamalg_qi2.h | 276 ++
41 drivers/crypto/caam/caamhash.c | 192 +-
42 drivers/crypto/caam/caamhash_desc.c | 108 +
43 drivers/crypto/caam/caamhash_desc.h | 49 +
44 drivers/crypto/caam/caampkc.c | 52 +-
45 drivers/crypto/caam/caamrng.c | 52 +-
46 drivers/crypto/caam/compat.h | 4 +
47 drivers/crypto/caam/ctrl.c | 194 +-
48 drivers/crypto/caam/desc.h | 89 +-
49 drivers/crypto/caam/desc_constr.h | 59 +-
50 drivers/crypto/caam/dpseci.c | 865 ++++++
51 drivers/crypto/caam/dpseci.h | 433 +++
52 drivers/crypto/caam/dpseci_cmd.h | 287 ++
53 drivers/crypto/caam/error.c | 81 +-
54 drivers/crypto/caam/error.h | 6 +-
55 drivers/crypto/caam/intern.h | 102 +-
56 drivers/crypto/caam/jr.c | 84 +
57 drivers/crypto/caam/jr.h | 2 +
58 drivers/crypto/caam/key_gen.c | 30 -
59 drivers/crypto/caam/key_gen.h | 30 +
60 drivers/crypto/caam/qi.c | 134 +-
61 drivers/crypto/caam/qi.h | 2 +-
62 drivers/crypto/caam/regs.h | 76 +-
63 drivers/crypto/caam/sg_sw_qm.h | 46 +-
64 drivers/crypto/talitos.c | 8 +
65 include/crypto/chacha20.h | 1 +
66 41 files changed, 12088 insertions(+), 733 deletions(-)
67 create mode 100644 crypto/tls.c
68 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
69 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
70 create mode 100644 drivers/crypto/caam/caamhash_desc.c
71 create mode 100644 drivers/crypto/caam/caamhash_desc.h
72 create mode 100644 drivers/crypto/caam/dpseci.c
73 create mode 100644 drivers/crypto/caam/dpseci.h
74 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
75
76 --- a/crypto/Kconfig
77 +++ b/crypto/Kconfig
78 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
79 a sequence number xored with a salt. This is the default
80 algorithm for CBC.
81
82 +config CRYPTO_TLS
83 + tristate "TLS support"
84 + select CRYPTO_AEAD
85 + select CRYPTO_BLKCIPHER
86 + select CRYPTO_MANAGER
87 + select CRYPTO_HASH
88 + select CRYPTO_NULL
89 + select CRYPTO_AUTHENC
90 + help
91 + Support for TLS 1.0 record encryption and decryption
92 +
93 + This module adds support for encryption/decryption of TLS 1.0 frames
94 + using blockcipher algorithms. The name of the resulting algorithm is
95 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
96 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
97 + accelerated versions will be used automatically if available.
98 +
99 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
100 + operations through AF_ALG or cryptodev interfaces
101 +
102 comment "Block modes"
103
104 config CRYPTO_CBC
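
Note on the user-space path mentioned in the CRYPTO_TLS help text above: the template registers a normal AEAD, so it is reachable through AF_ALG like any other "aead" algorithm (assuming CONFIG_CRYPTO_USER_API_AEAD is enabled). The sketch below is illustrative only, with error handling omitted; the helper name and key material are invented, but the key blob layout mirrors the authenc()-style format used by the test vectors added to crypto/testmgr.h later in this patch.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>
    #include <linux/rtnetlink.h>

    /* Build the authenc()-style key blob that crypto_tls_setkey() expects:
     * an rtattr header (len 8, type 1 == CRYPTO_AUTHENC_KEYA_PARAM) whose
     * payload is the big-endian cipher key length, followed by the HMAC
     * key and then the AES key.
     */
    static int tls10_set_key(int tfm, const uint8_t *authkey, unsigned int alen,
                             const uint8_t *enckey, unsigned int elen)
    {
            uint8_t blob[256];
            struct rtattr *rta = (struct rtattr *)blob;
            uint32_t *param = RTA_DATA(rta);

            rta->rta_type = 1;              /* CRYPTO_AUTHENC_KEYA_PARAM */
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            *param = htonl(elen);           /* cipher key length, big endian */
            memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, alen);
            memcpy(blob + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);

            return setsockopt(tfm, SOL_ALG, ALG_SET_KEY, blob,
                              RTA_SPACE(sizeof(*param)) + alen + elen);
    }

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "aead",
                    .salg_name   = "tls10(hmac(sha1),cbc(aes))",
            };
            int tfm, req;

            tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

            tls10_set_key(tfm, (const uint8_t *)"authenticationkey20b", 20,
                          (const uint8_t *)"enckeyis16_bytes", 16);
            setsockopt(tfm, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20); /* SHA-1 ICV */

            req = accept(tfm, NULL, 0);
            /* From here: sendmsg() on 'req' with ALG_SET_OP, ALG_SET_IV and
             * ALG_SET_AEAD_ASSOCLEN control messages plus the 13-byte TLS
             * pseudo-header and the payload, then read() back the record.
             */
            close(req);
            close(tfm);
            return 0;
    }

A single op socket obtained from accept() can be reused across records; only the IV, pseudo-header and payload change per sendmsg().
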
105 --- a/crypto/Makefile
106 +++ b/crypto/Makefile
107 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
108 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
109 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
110 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
111 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
112 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
113 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
114 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
115 --- a/crypto/chacha20poly1305.c
116 +++ b/crypto/chacha20poly1305.c
117 @@ -22,8 +22,6 @@
118
119 #include "internal.h"
120
121 -#define CHACHAPOLY_IV_SIZE 12
122 -
123 struct chachapoly_instance_ctx {
124 struct crypto_skcipher_spawn chacha;
125 struct crypto_ahash_spawn poly;
126 --- a/crypto/tcrypt.c
127 +++ b/crypto/tcrypt.c
128 @@ -76,7 +76,7 @@ static char *check[] = {
129 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
130 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
131 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
132 - NULL
133 + "rsa", NULL
134 };
135
136 struct tcrypt_result {
137 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
138 iv);
139 aead_request_set_ad(req, aad_size);
140
141 - if (secs)
142 + if (secs) {
143 ret = test_aead_jiffies(req, enc, *b_size,
144 secs);
145 - else
146 + cond_resched();
147 + } else {
148 ret = test_aead_cycles(req, enc, *b_size);
149 + }
150
151 if (ret) {
152 pr_err("%s() failed return code=%d\n", e, ret);
153 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
154
155 ahash_request_set_crypt(req, sg, output, speed[i].plen);
156
157 - if (secs)
158 + if (secs) {
159 ret = test_ahash_jiffies(req, speed[i].blen,
160 speed[i].plen, output, secs);
161 - else
162 + cond_resched();
163 + } else {
164 ret = test_ahash_cycles(req, speed[i].blen,
165 speed[i].plen, output);
166 + }
167
168 if (ret) {
169 pr_err("hashing failed ret=%d\n", ret);
170 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
171
172 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
173
174 - if (secs)
175 + if (secs) {
176 ret = test_acipher_jiffies(req, enc,
177 *b_size, secs);
178 - else
179 + cond_resched();
180 + } else {
181 ret = test_acipher_cycles(req, enc,
182 *b_size);
183 + }
184
185 if (ret) {
186 pr_err("%s() failed flags=%x\n", e,
187 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
188 ret += tcrypt_test("hmac(sha3-512)");
189 break;
190
191 + case 115:
192 + ret += tcrypt_test("rsa");
193 + break;
194 +
195 case 150:
196 ret += tcrypt_test("ansi_cprng");
197 break;
198 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
199 case 190:
200 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
201 break;
202 + case 191:
203 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
204 + break;
205 case 200:
206 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
207 speed_template_16_24_32);
208 --- a/crypto/testmgr.c
209 +++ b/crypto/testmgr.c
210 @@ -117,6 +117,13 @@ struct drbg_test_suite {
211 unsigned int count;
212 };
213
214 +struct tls_test_suite {
215 + struct {
216 + struct tls_testvec *vecs;
217 + unsigned int count;
218 + } enc, dec;
219 +};
220 +
221 struct akcipher_test_suite {
222 const struct akcipher_testvec *vecs;
223 unsigned int count;
224 @@ -140,6 +147,7 @@ struct alg_test_desc {
225 struct hash_test_suite hash;
226 struct cprng_test_suite cprng;
227 struct drbg_test_suite drbg;
228 + struct tls_test_suite tls;
229 struct akcipher_test_suite akcipher;
230 struct kpp_test_suite kpp;
231 } suite;
232 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
233 return 0;
234 }
235
236 +static int __test_tls(struct crypto_aead *tfm, int enc,
237 + struct tls_testvec *template, unsigned int tcount,
238 + const bool diff_dst)
239 +{
240 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
241 + unsigned int i, k, authsize;
242 + char *q;
243 + struct aead_request *req;
244 + struct scatterlist *sg;
245 + struct scatterlist *sgout;
246 + const char *e, *d;
247 + struct tcrypt_result result;
248 + void *input;
249 + void *output;
250 + void *assoc;
251 + char *iv;
252 + char *key;
253 + char *xbuf[XBUFSIZE];
254 + char *xoutbuf[XBUFSIZE];
255 + char *axbuf[XBUFSIZE];
256 + int ret = -ENOMEM;
257 +
258 + if (testmgr_alloc_buf(xbuf))
259 + goto out_noxbuf;
260 +
261 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
262 + goto out_nooutbuf;
263 +
264 + if (testmgr_alloc_buf(axbuf))
265 + goto out_noaxbuf;
266 +
267 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
268 + if (!iv)
269 + goto out_noiv;
270 +
271 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
272 + if (!key)
273 + goto out_nokey;
274 +
275 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
276 + if (!sg)
277 + goto out_nosg;
278 +
279 + sgout = sg + 8;
280 +
281 + d = diff_dst ? "-ddst" : "";
282 + e = enc ? "encryption" : "decryption";
283 +
284 + init_completion(&result.completion);
285 +
286 + req = aead_request_alloc(tfm, GFP_KERNEL);
287 + if (!req) {
288 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
289 + d, algo);
290 + goto out;
291 + }
292 +
293 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
294 + tcrypt_complete, &result);
295 +
296 + for (i = 0; i < tcount; i++) {
297 + input = xbuf[0];
298 + assoc = axbuf[0];
299 +
300 + ret = -EINVAL;
301 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
302 + template[i].alen > PAGE_SIZE))
303 + goto out;
304 +
305 + memcpy(assoc, template[i].assoc, template[i].alen);
306 + memcpy(input, template[i].input, template[i].ilen);
307 +
308 + if (template[i].iv)
309 + memcpy(iv, template[i].iv, MAX_IVLEN);
310 + else
311 + memset(iv, 0, MAX_IVLEN);
312 +
313 + crypto_aead_clear_flags(tfm, ~0);
314 +
315 + if (template[i].klen > MAX_KEYLEN) {
316 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
317 + d, i, algo, template[i].klen, MAX_KEYLEN);
318 + ret = -EINVAL;
319 + goto out;
320 + }
321 + memcpy(key, template[i].key, template[i].klen);
322 +
323 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
324 + if (!ret == template[i].fail) {
325 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
326 + d, i, algo, crypto_aead_get_flags(tfm));
327 + goto out;
328 + } else if (ret)
329 + continue;
330 +
331 + authsize = 20;
332 + ret = crypto_aead_setauthsize(tfm, authsize);
333 + if (ret) {
334 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
335 + d, authsize, i, algo);
336 + goto out;
337 + }
338 +
339 + k = !!template[i].alen;
340 + sg_init_table(sg, k + 1);
341 + sg_set_buf(&sg[0], assoc, template[i].alen);
342 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
343 + template[i].ilen));
344 + output = input;
345 +
346 + if (diff_dst) {
347 + sg_init_table(sgout, k + 1);
348 + sg_set_buf(&sgout[0], assoc, template[i].alen);
349 +
350 + output = xoutbuf[0];
351 + sg_set_buf(&sgout[k], output,
352 + (enc ? template[i].rlen : template[i].ilen));
353 + }
354 +
355 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
356 + template[i].ilen, iv);
357 +
358 + aead_request_set_ad(req, template[i].alen);
359 +
360 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
361 +
362 + switch (ret) {
363 + case 0:
364 + if (template[i].novrfy) {
365 + /* verification was supposed to fail */
366 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
367 + d, e, i, algo);
368 + /* so really, we got a bad message */
369 + ret = -EBADMSG;
370 + goto out;
371 + }
372 + break;
373 + case -EINPROGRESS:
374 + case -EBUSY:
375 + wait_for_completion(&result.completion);
376 + reinit_completion(&result.completion);
377 + ret = result.err;
378 + if (!ret)
379 + break;
380 + case -EBADMSG:
381 + /* verification failure was expected */
382 + if (template[i].novrfy)
383 + continue;
384 + /* fall through */
385 + default:
386 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
387 + d, e, i, algo, -ret);
388 + goto out;
389 + }
390 +
391 + q = output;
392 + if (memcmp(q, template[i].result, template[i].rlen)) {
393 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
394 + d, i, e, algo);
395 + hexdump(q, template[i].rlen);
396 + pr_err("should be:\n");
397 + hexdump(template[i].result, template[i].rlen);
398 + ret = -EINVAL;
399 + goto out;
400 + }
401 + }
402 +
403 +out:
404 + aead_request_free(req);
405 +
406 + kfree(sg);
407 +out_nosg:
408 + kfree(key);
409 +out_nokey:
410 + kfree(iv);
411 +out_noiv:
412 + testmgr_free_buf(axbuf);
413 +out_noaxbuf:
414 + if (diff_dst)
415 + testmgr_free_buf(xoutbuf);
416 +out_nooutbuf:
417 + testmgr_free_buf(xbuf);
418 +out_noxbuf:
419 + return ret;
420 +}
421 +
422 +static int test_tls(struct crypto_aead *tfm, int enc,
423 + struct tls_testvec *template, unsigned int tcount)
424 +{
425 + int ret;
426 + /* test 'dst == src' case */
427 + ret = __test_tls(tfm, enc, template, tcount, false);
428 + if (ret)
429 + return ret;
430 + /* test 'dst != src' case */
431 + return __test_tls(tfm, enc, template, tcount, true);
432 +}
433 +
434 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
435 + u32 type, u32 mask)
436 +{
437 + struct crypto_aead *tfm;
438 + int err = 0;
439 +
440 + tfm = crypto_alloc_aead(driver, type, mask);
441 + if (IS_ERR(tfm)) {
442 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
443 + driver, PTR_ERR(tfm));
444 + return PTR_ERR(tfm);
445 + }
446 +
447 + if (desc->suite.tls.enc.vecs) {
448 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
449 + desc->suite.tls.enc.count);
450 + if (err)
451 + goto out;
452 + }
453 +
454 + if (!err && desc->suite.tls.dec.vecs)
455 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
456 + desc->suite.tls.dec.count);
457 +
458 +out:
459 + crypto_free_aead(tfm);
460 + return err;
461 +}
462 +
463 static int test_cipher(struct crypto_cipher *tfm, int enc,
464 const struct cipher_testvec *template,
465 unsigned int tcount)
466 @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
467 .hash = __VECS(tgr192_tv_template)
468 }
469 }, {
470 + .alg = "tls10(hmac(sha1),cbc(aes))",
471 + .test = alg_test_tls,
472 + .suite = {
473 + .tls = {
474 + .enc = __VECS(tls_enc_tv_template),
475 + .dec = __VECS(tls_dec_tv_template)
476 + }
477 + }
478 + }, {
479 .alg = "vmac(aes)",
480 .test = alg_test_hash,
481 .suite = {
482 --- a/crypto/testmgr.h
483 +++ b/crypto/testmgr.h
484 @@ -125,6 +125,20 @@ struct drbg_testvec {
485 size_t expectedlen;
486 };
487
488 +struct tls_testvec {
489 + char *key; /* wrapped keys for encryption and authentication */
490 + char *iv; /* initialization vector */
491 + char *input; /* input data */
492 + char *assoc; /* associated data: seq num, type, version, input len */
493 + char *result; /* result data */
494 + unsigned char fail; /* the test failure is expected */
495 + unsigned char novrfy; /* dec verification failure expected */
496 + unsigned char klen; /* key length */
497 + unsigned short ilen; /* input data length */
498 + unsigned short alen; /* associated data length */
499 + unsigned short rlen; /* result length */
500 +};
501 +
502 struct akcipher_testvec {
503 const unsigned char *key;
504 const unsigned char *m;
505 @@ -153,6 +167,211 @@ struct kpp_testvec {
506 static const char zeroed_string[48];
507
508 /*
509 + * TLS1.0 synthetic test vectors
510 + */
511 +static struct tls_testvec tls_enc_tv_template[] = {
512 + {
513 +#ifdef __LITTLE_ENDIAN
514 + .key = "\x08\x00" /* rta length */
515 + "\x01\x00" /* rta type */
516 +#else
517 + .key = "\x00\x08" /* rta length */
518 + "\x00\x01" /* rta type */
519 +#endif
520 + "\x00\x00\x00\x10" /* enc key length */
521 + "authenticationkey20benckeyis16_bytes",
522 + .klen = 8 + 20 + 16,
523 + .iv = "iv0123456789abcd",
524 + .input = "Single block msg",
525 + .ilen = 16,
526 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
527 + "\x00\x03\x01\x00\x10",
528 + .alen = 13,
529 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
530 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
531 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
532 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
533 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
534 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
535 + .rlen = 16 + 20 + 12,
536 + }, {
537 +#ifdef __LITTLE_ENDIAN
538 + .key = "\x08\x00" /* rta length */
539 + "\x01\x00" /* rta type */
540 +#else
541 + .key = "\x00\x08" /* rta length */
542 + "\x00\x01" /* rta type */
543 +#endif
544 + "\x00\x00\x00\x10" /* enc key length */
545 + "authenticationkey20benckeyis16_bytes",
546 + .klen = 8 + 20 + 16,
547 + .iv = "iv0123456789abcd",
548 + .input = "",
549 + .ilen = 0,
550 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
551 + "\x00\x03\x01\x00\x00",
552 + .alen = 13,
553 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
554 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
555 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
556 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
557 + .rlen = 20 + 12,
558 + }, {
559 +#ifdef __LITTLE_ENDIAN
560 + .key = "\x08\x00" /* rta length */
561 + "\x01\x00" /* rta type */
562 +#else
563 + .key = "\x00\x08" /* rta length */
564 + "\x00\x01" /* rta type */
565 +#endif
566 + "\x00\x00\x00\x10" /* enc key length */
567 + "authenticationkey20benckeyis16_bytes",
568 + .klen = 8 + 20 + 16,
569 + .iv = "iv0123456789abcd",
570 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
571 + " plaintext285 bytes plaintext285 bytes plaintext285"
572 + " bytes plaintext285 bytes plaintext285 bytes"
573 + " plaintext285 bytes plaintext285 bytes plaintext285"
574 + " bytes plaintext285 bytes plaintext285 bytes"
575 + " plaintext285 bytes plaintext285 bytes plaintext285"
576 + " bytes plaintext285 bytes plaintext",
577 + .ilen = 285,
578 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
579 + "\x00\x03\x01\x01\x1d",
580 + .alen = 13,
581 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
582 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
583 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
584 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
585 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
586 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
587 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
588 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
589 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
590 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
591 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
592 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
593 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
594 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
595 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
596 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
597 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
598 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
599 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
600 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
601 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
602 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
603 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
604 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
605 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
606 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
607 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
608 + .rlen = 285 + 20 + 15,
609 + }
610 +};
611 +
612 +static struct tls_testvec tls_dec_tv_template[] = {
613 + {
614 +#ifdef __LITTLE_ENDIAN
615 + .key = "\x08\x00" /* rta length */
616 + "\x01\x00" /* rta type */
617 +#else
618 + .key = "\x00\x08" /* rta length */
619 + "\x00\x01" /* rta type */
620 +#endif
621 + "\x00\x00\x00\x10" /* enc key length */
622 + "authenticationkey20benckeyis16_bytes",
623 + .klen = 8 + 20 + 16,
624 + .iv = "iv0123456789abcd",
625 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
626 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
627 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
628 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
629 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
630 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
631 + .ilen = 16 + 20 + 12,
632 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
633 + "\x00\x03\x01\x00\x30",
634 + .alen = 13,
635 + .result = "Single block msg",
636 + .rlen = 16,
637 + }, {
638 +#ifdef __LITTLE_ENDIAN
639 + .key = "\x08\x00" /* rta length */
640 + "\x01\x00" /* rta type */
641 +#else
642 + .key = "\x00\x08" /* rta length */
643 + "\x00\x01" /* rta type */
644 +#endif
645 + "\x00\x00\x00\x10" /* enc key length */
646 + "authenticationkey20benckeyis16_bytes",
647 + .klen = 8 + 20 + 16,
648 + .iv = "iv0123456789abcd",
649 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
650 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
651 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
652 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
653 + .ilen = 20 + 12,
654 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
655 + "\x00\x03\x01\x00\x20",
656 + .alen = 13,
657 + .result = "",
658 + .rlen = 0,
659 + }, {
660 +#ifdef __LITTLE_ENDIAN
661 + .key = "\x08\x00" /* rta length */
662 + "\x01\x00" /* rta type */
663 +#else
664 + .key = "\x00\x08" /* rta length */
665 + "\x00\x01" /* rta type */
666 +#endif
667 + "\x00\x00\x00\x10" /* enc key length */
668 + "authenticationkey20benckeyis16_bytes",
669 + .klen = 8 + 20 + 16,
670 + .iv = "iv0123456789abcd",
671 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
672 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
673 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
674 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
675 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
676 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
677 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
678 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
679 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
680 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
681 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
682 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
683 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
684 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
685 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
686 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
687 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
688 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
689 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
690 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
691 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
692 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
693 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
694 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
695 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
696 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
697 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
698 +
699 + .ilen = 285 + 20 + 15,
700 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
701 + "\x00\x03\x01\x01\x40",
702 + .alen = 13,
703 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
704 + " plaintext285 bytes plaintext285 bytes plaintext285"
705 + " bytes plaintext285 bytes plaintext285 bytes"
706 + " plaintext285 bytes plaintext285 bytes plaintext285"
707 + " bytes plaintext285 bytes plaintext285 bytes"
708 + " plaintext285 bytes plaintext285 bytes plaintext",
709 + .rlen = 285,
710 + }
711 +};
712 +
713 +/*
714 * RSA test vectors. Borrowed from openSSL.
715 */
716 static const struct akcipher_testvec rsa_tv_template[] = {
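
For reference, the .rlen values in the encryption vectors above follow directly from the TLS 1.0 record layout implemented by crypto/tls.c (added below): the ciphertext covers the payload plus the 20-byte HMAC-SHA1 ICV, padded up to the next multiple of the 16-byte AES block size with at least one padding byte. Worked out for the three cases: 16 + 20 = 36, padded to 48 (12 pad bytes); 0 + 20 = 20, padded to 32 (12 pad bytes); 285 + 20 = 305, padded to 320 (15 pad bytes). Likewise .klen = 8 + 20 + 16 because the key blob is in authenc() format: a struct rtattr with length 8 and type 1 whose payload is the big-endian cipher key length (0x00000010), followed by the 20-byte authentication key and the 16-byte AES key.
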
717 --- /dev/null
718 +++ b/crypto/tls.c
719 @@ -0,0 +1,607 @@
720 +/*
721 + * Copyright 2013 Freescale Semiconductor, Inc.
722 + * Copyright 2017 NXP Semiconductor, Inc.
723 + *
724 + * This program is free software; you can redistribute it and/or modify it
725 + * under the terms of the GNU General Public License as published by the Free
726 + * Software Foundation; either version 2 of the License, or (at your option)
727 + * any later version.
728 + *
729 + */
730 +
731 +#include <crypto/internal/aead.h>
732 +#include <crypto/internal/hash.h>
733 +#include <crypto/internal/skcipher.h>
734 +#include <crypto/authenc.h>
735 +#include <crypto/null.h>
736 +#include <crypto/scatterwalk.h>
737 +#include <linux/err.h>
738 +#include <linux/init.h>
739 +#include <linux/module.h>
740 +#include <linux/rtnetlink.h>
741 +
742 +struct tls_instance_ctx {
743 + struct crypto_ahash_spawn auth;
744 + struct crypto_skcipher_spawn enc;
745 +};
746 +
747 +struct crypto_tls_ctx {
748 + unsigned int reqoff;
749 + struct crypto_ahash *auth;
750 + struct crypto_skcipher *enc;
751 + struct crypto_skcipher *null;
752 +};
753 +
754 +struct tls_request_ctx {
755 + /*
756 + * cryptlen holds the payload length in the case of encryption or
757 + * payload_len + icv_len + padding_len in case of decryption
758 + */
759 + unsigned int cryptlen;
760 + /* working space for partial results */
761 + struct scatterlist tmp[2];
762 + struct scatterlist cipher[2];
763 + struct scatterlist dst[2];
764 + char tail[];
765 +};
766 +
767 +struct async_op {
768 + struct completion completion;
769 + int err;
770 +};
771 +
772 +static void tls_async_op_done(struct crypto_async_request *req, int err)
773 +{
774 + struct async_op *areq = req->data;
775 +
776 + if (err == -EINPROGRESS)
777 + return;
778 +
779 + areq->err = err;
780 + complete(&areq->completion);
781 +}
782 +
783 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
784 + unsigned int keylen)
785 +{
786 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
787 + struct crypto_ahash *auth = ctx->auth;
788 + struct crypto_skcipher *enc = ctx->enc;
789 + struct crypto_authenc_keys keys;
790 + int err = -EINVAL;
791 +
792 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
793 + goto badkey;
794 +
795 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
796 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
797 + CRYPTO_TFM_REQ_MASK);
798 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
799 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
800 + CRYPTO_TFM_RES_MASK);
801 +
802 + if (err)
803 + goto out;
804 +
805 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
806 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
807 + CRYPTO_TFM_REQ_MASK);
808 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
809 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
810 + CRYPTO_TFM_RES_MASK);
811 +
812 +out:
813 + return err;
814 +
815 +badkey:
816 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
817 + goto out;
818 +}
819 +
820 +/**
821 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
822 + * @hash: (output) buffer to save the digest into
823 + * @src: (input) scatterlist with the assoc and payload data
824 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
825 + * @req: (input) aead request
826 + **/
827 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
828 + unsigned int srclen, struct aead_request *req)
829 +{
830 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
831 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
832 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
833 + struct async_op ahash_op;
834 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
835 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
836 + int err = -EBADMSG;
837 +
838 + /* Bail out if the request assoc len is 0 */
839 + if (!req->assoclen)
840 + return err;
841 +
842 + init_completion(&ahash_op.completion);
843 +
844 + /* the hash transform to be executed comes from the original request */
845 + ahash_request_set_tfm(ahreq, ctx->auth);
846 + /* prepare the hash request with input data and result pointer */
847 + ahash_request_set_crypt(ahreq, src, hash, srclen);
848 + /* set the notifier for when the async hash function returns */
849 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
850 + tls_async_op_done, &ahash_op);
851 +
852 + /* Calculate the digest on the given data. The result is put in hash */
853 + err = crypto_ahash_digest(ahreq);
854 + if (err == -EINPROGRESS) {
855 + err = wait_for_completion_interruptible(&ahash_op.completion);
856 + if (!err)
857 + err = ahash_op.err;
858 + }
859 +
860 + return err;
861 +}
862 +
863 +/**
864 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
865 + * @hash: (output) buffer to save the digest and padding into
866 + * @phashlen: (output) the size of digest + padding
867 + * @req: (input) aead request
868 + **/
869 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
870 + struct aead_request *req)
871 +{
872 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
873 + unsigned int hash_size = crypto_aead_authsize(tls);
874 + unsigned int block_size = crypto_aead_blocksize(tls);
875 + unsigned int srclen = req->cryptlen + hash_size;
876 + unsigned int icvlen = req->cryptlen + req->assoclen;
877 + unsigned int padlen;
878 + int err;
879 +
880 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
881 + if (err)
882 + goto out;
883 +
884 + /* add padding after digest */
885 + padlen = block_size - (srclen % block_size);
886 + memset(hash + hash_size, padlen - 1, padlen);
887 +
888 + *phashlen = hash_size + padlen;
889 +out:
890 + return err;
891 +}
892 +
893 +static int crypto_tls_copy_data(struct aead_request *req,
894 + struct scatterlist *src,
895 + struct scatterlist *dst,
896 + unsigned int len)
897 +{
898 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
899 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
900 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
901 +
902 + skcipher_request_set_tfm(skreq, ctx->null);
903 + skcipher_request_set_callback(skreq, aead_request_flags(req),
904 + NULL, NULL);
905 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
906 +
907 + return crypto_skcipher_encrypt(skreq);
908 +}
909 +
910 +static int crypto_tls_encrypt(struct aead_request *req)
911 +{
912 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
913 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
914 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
915 + struct skcipher_request *skreq;
916 + struct scatterlist *cipher = treq_ctx->cipher;
917 + struct scatterlist *tmp = treq_ctx->tmp;
918 + struct scatterlist *sg, *src, *dst;
919 + unsigned int cryptlen, phashlen;
920 + u8 *hash = treq_ctx->tail;
921 + int err;
922 +
923 + /*
924 + * The hash result is saved at the beginning of the tls request ctx
925 + * and is aligned as required by the hash transform. Enough space was
926 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
927 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
928 + * the result is not overwritten by the second (cipher) request.
929 + */
930 + hash = (u8 *)ALIGN((unsigned long)hash +
931 + crypto_ahash_alignmask(ctx->auth),
932 + crypto_ahash_alignmask(ctx->auth) + 1);
933 +
934 + /*
935 + * STEP 1: create ICV together with necessary padding
936 + */
937 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
938 + if (err)
939 + return err;
940 +
941 + /*
942 + * STEP 2: Hash and padding are combined with the payload
943 + * depending on the form it arrives. Scatter tables must have at least
944 + * one page of data before chaining with another table and can't have
945 + * an empty data page. The following code addresses these requirements.
946 + *
947 + * If the payload is empty, only the hash is encrypted, otherwise the
948 + * payload scatterlist is merged with the hash. A special merging case
949 + * is when the payload has only one page of data. In that case the
950 + * payload page is moved to another scatterlist and prepared there for
951 + * encryption.
952 + */
953 + if (req->cryptlen) {
954 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
955 +
956 + sg_init_table(cipher, 2);
957 + sg_set_buf(cipher + 1, hash, phashlen);
958 +
959 + if (sg_is_last(src)) {
960 + sg_set_page(cipher, sg_page(src), req->cryptlen,
961 + src->offset);
962 + src = cipher;
963 + } else {
964 + unsigned int rem_len = req->cryptlen;
965 +
966 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
967 + rem_len -= min(rem_len, sg->length);
968 +
969 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
970 + sg_chain(sg, 1, cipher);
971 + }
972 + } else {
973 + sg_init_one(cipher, hash, phashlen);
974 + src = cipher;
975 + }
976 +
977 + /**
978 + * If src != dst copy the associated data from source to destination.
979 + * In both cases fast-forward past the associated data in the dest.
980 + */
981 + if (req->src != req->dst) {
982 + err = crypto_tls_copy_data(req, req->src, req->dst,
983 + req->assoclen);
984 + if (err)
985 + return err;
986 + }
987 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
988 +
989 + /*
990 + * STEP 3: encrypt the frame and return the result
991 + */
992 + cryptlen = req->cryptlen + phashlen;
993 +
994 + /*
995 + * The hash and the cipher are applied at different times and their
996 + * requests can use the same memory space without interference
997 + */
998 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
999 + skcipher_request_set_tfm(skreq, ctx->enc);
1000 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1001 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1002 + req->base.complete, req->base.data);
1003 + /*
1004 + * Apply the cipher transform. The result will be in req->dst when the
1005 + * asynchronous call terminates
1006 + */
1007 + err = crypto_skcipher_encrypt(skreq);
1008 +
1009 + return err;
1010 +}
1011 +
1012 +static int crypto_tls_decrypt(struct aead_request *req)
1013 +{
1014 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
1015 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
1016 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
1017 + unsigned int cryptlen = req->cryptlen;
1018 + unsigned int hash_size = crypto_aead_authsize(tls);
1019 + unsigned int block_size = crypto_aead_blocksize(tls);
1020 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1021 + struct scatterlist *tmp = treq_ctx->tmp;
1022 + struct scatterlist *src, *dst;
1023 +
1024 + u8 padding[255]; /* padding can be 0-255 bytes */
1025 + u8 pad_size;
1026 + u16 *len_field;
1027 + u8 *ihash, *hash = treq_ctx->tail;
1028 +
1029 + int paderr = 0;
1030 + int err = -EINVAL;
1031 + int i;
1032 + struct async_op ciph_op;
1033 +
1034 + /*
1035 + * Rule out bad packets. The input packet length must be at least one
1036 + * byte more than the hash_size
1037 + */
1038 + if (cryptlen <= hash_size || cryptlen % block_size)
1039 + goto out;
1040 +
1041 + /*
1042 + * Step 1 - Decrypt the source. Fast-forward past the associated data
1043 + * to the encrypted data. The result will be overwritten in place so
1044 + * that the decrypted data will be adjacent to the associated data. The
1045 + * last step (computing the hash) will have its input data already
1046 + * prepared and ready to be accessed at req->src.
1047 + */
1048 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1049 + dst = src;
1050 +
1051 + init_completion(&ciph_op.completion);
1052 + skcipher_request_set_tfm(skreq, ctx->enc);
1053 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1054 + tls_async_op_done, &ciph_op);
1055 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1056 + err = crypto_skcipher_decrypt(skreq);
1057 + if (err == -EINPROGRESS) {
1058 + err = wait_for_completion_interruptible(&ciph_op.completion);
1059 + if (!err)
1060 + err = ciph_op.err;
1061 + }
1062 + if (err)
1063 + goto out;
1064 +
1065 + /*
1066 + * Step 2 - Verify padding
1067 + * Retrieve the last byte of the payload; this is the padding size.
1068 + */
1069 + cryptlen -= 1;
1070 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1071 +
1072 + /* RFC recommendation for invalid padding size. */
1073 + if (cryptlen < pad_size + hash_size) {
1074 + pad_size = 0;
1075 + paderr = -EBADMSG;
1076 + }
1077 + cryptlen -= pad_size;
1078 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1079 +
1080 + /* Padding content must be equal to pad_size. We verify it all */
1081 + for (i = 0; i < pad_size; i++)
1082 + if (padding[i] != pad_size)
1083 + paderr = -EBADMSG;
1084 +
1085 + /*
1086 + * Step 3 - Verify hash
1087 + * Align the digest result as required by the hash transform. Enough
1088 + * space was allocated in crypto_tls_init_tfm
1089 + */
1090 + hash = (u8 *)ALIGN((unsigned long)hash +
1091 + crypto_ahash_alignmask(ctx->auth),
1092 + crypto_ahash_alignmask(ctx->auth) + 1);
1093 + /*
1094 + * Two bytes at the end of the associated data make the length field.
1095 + * It must be updated with the length of the cleartext message before
1096 + * the hash is calculated.
1097 + */
1098 + len_field = sg_virt(req->src) + req->assoclen - 2;
1099 + cryptlen -= hash_size;
1100 + *len_field = htons(cryptlen);
1101 +
1102 + /* This is the hash from the decrypted packet. Save it for later */
1103 + ihash = hash + hash_size;
1104 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1105 +
1106 + /* Now compute and compare our ICV with the one from the packet */
1107 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1108 + if (!err)
1109 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1110 +
1111 + if (req->src != req->dst) {
1112 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1113 + req->assoclen);
1114 + if (err)
1115 + goto out;
1116 + }
1117 +
1118 + /* return the first found error */
1119 + if (paderr)
1120 + err = paderr;
1121 +
1122 +out:
1123 + aead_request_complete(req, err);
1124 + return err;
1125 +}
1126 +
1127 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1128 +{
1129 + struct aead_instance *inst = aead_alg_instance(tfm);
1130 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1131 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1132 + struct crypto_ahash *auth;
1133 + struct crypto_skcipher *enc;
1134 + struct crypto_skcipher *null;
1135 + int err;
1136 +
1137 + auth = crypto_spawn_ahash(&ictx->auth);
1138 + if (IS_ERR(auth))
1139 + return PTR_ERR(auth);
1140 +
1141 + enc = crypto_spawn_skcipher(&ictx->enc);
1142 + err = PTR_ERR(enc);
1143 + if (IS_ERR(enc))
1144 + goto err_free_ahash;
1145 +
1146 + null = crypto_get_default_null_skcipher2();
1147 + err = PTR_ERR(null);
1148 + if (IS_ERR(null))
1149 + goto err_free_skcipher;
1150 +
1151 + ctx->auth = auth;
1152 + ctx->enc = enc;
1153 + ctx->null = null;
1154 +
1155 + /*
1156 + * Allow enough space for two digests. The two digests will be compared
1157 + * during the decryption phase. One will come from the decrypted packet
1158 + * and the other will be calculated. For encryption, one digest is
1159 + * padded (up to a cipher blocksize) and chained with the payload
1160 + */
1161 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1162 + crypto_ahash_alignmask(auth),
1163 + crypto_ahash_alignmask(auth) + 1) +
1164 + max(crypto_ahash_digestsize(auth),
1165 + crypto_skcipher_blocksize(enc));
1166 +
1167 + crypto_aead_set_reqsize(tfm,
1168 + sizeof(struct tls_request_ctx) +
1169 + ctx->reqoff +
1170 + max_t(unsigned int,
1171 + crypto_ahash_reqsize(auth) +
1172 + sizeof(struct ahash_request),
1173 + crypto_skcipher_reqsize(enc) +
1174 + sizeof(struct skcipher_request)));
1175 +
1176 + return 0;
1177 +
1178 +err_free_skcipher:
1179 + crypto_free_skcipher(enc);
1180 +err_free_ahash:
1181 + crypto_free_ahash(auth);
1182 + return err;
1183 +}
1184 +
1185 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1186 +{
1187 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1188 +
1189 + crypto_free_ahash(ctx->auth);
1190 + crypto_free_skcipher(ctx->enc);
1191 + crypto_put_default_null_skcipher2();
1192 +}
1193 +
1194 +static void crypto_tls_free(struct aead_instance *inst)
1195 +{
1196 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1197 +
1198 + crypto_drop_skcipher(&ctx->enc);
1199 + crypto_drop_ahash(&ctx->auth);
1200 + kfree(inst);
1201 +}
1202 +
1203 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1204 +{
1205 + struct crypto_attr_type *algt;
1206 + struct aead_instance *inst;
1207 + struct hash_alg_common *auth;
1208 + struct crypto_alg *auth_base;
1209 + struct skcipher_alg *enc;
1210 + struct tls_instance_ctx *ctx;
1211 + const char *enc_name;
1212 + int err;
1213 +
1214 + algt = crypto_get_attr_type(tb);
1215 + if (IS_ERR(algt))
1216 + return PTR_ERR(algt);
1217 +
1218 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1219 + return -EINVAL;
1220 +
1221 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1222 + CRYPTO_ALG_TYPE_AHASH_MASK |
1223 + crypto_requires_sync(algt->type, algt->mask));
1224 + if (IS_ERR(auth))
1225 + return PTR_ERR(auth);
1226 +
1227 + auth_base = &auth->base;
1228 +
1229 + enc_name = crypto_attr_alg_name(tb[2]);
1230 + err = PTR_ERR(enc_name);
1231 + if (IS_ERR(enc_name))
1232 + goto out_put_auth;
1233 +
1234 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1235 + err = -ENOMEM;
1236 + if (!inst)
1237 + goto out_put_auth;
1238 +
1239 + ctx = aead_instance_ctx(inst);
1240 +
1241 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
1242 + aead_crypto_instance(inst));
1243 + if (err)
1244 + goto err_free_inst;
1245 +
1246 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1247 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1248 + crypto_requires_sync(algt->type,
1249 + algt->mask));
1250 + if (err)
1251 + goto err_drop_auth;
1252 +
1253 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
1254 +
1255 + err = -ENAMETOOLONG;
1256 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1257 + "tls10(%s,%s)", auth_base->cra_name,
1258 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1259 + goto err_drop_enc;
1260 +
1261 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1262 + "tls10(%s,%s)", auth_base->cra_driver_name,
1263 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1264 + goto err_drop_enc;
1265 +
1266 + inst->alg.base.cra_flags = (auth_base->cra_flags |
1267 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1268 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1269 + auth_base->cra_priority;
1270 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1271 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1272 + enc->base.cra_alignmask;
1273 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1274 +
1275 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1276 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1277 + inst->alg.maxauthsize = auth->digestsize;
1278 +
1279 + inst->alg.init = crypto_tls_init_tfm;
1280 + inst->alg.exit = crypto_tls_exit_tfm;
1281 +
1282 + inst->alg.setkey = crypto_tls_setkey;
1283 + inst->alg.encrypt = crypto_tls_encrypt;
1284 + inst->alg.decrypt = crypto_tls_decrypt;
1285 +
1286 + inst->free = crypto_tls_free;
1287 +
1288 + err = aead_register_instance(tmpl, inst);
1289 + if (err)
1290 + goto err_drop_enc;
1291 +
1292 +out:
1293 + crypto_mod_put(auth_base);
1294 + return err;
1295 +
1296 +err_drop_enc:
1297 + crypto_drop_skcipher(&ctx->enc);
1298 +err_drop_auth:
1299 + crypto_drop_ahash(&ctx->auth);
1300 +err_free_inst:
1301 + kfree(inst);
1302 +out_put_auth:
1303 + goto out;
1304 +}
1305 +
1306 +static struct crypto_template crypto_tls_tmpl = {
1307 + .name = "tls10",
1308 + .create = crypto_tls_create,
1309 + .module = THIS_MODULE,
1310 +};
1311 +
1312 +static int __init crypto_tls_module_init(void)
1313 +{
1314 + return crypto_register_template(&crypto_tls_tmpl);
1315 +}
1316 +
1317 +static void __exit crypto_tls_module_exit(void)
1318 +{
1319 + crypto_unregister_template(&crypto_tls_tmpl);
1320 +}
1321 +
1322 +module_init(crypto_tls_module_init);
1323 +module_exit(crypto_tls_module_exit);
1324 +
1325 +MODULE_LICENSE("GPL");
1326 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
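
For in-kernel users the template is just another AEAD, so it is driven the same way the testmgr code earlier in this patch drives it. The helper below is a hypothetical, minimal sketch: single flat buffer, synchronous wait via DECLARE_CRYPTO_WAIT()/crypto_wait_req() (assumed available in this kernel; the testmgr code above open-codes a completion instead), and an authenc()-format key blob as described above.

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper, for illustration only: encrypt one TLS 1.0 record
     * in place. 'buf' holds assoclen bytes of pseudo-header followed by 'len'
     * bytes of payload, with tail room for the 20-byte ICV and up to one AES
     * block of padding. 'key' is an authenc()-format blob as in the test vectors.
     */
    static int tls10_encrypt_record(u8 *buf, unsigned int assoclen,
                                    unsigned int len, u8 *iv,
                                    const u8 *key, unsigned int keylen)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen);
            if (!err)
                    err = crypto_aead_setauthsize(tfm, 20);    /* HMAC-SHA1 ICV */
            if (err)
                    goto out_free_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            /* One scatterlist entry covering assoc + payload + ICV/padding room */
            sg_init_one(&sg, buf, assoclen + len + 20 + 16);
            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
            aead_request_set_ad(req, assoclen);
            aead_request_set_crypt(req, &sg, &sg, len, iv);

            err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

            aead_request_free(req);
    out_free_tfm:
            crypto_free_aead(tfm);
            return err;
    }

On return the buffer holds the pseudo-header followed by the encrypted payload, ICV and padding; decryption is symmetrical, with the full padded length passed as cryptlen (compare the tls_dec_tv_template vectors above).
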
1327 --- a/drivers/crypto/Makefile
1328 +++ b/drivers/crypto/Makefile
1329 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1330 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1331 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1332 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1333 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1334 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1335 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1336 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1337 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1338 --- a/drivers/crypto/caam/Kconfig
1339 +++ b/drivers/crypto/caam/Kconfig
1340 @@ -1,7 +1,17 @@
1341 +config CRYPTO_DEV_FSL_CAAM_COMMON
1342 + tristate
1343 +
1344 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1345 + tristate
1346 +
1347 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1348 + tristate
1349 +
1350 config CRYPTO_DEV_FSL_CAAM
1351 - tristate "Freescale CAAM-Multicore driver backend"
1352 + tristate "Freescale CAAM-Multicore platform driver backend"
1353 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1354 select SOC_BUS
1355 + select CRYPTO_DEV_FSL_CAAM_COMMON
1356 help
1357 Enables the driver module for Freescale's Cryptographic Accelerator
1358 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1359 @@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
1360 To compile this driver as a module, choose M here: the module
1361 will be called caam.
1362
1363 +if CRYPTO_DEV_FSL_CAAM
1364 +
1365 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1366 + bool "Enable debug output in CAAM driver"
1367 + help
1368 + Selecting this will enable printing of various debug
1369 + information in the CAAM driver.
1370 +
1371 config CRYPTO_DEV_FSL_CAAM_JR
1372 tristate "Freescale CAAM Job Ring driver backend"
1373 - depends on CRYPTO_DEV_FSL_CAAM
1374 default y
1375 help
1376 Enables the driver module for Job Rings which are part of
1377 @@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1378 To compile this driver as a module, choose M here: the module
1379 will be called caam_jr.
1380
1381 +if CRYPTO_DEV_FSL_CAAM_JR
1382 +
1383 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1384 int "Job Ring size"
1385 - depends on CRYPTO_DEV_FSL_CAAM_JR
1386 range 2 9
1387 default "9"
1388 help
1389 @@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1390
1391 config CRYPTO_DEV_FSL_CAAM_INTC
1392 bool "Job Ring interrupt coalescing"
1393 - depends on CRYPTO_DEV_FSL_CAAM_JR
1394 help
1395 Enable the Job Ring's interrupt coalescing feature.
1396
1397 @@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1398 threshold. Range is 1-65535.
1399
1400 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1401 - tristate "Register algorithm implementations with the Crypto API"
1402 - depends on CRYPTO_DEV_FSL_CAAM_JR
1403 + bool "Register algorithm implementations with the Crypto API"
1404 default y
1405 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1406 select CRYPTO_AEAD
1407 select CRYPTO_AUTHENC
1408 select CRYPTO_BLKCIPHER
1409 @@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1410 scatterlist crypto API (such as the linux native IPSec
1411 stack) to the SEC4 via job ring.
1412
1413 - To compile this as a module, choose M here: the module
1414 - will be called caamalg.
1415 -
1416 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1417 - tristate "Queue Interface as Crypto API backend"
1418 - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1419 + bool "Queue Interface as Crypto API backend"
1420 + depends on FSL_SDK_DPA && NET
1421 default y
1422 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1423 select CRYPTO_AUTHENC
1424 select CRYPTO_BLKCIPHER
1425 help
1426 @@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1427 assigned to the kernel should also be more than the number of
1428 job rings.
1429
1430 - To compile this as a module, choose M here: the module
1431 - will be called caamalg_qi.
1432 -
1433 config CRYPTO_DEV_FSL_CAAM_AHASH_API
1434 - tristate "Register hash algorithm implementations with Crypto API"
1435 - depends on CRYPTO_DEV_FSL_CAAM_JR
1436 + bool "Register hash algorithm implementations with Crypto API"
1437 default y
1438 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1439 select CRYPTO_HASH
1440 help
1441 Selecting this will offload ahash for users of the
1442 scatterlist crypto API to the SEC4 via job ring.
1443
1444 - To compile this as a module, choose M here: the module
1445 - will be called caamhash.
1446 -
1447 config CRYPTO_DEV_FSL_CAAM_PKC_API
1448 - tristate "Register public key cryptography implementations with Crypto API"
1449 - depends on CRYPTO_DEV_FSL_CAAM_JR
1450 + bool "Register public key cryptography implementations with Crypto API"
1451 default y
1452 select CRYPTO_RSA
1453 help
1454 Selecting this will allow SEC Public key support for RSA.
1455 Supported cryptographic primitives: encryption, decryption,
1456 signature and verification.
1457 - To compile this as a module, choose M here: the module
1458 - will be called caam_pkc.
1459
1460 config CRYPTO_DEV_FSL_CAAM_RNG_API
1461 - tristate "Register caam device for hwrng API"
1462 - depends on CRYPTO_DEV_FSL_CAAM_JR
1463 + bool "Register caam device for hwrng API"
1464 default y
1465 select CRYPTO_RNG
1466 select HW_RANDOM
1467 @@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1468 Selecting this will register the SEC4 hardware rng to
1469 the hw_random API for suppying the kernel entropy pool.
1470
1471 - To compile this as a module, choose M here: the module
1472 - will be called caamrng.
1473 +endif # CRYPTO_DEV_FSL_CAAM_JR
1474
1475 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1476 - bool "Enable debug output in CAAM driver"
1477 - depends on CRYPTO_DEV_FSL_CAAM
1478 - help
1479 - Selecting this will enable printing of various debug
1480 - information in the CAAM driver.
1481 +endif # CRYPTO_DEV_FSL_CAAM
1482
1483 -config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1484 - def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1485 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1486 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1487 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1488 + depends on FSL_MC_DPIO
1489 + select CRYPTO_DEV_FSL_CAAM_COMMON
1490 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1491 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1492 + select CRYPTO_BLKCIPHER
1493 + select CRYPTO_AUTHENC
1494 + select CRYPTO_AEAD
1495 + select CRYPTO_HASH
1496 + ---help---
1497 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1498 + It handles DPSECI DPAA2 objects that sit on the Management Complex
1499 + (MC) fsl-mc bus.
1500 +
1501 + To compile this as a module, choose M here: the module
1502 + will be called dpaa2_caam.
1503 --- a/drivers/crypto/caam/Makefile
1504 +++ b/drivers/crypto/caam/Makefile
1505 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1506 ccflags-y := -DDEBUG
1507 endif
1508
1509 +ccflags-y += -DVERSION=\"\"
1510 +
1511 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1512 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1513 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1514 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1515 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1516 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1517 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1518 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1519 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1520 -
1521 -caam-objs := ctrl.o
1522 -caam_jr-objs := jr.o key_gen.o error.o
1523 -caam_pkc-y := caampkc.o pkc_desc.o
1524 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1525 +
1526 +caam-y := ctrl.o
1527 +caam_jr-y := jr.o key_gen.o
1528 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1529 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1530 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1531 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1532 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
1533 +
1534 +caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
1535 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1536 ccflags-y += -DCONFIG_CAAM_QI
1537 - caam-objs += qi.o
1538 endif
1539 +
1540 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1541 +
1542 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
1543 --- a/drivers/crypto/caam/caamalg.c
1544 +++ b/drivers/crypto/caam/caamalg.c
1545 @@ -71,6 +71,8 @@
1546 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
1547 CAAM_CMD_SZ * 5)
1548
1549 +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
1550 +
1551 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
1552 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
1553
1554 @@ -108,6 +110,7 @@ struct caam_ctx {
1555 dma_addr_t sh_desc_dec_dma;
1556 dma_addr_t sh_desc_givenc_dma;
1557 dma_addr_t key_dma;
1558 + enum dma_data_direction dir;
1559 struct device *jrdev;
1560 struct alginfo adata;
1561 struct alginfo cdata;
1562 @@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
1563 {
1564 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1565 struct device *jrdev = ctx->jrdev;
1566 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1567 u32 *desc;
1568 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1569 ctx->adata.keylen_pad;
1570 @@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
1571
1572 /* aead_encrypt shared descriptor */
1573 desc = ctx->sh_desc_enc;
1574 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1575 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1576 + ctrlpriv->era);
1577 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1578 - desc_bytes(desc), DMA_TO_DEVICE);
1579 + desc_bytes(desc), ctx->dir);
1580
1581 /*
1582 * Job Descriptor and Shared Descriptors
1583 @@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
1584
1585 /* aead_decrypt shared descriptor */
1586 desc = ctx->sh_desc_dec;
1587 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1588 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1589 + ctrlpriv->era);
1590 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1591 - desc_bytes(desc), DMA_TO_DEVICE);
1592 + desc_bytes(desc), ctx->dir);
1593
1594 return 0;
1595 }
1596 @@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
1597 unsigned int ivsize = crypto_aead_ivsize(aead);
1598 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1599 struct device *jrdev = ctx->jrdev;
1600 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1601 u32 ctx1_iv_off = 0;
1602 u32 *desc, *nonce = NULL;
1603 u32 inl_mask;
1604 @@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
1605 desc = ctx->sh_desc_enc;
1606 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1607 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1608 - false);
1609 + false, ctrlpriv->era);
1610 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1611 - desc_bytes(desc), DMA_TO_DEVICE);
1612 + desc_bytes(desc), ctx->dir);
1613
1614 skip_enc:
1615 /*
1616 @@ -266,9 +273,9 @@ skip_enc:
1617 desc = ctx->sh_desc_dec;
1618 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1619 ctx->authsize, alg->caam.geniv, is_rfc3686,
1620 - nonce, ctx1_iv_off, false);
1621 + nonce, ctx1_iv_off, false, ctrlpriv->era);
1622 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1623 - desc_bytes(desc), DMA_TO_DEVICE);
1624 + desc_bytes(desc), ctx->dir);
1625
1626 if (!alg->caam.geniv)
1627 goto skip_givenc;
1628 @@ -300,9 +307,9 @@ skip_enc:
1629 desc = ctx->sh_desc_enc;
1630 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1631 ctx->authsize, is_rfc3686, nonce,
1632 - ctx1_iv_off, false);
1633 + ctx1_iv_off, false, ctrlpriv->era);
1634 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1635 - desc_bytes(desc), DMA_TO_DEVICE);
1636 + desc_bytes(desc), ctx->dir);
1637
1638 skip_givenc:
1639 return 0;
1640 @@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
1641 {
1642 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1643 struct device *jrdev = ctx->jrdev;
1644 + unsigned int ivsize = crypto_aead_ivsize(aead);
1645 u32 *desc;
1646 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1647 ctx->cdata.keylen;
1648 @@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
1649 }
1650
1651 desc = ctx->sh_desc_enc;
1652 - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1653 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1654 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1655 - desc_bytes(desc), DMA_TO_DEVICE);
1656 + desc_bytes(desc), ctx->dir);
1657
1658 /*
1659 * Job Descriptor and Shared Descriptors
1660 @@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
1661 }
1662
1663 desc = ctx->sh_desc_dec;
1664 - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1665 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1666 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1667 - desc_bytes(desc), DMA_TO_DEVICE);
1668 + desc_bytes(desc), ctx->dir);
1669
1670 return 0;
1671 }
1672 @@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
1673 {
1674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1675 struct device *jrdev = ctx->jrdev;
1676 + unsigned int ivsize = crypto_aead_ivsize(aead);
1677 u32 *desc;
1678 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1679 ctx->cdata.keylen;
1680 @@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
1681 }
1682
1683 desc = ctx->sh_desc_enc;
1684 - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1685 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1686 + false);
1687 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1688 - desc_bytes(desc), DMA_TO_DEVICE);
1689 + desc_bytes(desc), ctx->dir);
1690
1691 /*
1692 * Job Descriptor and Shared Descriptors
1693 @@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
1694 }
1695
1696 desc = ctx->sh_desc_dec;
1697 - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1698 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1699 + false);
1700 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1701 - desc_bytes(desc), DMA_TO_DEVICE);
1702 + desc_bytes(desc), ctx->dir);
1703
1704 return 0;
1705 }
1706 @@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
1707 {
1708 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1709 struct device *jrdev = ctx->jrdev;
1710 + unsigned int ivsize = crypto_aead_ivsize(aead);
1711 u32 *desc;
1712 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1713 ctx->cdata.keylen;
1714 @@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
1715 }
1716
1717 desc = ctx->sh_desc_enc;
1718 - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1719 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1720 + false);
1721 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1722 - desc_bytes(desc), DMA_TO_DEVICE);
1723 + desc_bytes(desc), ctx->dir);
1724
1725 /*
1726 * Job Descriptor and Shared Descriptors
1727 @@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
1728 }
1729
1730 desc = ctx->sh_desc_dec;
1731 - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1732 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1733 + false);
1734 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1735 - desc_bytes(desc), DMA_TO_DEVICE);
1736 + desc_bytes(desc), ctx->dir);
1737
1738 return 0;
1739 }
1740 @@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
1741 return 0;
1742 }
1743
1744 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
1745 +{
1746 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1747 + struct device *jrdev = ctx->jrdev;
1748 + unsigned int ivsize = crypto_aead_ivsize(aead);
1749 + u32 *desc;
1750 +
1751 + if (!ctx->cdata.keylen || !ctx->authsize)
1752 + return 0;
1753 +
1754 + desc = ctx->sh_desc_enc;
1755 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1756 + ctx->authsize, true, false);
1757 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1758 + desc_bytes(desc), ctx->dir);
1759 +
1760 + desc = ctx->sh_desc_dec;
1761 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1762 + ctx->authsize, false, false);
1763 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1764 + desc_bytes(desc), ctx->dir);
1765 +
1766 + return 0;
1767 +}
1768 +
1769 +static int chachapoly_setauthsize(struct crypto_aead *aead,
1770 + unsigned int authsize)
1771 +{
1772 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1773 +
1774 + if (authsize != POLY1305_DIGEST_SIZE)
1775 + return -EINVAL;
1776 +
1777 + ctx->authsize = authsize;
1778 + return chachapoly_set_sh_desc(aead);
1779 +}
1780 +
1781 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
1782 + unsigned int keylen)
1783 +{
1784 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1785 + unsigned int ivsize = crypto_aead_ivsize(aead);
1786 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
1787 +
1788 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
1789 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1790 + return -EINVAL;
1791 + }
1792 +
1793 + ctx->cdata.key_virt = key;
1794 + ctx->cdata.keylen = keylen - saltlen;
1795 +
1796 + return chachapoly_set_sh_desc(aead);
1797 +}
1798 +
1799 static int aead_setkey(struct crypto_aead *aead,
1800 const u8 *key, unsigned int keylen)
1801 {
1802 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1803 struct device *jrdev = ctx->jrdev;
1804 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1805 struct crypto_authenc_keys keys;
1806 int ret = 0;
1807
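Editor's note: the setkey handler above expects the key blob to carry the ChaCha20 key followed by a salt whose length is CHACHAPOLY_IV_SIZE minus the transform's IV size (4 bytes for rfc7539esp, 0 for rfc7539). A standalone sketch of that length check follows; it is not part of the patch, the constants are written out locally to mirror CHACHA20_KEY_SIZE and CHACHAPOLY_IV_SIZE, and the helper name is made up for illustration.

#include <stdio.h>

#define CHACHA20_KEY_SIZE   32
#define CHACHAPOLY_IV_SIZE  12

/* Illustrative only: mirrors the keylen check in chachapoly_setkey(). */
static int check_chachapoly_keylen(unsigned int keylen, unsigned int ivsize)
{
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA20_KEY_SIZE + saltlen)
		return -1;	/* driver would flag CRYPTO_TFM_RES_BAD_KEY_LEN */

	printf("ivsize=%u: key=%u bytes, salt=%u bytes\n",
	       ivsize, CHACHA20_KEY_SIZE, saltlen);
	return 0;
}

int main(void)
{
	check_chachapoly_keylen(32, 12);	/* rfc7539: no salt */
	check_chachapoly_keylen(36, 8);		/* rfc7539esp: 4-byte salt */
	return 0;
}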
1808 @@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
1809 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1810 #endif
1811
1812 + /*
1813 + * If DKP is supported, use it in the shared descriptor to generate
1814 + * the split key.
1815 + */
1816 + if (ctrlpriv->era >= 6) {
1817 + ctx->adata.keylen = keys.authkeylen;
1818 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1819 + OP_ALG_ALGSEL_MASK);
1820 +
1821 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1822 + goto badkey;
1823 +
1824 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
1825 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1826 + keys.enckeylen);
1827 + dma_sync_single_for_device(jrdev, ctx->key_dma,
1828 + ctx->adata.keylen_pad +
1829 + keys.enckeylen, ctx->dir);
1830 + goto skip_split_key;
1831 + }
1832 +
1833 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1834 keys.authkeylen, CAAM_MAX_KEY_SIZE -
1835 keys.enckeylen);
1836 @@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
1837 /* postpend encryption key to auth split key */
1838 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1839 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1840 - keys.enckeylen, DMA_TO_DEVICE);
1841 + keys.enckeylen, ctx->dir);
1842 #ifdef DEBUG
1843 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1844 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1845 ctx->adata.keylen_pad + keys.enckeylen, 1);
1846 #endif
1847 +
1848 +skip_split_key:
1849 ctx->cdata.keylen = keys.enckeylen;
1850 return aead_set_sh_desc(aead);
1851 badkey:
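Editor's note: on SEC Era >= 6 the hunks above skip gen_split_key() and lay the raw HMAC key and the cipher key out back to back in ctx->key, leaving the padded split-key slot to be produced by the DKP protocol when the shared descriptor runs. A minimal standalone sketch of that buffer layout follows; it is not part of the patch, the bound and the padded length are illustrative (keylen_pad really comes from split_key_len() for the chosen hash).

#include <stdio.h>
#include <string.h>

#define CAAM_MAX_KEY_SIZE 96	/* illustrative bound; see the driver headers */

/* Illustrative only: era >= 6 key layout built by aead_setkey(). */
static int layout_dkp_key(unsigned char *buf, size_t buflen,
			  const unsigned char *authkey, unsigned int authkeylen,
			  unsigned int keylen_pad,	/* split_key_len() result */
			  const unsigned char *enckey, unsigned int enckeylen)
{
	if (authkeylen > keylen_pad ||
	    keylen_pad + enckeylen > CAAM_MAX_KEY_SIZE ||
	    keylen_pad + enckeylen > buflen)
		return -1;			/* -> badkey path */

	memset(buf, 0, keylen_pad);		/* padded slot, filled in by DKP */
	memcpy(buf, authkey, authkeylen);	/* raw HMAC key at offset 0 */
	memcpy(buf + keylen_pad, enckey, enckeylen);

	printf("authkey@0 (%u bytes), enckey@%u (%u bytes), total %u\n",
	       authkeylen, keylen_pad, enckeylen, keylen_pad + enckeylen);
	return 0;
}

int main(void)
{
	unsigned char key[96], auth[20] = { 0 }, enc[16] = { 0 };

	/* e.g. hmac(sha1) key of 20 bytes in a 48-byte padded slot + aes-128 key */
	return layout_dkp_key(key, sizeof(key), auth, sizeof(auth), 48,
			      enc, sizeof(enc));
}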
1852 @@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
1853 #endif
1854
1855 memcpy(ctx->key, key, keylen);
1856 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1857 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1858 ctx->cdata.keylen = keylen;
1859
1860 return gcm_set_sh_desc(aead);
1861 @@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
1862 */
1863 ctx->cdata.keylen = keylen - 4;
1864 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1865 - DMA_TO_DEVICE);
1866 + ctx->dir);
1867 return rfc4106_set_sh_desc(aead);
1868 }
1869
1870 @@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
1871 */
1872 ctx->cdata.keylen = keylen - 4;
1873 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1874 - DMA_TO_DEVICE);
1875 + ctx->dir);
1876 return rfc4543_set_sh_desc(aead);
1877 }
1878
1879 @@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
1880 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1881 ctx1_iv_off);
1882 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1883 - desc_bytes(desc), DMA_TO_DEVICE);
1884 + desc_bytes(desc), ctx->dir);
1885
1886 /* ablkcipher_decrypt shared descriptor */
1887 desc = ctx->sh_desc_dec;
1888 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1889 ctx1_iv_off);
1890 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1891 - desc_bytes(desc), DMA_TO_DEVICE);
1892 + desc_bytes(desc), ctx->dir);
1893
1894 /* ablkcipher_givencrypt shared descriptor */
1895 desc = ctx->sh_desc_givenc;
1896 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1897 ctx1_iv_off);
1898 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1899 - desc_bytes(desc), DMA_TO_DEVICE);
1900 + desc_bytes(desc), ctx->dir);
1901
1902 return 0;
1903 }
1904 @@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
1905 desc = ctx->sh_desc_enc;
1906 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1907 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1908 - desc_bytes(desc), DMA_TO_DEVICE);
1909 + desc_bytes(desc), ctx->dir);
1910
1911 /* xts_ablkcipher_decrypt shared descriptor */
1912 desc = ctx->sh_desc_dec;
1913 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1914 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1915 - desc_bytes(desc), DMA_TO_DEVICE);
1916 + desc_bytes(desc), ctx->dir);
1917
1918 return 0;
1919 }
1920 @@ -989,9 +1082,6 @@ static void init_aead_job(struct aead_re
1921 append_seq_out_ptr(desc, dst_dma,
1922 req->assoclen + req->cryptlen - authsize,
1923 out_options);
1924 -
1925 - /* REG3 = assoclen */
1926 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1927 }
1928
1929 static void init_gcm_job(struct aead_request *req,
1930 @@ -1006,6 +1096,7 @@ static void init_gcm_job(struct aead_req
1931 unsigned int last;
1932
1933 init_aead_job(req, edesc, all_contig, encrypt);
1934 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1935
1936 /* BUG This should not be specific to generic GCM. */
1937 last = 0;
1938 @@ -1023,6 +1114,40 @@ static void init_gcm_job(struct aead_req
1939 /* End of blank commands */
1940 }
1941
1942 +static void init_chachapoly_job(struct aead_request *req,
1943 + struct aead_edesc *edesc, bool all_contig,
1944 + bool encrypt)
1945 +{
1946 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
1947 + unsigned int ivsize = crypto_aead_ivsize(aead);
1948 + unsigned int assoclen = req->assoclen;
1949 + u32 *desc = edesc->hw_desc;
1950 + u32 ctx_iv_off = 4;
1951 +
1952 + init_aead_job(req, edesc, all_contig, encrypt);
1953 +
1954 + if (ivsize != CHACHAPOLY_IV_SIZE) {
1955 + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1956 + ctx_iv_off += 4;
1957 +
1958 + /*
1959 + * The associated data comes already with the IV but we need
1960 + * to skip it when we authenticate or encrypt...
1961 + */
1962 + assoclen -= ivsize;
1963 + }
1964 +
1965 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1966 +
1967 + /*
1968 + * For IPsec load the IV further in the same register.
1969 +	 * For RFC7539, simply load the 12-byte nonce in a single operation.
1970 + */
1971 + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1972 + LDST_SRCDST_BYTE_CONTEXT |
1973 + ctx_iv_off << LDST_OFFSET_SHIFT);
1974 +}
1975 +
1976 static void init_authenc_job(struct aead_request *req,
1977 struct aead_edesc *edesc,
1978 bool all_contig, bool encrypt)
1979 @@ -1032,6 +1157,7 @@ static void init_authenc_job(struct aead
1980 struct caam_aead_alg, aead);
1981 unsigned int ivsize = crypto_aead_ivsize(aead);
1982 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1983 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1984 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1985 OP_ALG_AAI_CTR_MOD128);
1986 const bool is_rfc3686 = alg->caam.rfc3686;
1987 @@ -1055,6 +1181,15 @@ static void init_authenc_job(struct aead
1988
1989 init_aead_job(req, edesc, all_contig, encrypt);
1990
1991 + /*
1992 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1993 + * having DPOVRD as destination.
1994 + */
1995 + if (ctrlpriv->era < 3)
1996 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1997 + else
1998 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1999 +
2000 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2001 append_load_as_imm(desc, req->iv, ivsize,
2002 LDST_CLASS_1_CCB |
2003 @@ -1227,8 +1362,16 @@ static struct aead_edesc *aead_edesc_all
2004 }
2005 }
2006
2007 + /*
2008 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2009 + * the end of the table by allocating more S/G entries.
2010 + */
2011 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
2012 - sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2013 + if (mapped_dst_nents > 1)
2014 + sec4_sg_len += ALIGN(mapped_dst_nents, 4);
2015 + else
2016 + sec4_sg_len = ALIGN(sec4_sg_len, 4);
2017 +
2018 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2019
2020 /* allocate space for base edesc and hw desc commands, link tables */
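Editor's note: the replacement logic above rounds the scatter/gather entry counts up to a multiple of four so that the engine's 4-entry burst reads never run past the end of the table. A small standalone sketch of the same arithmetic follows; it is not part of the patch, and ALIGN is written out locally (in the kernel it comes from <linux/kernel.h>).

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

/* Illustrative only: sec4_sg_len sizing from aead_edesc_alloc(). */
static unsigned int sec4_sg_len(unsigned int mapped_src_nents,
				unsigned int mapped_dst_nents)
{
	unsigned int len = mapped_src_nents > 1 ? mapped_src_nents : 0;

	if (mapped_dst_nents > 1)
		len += ALIGN_UP(mapped_dst_nents, 4);
	else
		len = ALIGN_UP(len, 4);

	return len;
}

int main(void)
{
	printf("%u\n", sec4_sg_len(3, 1));	/* 3 src entries          -> 4  */
	printf("%u\n", sec4_sg_len(3, 5));	/* 3 src + 5 dst entries  -> 11 */
	printf("%u\n", sec4_sg_len(1, 1));	/* contiguous src and dst -> 0  */
	return 0;
}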
2021 @@ -1309,6 +1452,72 @@ static int gcm_encrypt(struct aead_reque
2022 return ret;
2023 }
2024
2025 +static int chachapoly_encrypt(struct aead_request *req)
2026 +{
2027 + struct aead_edesc *edesc;
2028 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2029 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2030 + struct device *jrdev = ctx->jrdev;
2031 + bool all_contig;
2032 + u32 *desc;
2033 + int ret;
2034 +
2035 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2036 + true);
2037 + if (IS_ERR(edesc))
2038 + return PTR_ERR(edesc);
2039 +
2040 + desc = edesc->hw_desc;
2041 +
2042 + init_chachapoly_job(req, edesc, all_contig, true);
2043 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2044 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2045 + 1);
2046 +
2047 + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2048 + if (!ret) {
2049 + ret = -EINPROGRESS;
2050 + } else {
2051 + aead_unmap(jrdev, edesc, req);
2052 + kfree(edesc);
2053 + }
2054 +
2055 + return ret;
2056 +}
2057 +
2058 +static int chachapoly_decrypt(struct aead_request *req)
2059 +{
2060 + struct aead_edesc *edesc;
2061 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2062 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2063 + struct device *jrdev = ctx->jrdev;
2064 + bool all_contig;
2065 + u32 *desc;
2066 + int ret;
2067 +
2068 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2069 + false);
2070 + if (IS_ERR(edesc))
2071 + return PTR_ERR(edesc);
2072 +
2073 + desc = edesc->hw_desc;
2074 +
2075 + init_chachapoly_job(req, edesc, all_contig, false);
2076 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2077 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2078 + 1);
2079 +
2080 + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2081 + if (!ret) {
2082 + ret = -EINPROGRESS;
2083 + } else {
2084 + aead_unmap(jrdev, edesc, req);
2085 + kfree(edesc);
2086 + }
2087 +
2088 + return ret;
2089 +}
2090 +
2091 static int ipsec_gcm_encrypt(struct aead_request *req)
2092 {
2093 if (req->assoclen < 8)
2094 @@ -1496,7 +1705,25 @@ static struct ablkcipher_edesc *ablkciph
2095
2096 sec4_sg_ents = 1 + mapped_src_nents;
2097 dst_sg_idx = sec4_sg_ents;
2098 - sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2099 +
2100 + /*
2101 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2102 + * the end of the table by allocating more S/G entries. Logic:
2103 + * if (src != dst && output S/G)
2104 + * pad output S/G, if needed
2105 + * else if (src == dst && S/G)
2106 + * overlapping S/Gs; pad one of them
2107 + * else if (input S/G) ...
2108 + * pad input S/G, if needed
2109 + */
2110 + if (mapped_dst_nents > 1)
2111 + sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
2112 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
2113 + sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
2114 + 1 + ALIGN(mapped_src_nents, 4));
2115 + else
2116 + sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
2117 +
2118 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
2119
2120 /*
2121 @@ -3199,6 +3426,50 @@ static struct caam_aead_alg driver_aeads
2122 .geniv = true,
2123 },
2124 },
2125 + {
2126 + .aead = {
2127 + .base = {
2128 + .cra_name = "rfc7539(chacha20,poly1305)",
2129 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
2130 + "caam",
2131 + .cra_blocksize = 1,
2132 + },
2133 + .setkey = chachapoly_setkey,
2134 + .setauthsize = chachapoly_setauthsize,
2135 + .encrypt = chachapoly_encrypt,
2136 + .decrypt = chachapoly_decrypt,
2137 + .ivsize = CHACHAPOLY_IV_SIZE,
2138 + .maxauthsize = POLY1305_DIGEST_SIZE,
2139 + },
2140 + .caam = {
2141 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2142 + OP_ALG_AAI_AEAD,
2143 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2144 + OP_ALG_AAI_AEAD,
2145 + },
2146 + },
2147 + {
2148 + .aead = {
2149 + .base = {
2150 + .cra_name = "rfc7539esp(chacha20,poly1305)",
2151 + .cra_driver_name = "rfc7539esp-chacha20-"
2152 + "poly1305-caam",
2153 + .cra_blocksize = 1,
2154 + },
2155 + .setkey = chachapoly_setkey,
2156 + .setauthsize = chachapoly_setauthsize,
2157 + .encrypt = chachapoly_encrypt,
2158 + .decrypt = chachapoly_decrypt,
2159 + .ivsize = 8,
2160 + .maxauthsize = POLY1305_DIGEST_SIZE,
2161 + },
2162 + .caam = {
2163 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2164 + OP_ALG_AAI_AEAD,
2165 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2166 + OP_ALG_AAI_AEAD,
2167 + },
2168 + },
2169 };
2170
2171 struct caam_crypto_alg {
2172 @@ -3207,9 +3478,11 @@ struct caam_crypto_alg {
2173 struct caam_alg_entry caam;
2174 };
2175
2176 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2177 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2178 + bool uses_dkp)
2179 {
2180 dma_addr_t dma_addr;
2181 + struct caam_drv_private *priv;
2182
2183 ctx->jrdev = caam_jr_alloc();
2184 if (IS_ERR(ctx->jrdev)) {
2185 @@ -3217,10 +3490,16 @@ static int caam_init_common(struct caam_
2186 return PTR_ERR(ctx->jrdev);
2187 }
2188
2189 + priv = dev_get_drvdata(ctx->jrdev->parent);
2190 + if (priv->era >= 6 && uses_dkp)
2191 + ctx->dir = DMA_BIDIRECTIONAL;
2192 + else
2193 + ctx->dir = DMA_TO_DEVICE;
2194 +
2195 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
2196 offsetof(struct caam_ctx,
2197 sh_desc_enc_dma),
2198 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2199 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2200 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
2201 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
2202 caam_jr_free(ctx->jrdev);
2203 @@ -3248,7 +3527,7 @@ static int caam_cra_init(struct crypto_t
2204 container_of(alg, struct caam_crypto_alg, crypto_alg);
2205 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2206
2207 - return caam_init_common(ctx, &caam_alg->caam);
2208 + return caam_init_common(ctx, &caam_alg->caam, false);
2209 }
2210
2211 static int caam_aead_init(struct crypto_aead *tfm)
2212 @@ -3258,14 +3537,15 @@ static int caam_aead_init(struct crypto_
2213 container_of(alg, struct caam_aead_alg, aead);
2214 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2215
2216 - return caam_init_common(ctx, &caam_alg->caam);
2217 + return caam_init_common(ctx, &caam_alg->caam,
2218 + alg->setkey == aead_setkey);
2219 }
2220
2221 static void caam_exit_common(struct caam_ctx *ctx)
2222 {
2223 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
2224 offsetof(struct caam_ctx, sh_desc_enc_dma),
2225 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2226 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2227 caam_jr_free(ctx->jrdev);
2228 }
2229
2230 @@ -3279,7 +3559,7 @@ static void caam_aead_exit(struct crypto
2231 caam_exit_common(crypto_aead_ctx(tfm));
2232 }
2233
2234 -static void __exit caam_algapi_exit(void)
2235 +void caam_algapi_exit(void)
2236 {
2237
2238 struct caam_crypto_alg *t_alg, *n;
2239 @@ -3358,56 +3638,52 @@ static void caam_aead_alg_init(struct ca
2240 alg->exit = caam_aead_exit;
2241 }
2242
2243 -static int __init caam_algapi_init(void)
2244 +int caam_algapi_init(struct device *ctrldev)
2245 {
2246 - struct device_node *dev_node;
2247 - struct platform_device *pdev;
2248 - struct device *ctrldev;
2249 - struct caam_drv_private *priv;
2250 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2251 int i = 0, err = 0;
2252 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
2254 unsigned int md_limit = SHA512_DIGEST_SIZE;
2255 bool registered = false;
2256
2257 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2258 - if (!dev_node) {
2259 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2260 - if (!dev_node)
2261 - return -ENODEV;
2262 - }
2263 -
2264 - pdev = of_find_device_by_node(dev_node);
2265 - if (!pdev) {
2266 - of_node_put(dev_node);
2267 - return -ENODEV;
2268 - }
2269 -
2270 - ctrldev = &pdev->dev;
2271 - priv = dev_get_drvdata(ctrldev);
2272 - of_node_put(dev_node);
2273 -
2274 - /*
2275 - * If priv is NULL, it's probably because the caam driver wasn't
2276 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2277 - */
2278 - if (!priv)
2279 - return -ENODEV;
2280 -
2281 -
2282 INIT_LIST_HEAD(&alg_list);
2283
2284 /*
2285 * Register crypto algorithms the device supports.
2286 * First, detect presence and attributes of DES, AES, and MD blocks.
2287 */
2288 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2289 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2290 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2291 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2292 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2293 + if (priv->era < 10) {
2294 + u32 cha_vid, cha_inst;
2295 +
2296 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2297 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2298 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2299 +
2300 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2301 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2302 + CHA_ID_LS_DES_SHIFT;
2303 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2304 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2305 + ccha_inst = 0;
2306 + ptha_inst = 0;
2307 + } else {
2308 + u32 aesa, mdha;
2309 +
2310 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2311 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2312 +
2313 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2314 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2315 +
2316 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2317 + aes_inst = aesa & CHA_VER_NUM_MASK;
2318 + md_inst = mdha & CHA_VER_NUM_MASK;
2319 + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
2320 + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
2321 + }
2322
2323 /* If MD is present, limit digest size based on LP256 */
2324 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2325 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
2326 md_limit = SHA256_DIGEST_SIZE;
2327
2328 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
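Editor's note: from Era 10 onward the per-CHA version registers replace the packed cha_id_ls/cha_num_ls words, so instance counts and version IDs are read per accelerator (AESA, MDHA, CCHA, PTHA, ...). A standalone sketch of the field extraction follows; it is not part of the patch, and the mask and shift values here are placeholders chosen for illustration — the real CHA_VER_* definitions live in drivers/crypto/caam/regs.h.

#include <stdio.h>
#include <stdint.h>

/* Placeholder layout: low byte = instance count, top byte = version ID. */
#define CHA_VER_NUM_MASK   0x000000ffu
#define CHA_VER_VID_SHIFT  24
#define CHA_VER_VID_MASK   (0xffu << CHA_VER_VID_SHIFT)

static void decode_cha(const char *name, uint32_t reg)
{
	uint32_t inst = reg & CHA_VER_NUM_MASK;
	uint32_t vid  = (reg & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

	printf("%s: %u instance(s), version id %u\n", name, inst, vid);
}

int main(void)
{
	decode_cha("aesa", 0x0a000001);	/* hypothetical register value */
	decode_cha("ccha", 0x01000000);	/* 0 instances -> skip chacha20 algs */
	return 0;
}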
2329 @@ -3429,10 +3705,10 @@ static int __init caam_algapi_init(void)
2330 * Check support for AES modes not available
2331 * on LP devices.
2332 */
2333 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2334 - if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2335 - OP_ALG_AAI_XTS)
2336 - continue;
2337 + if (aes_vid == CHA_VER_VID_AES_LP &&
2338 + (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2339 + OP_ALG_AAI_XTS)
2340 + continue;
2341
2342 t_alg = caam_alg_alloc(alg);
2343 if (IS_ERR(t_alg)) {
2344 @@ -3471,21 +3747,28 @@ static int __init caam_algapi_init(void)
2345 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2346 continue;
2347
2348 + /* Skip CHACHA20 algorithms if not supported by device */
2349 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
2350 + continue;
2351 +
2352 + /* Skip POLY1305 algorithms if not supported by device */
2353 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
2354 + continue;
2355 +
2356 /*
2357 * Check support for AES algorithms not available
2358 * on LP devices.
2359 */
2360 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2361 - if (alg_aai == OP_ALG_AAI_GCM)
2362 - continue;
2363 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2364 + continue;
2365
2366 /*
2367 * Skip algorithms requiring message digests
2368 * if MD or MD size is not supported by device.
2369 */
2370 - if (c2_alg_sel &&
2371 - (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2372 - continue;
2373 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
2374 + (!md_inst || t_alg->aead.maxauthsize > md_limit))
2375 + continue;
2376
2377 caam_aead_alg_init(t_alg);
2378
2379 @@ -3505,10 +3788,3 @@ static int __init caam_algapi_init(void)
2380
2381 return err;
2382 }
2383 -
2384 -module_init(caam_algapi_init);
2385 -module_exit(caam_algapi_exit);
2386 -
2387 -MODULE_LICENSE("GPL");
2388 -MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2389 -MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
2390 --- a/drivers/crypto/caam/caamalg_desc.c
2391 +++ b/drivers/crypto/caam/caamalg_desc.c
2392 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
2393 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
2394 * (non-protocol) with no (null) encryption.
2395 * @desc: pointer to buffer used for descriptor construction
2396 - * @adata: pointer to authentication transform definitions. Note that since a
2397 - * split key is to be used, the size of the split key itself is
2398 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2399 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2400 + * @adata: pointer to authentication transform definitions.
2401 + * A split key is required for SEC Era < 6; the size of the split key
2402 + * is specified in this case. Valid algorithm values - one of
2403 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2404 + * with OP_ALG_AAI_HMAC_PRECOMP.
2405 * @icvsize: integrity check value (ICV) size (truncated or full)
2406 - *
2407 - * Note: Requires an MDHA split key.
2408 + * @era: SEC Era
2409 */
2410 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2411 - unsigned int icvsize)
2412 + unsigned int icvsize, int era)
2413 {
2414 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2415
2416 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
2417 /* Skip if already shared */
2418 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2419 JUMP_COND_SHRD);
2420 - if (adata->key_inline)
2421 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2422 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
2423 - KEY_ENC);
2424 - else
2425 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2426 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2427 + if (era < 6) {
2428 + if (adata->key_inline)
2429 + append_key_as_imm(desc, adata->key_virt,
2430 + adata->keylen_pad, adata->keylen,
2431 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2432 + KEY_ENC);
2433 + else
2434 + append_key(desc, adata->key_dma, adata->keylen,
2435 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2436 + } else {
2437 + append_proto_dkp(desc, adata);
2438 + }
2439 set_jump_tgt_here(desc, key_jump_cmd);
2440
2441 /* assoclen + cryptlen = seqinlen */
2442 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
2443 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
2444 * (non-protocol) with no (null) decryption.
2445 * @desc: pointer to buffer used for descriptor construction
2446 - * @adata: pointer to authentication transform definitions. Note that since a
2447 - * split key is to be used, the size of the split key itself is
2448 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2449 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2450 + * @adata: pointer to authentication transform definitions.
2451 + * A split key is required for SEC Era < 6; the size of the split key
2452 + * is specified in this case. Valid algorithm values - one of
2453 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2454 + * with OP_ALG_AAI_HMAC_PRECOMP.
2455 * @icvsize: integrity check value (ICV) size (truncated or full)
2456 - *
2457 - * Note: Requires an MDHA split key.
2458 + * @era: SEC Era
2459 */
2460 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2461 - unsigned int icvsize)
2462 + unsigned int icvsize, int era)
2463 {
2464 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
2465
2466 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
2467 /* Skip if already shared */
2468 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2469 JUMP_COND_SHRD);
2470 - if (adata->key_inline)
2471 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2472 - adata->keylen, CLASS_2 |
2473 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2474 - else
2475 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2476 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2477 + if (era < 6) {
2478 + if (adata->key_inline)
2479 + append_key_as_imm(desc, adata->key_virt,
2480 + adata->keylen_pad, adata->keylen,
2481 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2482 + KEY_ENC);
2483 + else
2484 + append_key(desc, adata->key_dma, adata->keylen,
2485 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2486 + } else {
2487 + append_proto_dkp(desc, adata);
2488 + }
2489 set_jump_tgt_here(desc, key_jump_cmd);
2490
2491 /* Class 2 operation */
2492 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
2493 static void init_sh_desc_key_aead(u32 * const desc,
2494 struct alginfo * const cdata,
2495 struct alginfo * const adata,
2496 - const bool is_rfc3686, u32 *nonce)
2497 + const bool is_rfc3686, u32 *nonce, int era)
2498 {
2499 u32 *key_jump_cmd;
2500 unsigned int enckeylen = cdata->keylen;
2501 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2502 if (is_rfc3686)
2503 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2504
2505 - if (adata->key_inline)
2506 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2507 - adata->keylen, CLASS_2 |
2508 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2509 - else
2510 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2511 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2512 + if (era < 6) {
2513 + if (adata->key_inline)
2514 + append_key_as_imm(desc, adata->key_virt,
2515 + adata->keylen_pad, adata->keylen,
2516 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2517 + KEY_ENC);
2518 + else
2519 + append_key(desc, adata->key_dma, adata->keylen,
2520 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2521 + } else {
2522 + append_proto_dkp(desc, adata);
2523 + }
2524
2525 if (cdata->key_inline)
2526 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2527 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2528 * @cdata: pointer to block cipher transform definitions
2529 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2530 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2531 - * @adata: pointer to authentication transform definitions. Note that since a
2532 - * split key is to be used, the size of the split key itself is
2533 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2534 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2535 + * @adata: pointer to authentication transform definitions.
2536 + * A split key is required for SEC Era < 6; the size of the split key
2537 + * is specified in this case. Valid algorithm values - one of
2538 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2539 + * with OP_ALG_AAI_HMAC_PRECOMP.
2540 * @ivsize: initialization vector size
2541 * @icvsize: integrity check value (ICV) size (truncated or full)
2542 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2543 * @nonce: pointer to rfc3686 nonce
2544 * @ctx1_iv_off: IV offset in CONTEXT1 register
2545 * @is_qi: true when called from caam/qi
2546 - *
2547 - * Note: Requires an MDHA split key.
2548 + * @era: SEC Era
2549 */
2550 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2551 struct alginfo *adata, unsigned int ivsize,
2552 unsigned int icvsize, const bool is_rfc3686,
2553 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2554 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2555 + int era)
2556 {
2557 /* Note: Context registers are saved. */
2558 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2559 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2560
2561 /* Class 2 operation */
2562 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2563 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2564 }
2565
2566 /* Read and write assoclen bytes */
2567 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2568 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2569 + if (is_qi || era < 3) {
2570 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2571 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2572 + } else {
2573 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2574 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2575 + }
2576
2577 /* Skip assoc data */
2578 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

2579 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2580 * @cdata: pointer to block cipher transform definitions
2581 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2582 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2583 - * @adata: pointer to authentication transform definitions. Note that since a
2584 - * split key is to be used, the size of the split key itself is
2585 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2586 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2587 + * @adata: pointer to authentication transform definitions.
2588 + * A split key is required for SEC Era < 6; the size of the split key
2589 + * is specified in this case. Valid algorithm values - one of
2590 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2591 + * with OP_ALG_AAI_HMAC_PRECOMP.
2592 * @ivsize: initialization vector size
2593 * @icvsize: integrity check value (ICV) size (truncated or full)
2594 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2595 * @nonce: pointer to rfc3686 nonce
2596 * @ctx1_iv_off: IV offset in CONTEXT1 register
2597 * @is_qi: true when called from caam/qi
2598 - *
2599 - * Note: Requires an MDHA split key.
2600 + * @era: SEC Era
2601 */
2602 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2603 struct alginfo *adata, unsigned int ivsize,
2604 unsigned int icvsize, const bool geniv,
2605 const bool is_rfc3686, u32 *nonce,
2606 - const u32 ctx1_iv_off, const bool is_qi)
2607 + const u32 ctx1_iv_off, const bool is_qi, int era)
2608 {
2609 /* Note: Context registers are saved. */
2610 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2611 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2612
2613 /* Class 2 operation */
2614 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2615 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2616 }
2617
2618 /* Read and write assoclen bytes */
2619 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2620 - if (geniv)
2621 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2622 - else
2623 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2624 + if (is_qi || era < 3) {
2625 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2626 + if (geniv)
2627 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2628 + ivsize);
2629 + else
2630 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2631 + CAAM_CMD_SZ);
2632 + } else {
2633 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2634 + if (geniv)
2635 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2636 + ivsize);
2637 + else
2638 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2639 + CAAM_CMD_SZ);
2640 + }
2641
2642 /* Skip assoc data */
2643 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2644 @@ -456,30 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2645 * @cdata: pointer to block cipher transform definitions
2646 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2647 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2648 - * @adata: pointer to authentication transform definitions. Note that since a
2649 - * split key is to be used, the size of the split key itself is
2650 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2651 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2652 - * @ivsize: initialization vector size
2653 + * @adata: pointer to authentication transform definitions.
2654 + * A split key is required for SEC Era < 6; its size is specified in this
2655 + * case. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
2656 + * SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2657 + * @ivsize: initialization vector size
2658 * @icvsize: integrity check value (ICV) size (truncated or full)
2659 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2660 * @nonce: pointer to rfc3686 nonce
2661 * @ctx1_iv_off: IV offset in CONTEXT1 register
2662 * @is_qi: true when called from caam/qi
2663 - *
2664 - * Note: Requires an MDHA split key.
2665 + * @era: SEC Era
2666 */
2667 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2668 struct alginfo *adata, unsigned int ivsize,
2669 unsigned int icvsize, const bool is_rfc3686,
2670 u32 *nonce, const u32 ctx1_iv_off,
2671 - const bool is_qi)
2672 + const bool is_qi, int era)
2673 {
2674 u32 geniv, moveiv;
2675 u32 *wait_cmd;
2676
2677 /* Note: Context registers are saved. */
2678 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2679 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2680
2681 if (is_qi) {
2682 u32 *wait_load_cmd;
2683 @@ -529,8 +561,13 @@ copy_iv:
2684 OP_ALG_ENCRYPT);
2685
2686 /* Read and write assoclen bytes */
2687 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2688 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2689 + if (is_qi || era < 3) {
2690 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2691 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2692 + } else {
2693 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2694 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2695 + }
2696
2697 /* Skip assoc data */
2698 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2699 @@ -592,14 +629,431 @@ copy_iv:
2700 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2701
2702 /**
2703 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2704 + * @desc: pointer to buffer used for descriptor construction
2705 + * @cdata: pointer to block cipher transform definitions
2706 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2707 + * with OP_ALG_AAI_CBC
2708 + * @adata: pointer to authentication transform definitions.
2709 + * A split key is required for SEC Era < 6; the size of the split key
2710 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2711 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2712 + * @assoclen: associated data length
2713 + * @ivsize: initialization vector size
2714 + * @authsize: authentication data size
2715 + * @blocksize: block cipher size
2716 + * @era: SEC Era
2717 + */
2718 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2719 + struct alginfo *adata, unsigned int assoclen,
2720 + unsigned int ivsize, unsigned int authsize,
2721 + unsigned int blocksize, int era)
2722 +{
2723 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
2724 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2725 +
2726 + /*
2727 + * Compute the index (in bytes) for the LOAD with destination of
2728 + * Class 1 Data Size Register and for the LOAD that generates padding
2729 + */
2730 + if (adata->key_inline) {
2731 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2732 + cdata->keylen - 4 * CAAM_CMD_SZ;
2733 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2734 + cdata->keylen - 2 * CAAM_CMD_SZ;
2735 + } else {
2736 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2737 + 4 * CAAM_CMD_SZ;
2738 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2739 + 2 * CAAM_CMD_SZ;
2740 + }
2741 +
2742 + stidx = 1 << HDR_START_IDX_SHIFT;
2743 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2744 +
2745 + /* skip key loading if they are loaded due to sharing */
2746 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2747 + JUMP_COND_SHRD);
2748 +
2749 + if (era < 6) {
2750 + if (adata->key_inline)
2751 + append_key_as_imm(desc, adata->key_virt,
2752 + adata->keylen_pad, adata->keylen,
2753 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2754 + KEY_ENC);
2755 + else
2756 + append_key(desc, adata->key_dma, adata->keylen,
2757 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2758 + } else {
2759 + append_proto_dkp(desc, adata);
2760 + }
2761 +
2762 + if (cdata->key_inline)
2763 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2764 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2765 + else
2766 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2767 + KEY_DEST_CLASS_REG);
2768 +
2769 + set_jump_tgt_here(desc, key_jump_cmd);
2770 +
2771 + /* class 2 operation */
2772 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2773 + OP_ALG_ENCRYPT);
2774 + /* class 1 operation */
2775 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2776 + OP_ALG_ENCRYPT);
2777 +
2778 + /* payloadlen = input data length - (assoclen + ivlen) */
2779 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2780 +
2781 + /* math1 = payloadlen + icvlen */
2782 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2783 +
2784 + /* padlen = block_size - math1 % block_size */
2785 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2786 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2787 +
2788 + /* cryptlen = payloadlen + icvlen + padlen */
2789 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2790 +
2791 + /*
2792 + * update immediate data with the padding length value
2793 + * for the LOAD in the class 1 data size register.
2794 + */
2795 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2796 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2797 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2798 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2799 +
2800 +	/* overwrite PL field for the padding info FIFO entry */
2801 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2802 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2803 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2804 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2805 +
2806 + /* store encrypted payload, icv and padding */
2807 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2808 +
2809 + /* if payload length is zero, jump to zero-payload commands */
2810 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2811 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2812 + JUMP_COND_MATH_Z);
2813 +
2814 + /* load iv in context1 */
2815 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2816 + LDST_CLASS_1_CCB | ivsize);
2817 +
2818 + /* read assoc for authentication */
2819 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2820 + FIFOLD_TYPE_MSG);
2821 + /* insnoop payload */
2822 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2823 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2824 +
2825 + /* jump the zero-payload commands */
2826 + append_jump(desc, JUMP_TEST_ALL | 3);
2827 +	/* jump over the zero-payload commands */
2828 + /* zero-payload commands */
2829 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2830 +
2831 + /* load iv in context1 */
2832 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2833 + LDST_CLASS_1_CCB | ivsize);
2834 +
2835 + /* assoc data is the only data for authentication */
2836 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2837 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2838 +
2839 + /* send icv to encryption */
2840 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2841 + authsize);
2842 +
2843 + /* update class 1 data size register with padding length */
2844 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2845 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2846 +
2847 + /* generate padding and send it to encryption */
2848 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2849 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2850 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2851 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2852 +
2853 +#ifdef DEBUG
2854 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2855 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2856 + desc_bytes(desc), 1);
2857 +#endif
2858 +}
2859 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
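Editor's note: the encap descriptor above computes the TLS 1.0 record padding entirely in MATH registers: padlen = blocksize - ((payloadlen + icvlen) mod blocksize), always between 1 and blocksize, and the modulo is done with an AND against (blocksize - 1), which only works because AES-CBC's block size (16) is a power of two. A standalone sketch of the same arithmetic follows; it is not part of the patch and the function name is made up for illustration.

#include <stdio.h>

/* Illustrative only: mirrors the REG1/REG2/REG3 math in
 * cnstr_shdsc_tls_encap() for a power-of-two blocksize. */
static unsigned int tls_cryptlen(unsigned int payloadlen,
				 unsigned int icvsize,
				 unsigned int blocksize)
{
	unsigned int m = (payloadlen + icvsize) & (blocksize - 1); /* REG3 */
	unsigned int padlen = blocksize - m;                       /* REG2 */

	return payloadlen + icvsize + padlen;                      /* VSOL */
}

int main(void)
{
	/* 100-byte record, HMAC-SHA1 ICV, AES-CBC */
	printf("%u\n", tls_cryptlen(100, 20, 16));	/* 100 + 20 + 8  = 128 */
	/* an aligned record still gets a full block of padding */
	printf("%u\n", tls_cryptlen(108, 20, 16));	/* 108 + 20 + 16 = 144 */
	return 0;
}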
2860 +
2861 +/**
2862 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2863 + * @desc: pointer to buffer used for descriptor construction
2864 + * @cdata: pointer to block cipher transform definitions
2865 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2866 + * with OP_ALG_AAI_CBC
2867 + * @adata: pointer to authentication transform definitions.
2868 + * A split key is required for SEC Era < 6; the size of the split key
2869 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2870 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2871 + * @assoclen: associated data length
2872 + * @ivsize: initialization vector size
2873 + * @authsize: authentication data size
2874 + * @blocksize: block cipher size
2875 + * @era: SEC Era
2876 + */
2877 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2878 + struct alginfo *adata, unsigned int assoclen,
2879 + unsigned int ivsize, unsigned int authsize,
2880 + unsigned int blocksize, int era)
2881 +{
2882 + u32 stidx, jumpback;
2883 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2884 + /*
2885 + * Pointer Size bool determines the size of address pointers.
2886 + * false - Pointers fit in one 32-bit word.
2887 + * true - Pointers fit in two 32-bit words.
2888 + */
2889 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2890 +
2891 + stidx = 1 << HDR_START_IDX_SHIFT;
2892 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2893 +
2894 + /* skip key loading if they are loaded due to sharing */
2895 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2896 + JUMP_COND_SHRD);
2897 +
2898 + if (era < 6)
2899 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2900 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
2901 + else
2902 + append_proto_dkp(desc, adata);
2903 +
2904 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2905 + KEY_DEST_CLASS_REG);
2906 +
2907 + set_jump_tgt_here(desc, key_jump_cmd);
2908 +
2909 + /* class 2 operation */
2910 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2911 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2912 + /* class 1 operation */
2913 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2914 + OP_ALG_DECRYPT);
2915 +
2916 + /* VSIL = input data length - 2 * block_size */
2917 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2918 + blocksize);
2919 +
2920 + /*
2921 + * payloadlen + icvlen + padlen = input data length - (assoclen +
2922 + * ivsize)
2923 + */
2924 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2925 +
2926 + /* skip data to the last but one cipher block */
2927 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2928 +
2929 + /* load iv for the last cipher block */
2930 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2931 + LDST_CLASS_1_CCB | ivsize);
2932 +
2933 + /* read last cipher block */
2934 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2935 + FIFOLD_TYPE_LAST1 | blocksize);
2936 +
2937 + /* move decrypted block into math0 and math1 */
2938 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2939 + blocksize);
2940 +
2941 + /* reset AES CHA */
2942 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2943 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2944 +
2945 + /* rewind input sequence */
2946 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2947 +
2948 + /* key1 is in decryption form */
2949 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2950 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2951 +
2952 + /* load iv in context1 */
2953 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2954 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2955 +
2956 + /* read sequence number */
2957 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2958 + /* load Type, Version and Len fields in math0 */
2959 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2960 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2961 +
2962 + /* compute (padlen - 1) */
2963 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2964 +
2965 + /* math2 = icvlen + (padlen - 1) + 1 */
2966 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2967 +
2968 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2969 +
2970 + /* VSOL = payloadlen + icvlen + padlen */
2971 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2972 +
2973 + if (caam_little_end)
2974 + append_moveb(desc, MOVE_WAITCOMP |
2975 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2976 +
2977 + /* update Len field */
2978 + append_math_sub(desc, REG0, REG0, REG2, 8);
2979 +
2980 + /* store decrypted payload, icv and padding */
2981 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2982 +
2983 +	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
2984 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2985 +
2986 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2987 + JUMP_COND_MATH_Z);
2988 +
2989 + /* send Type, Version and Len(pre ICV) fields to authentication */
2990 + append_move(desc, MOVE_WAITCOMP |
2991 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2992 + (3 << MOVE_OFFSET_SHIFT) | 5);
2993 +
2994 + /* outsnooping payload */
2995 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2996 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2997 + FIFOLDST_VLF);
2998 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
2999 +
3000 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
3001 + /* send Type, Version and Len(pre ICV) fields to authentication */
3002 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
3003 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
3004 + (3 << MOVE_OFFSET_SHIFT) | 5);
3005 +
3006 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
3007 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
3008 +
3009 + /* load icvlen and padlen */
3010 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
3011 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
3012 +
3013 +	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
3014 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
3015 +
3016 + /*
3017 + * Start a new input sequence using the SEQ OUT PTR command options,
3018 + * pointer and length used when the current output sequence was defined.
3019 + */
3020 + if (ps) {
3021 + /*
3022 + * Move the lower 32 bits of Shared Descriptor address, the
3023 + * SEQ OUT PTR command, Output Pointer (2 words) and
3024 + * Output Length into math registers.
3025 + */
3026 + if (caam_little_end)
3027 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3028 + MOVE_DEST_MATH0 |
3029 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
3030 + else
3031 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3032 + MOVE_DEST_MATH0 |
3033 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
3034 +
3035 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
3036 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
3037 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
3038 + /* Append a JUMP command after the copied fields */
3039 + jumpback = CMD_JUMP | (char)-9;
3040 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3041 + LDST_SRCDST_WORD_DECO_MATH2 |
3042 + (4 << LDST_OFFSET_SHIFT));
3043 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3044 + /* Move the updated fields back to the Job Descriptor */
3045 + if (caam_little_end)
3046 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3047 + MOVE_DEST_DESCBUF |
3048 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
3049 + else
3050 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3051 + MOVE_DEST_DESCBUF |
3052 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
3053 +
3054 + /*
3055 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3056 + * and then jump back to the next command from the
3057 + * Shared Descriptor.
3058 + */
3059 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
3060 + } else {
3061 + /*
3062 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
3063 + * Output Length into math registers.
3064 + */
3065 + if (caam_little_end)
3066 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3067 + MOVE_DEST_MATH0 |
3068 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
3069 + else
3070 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3071 + MOVE_DEST_MATH0 |
3072 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
3073 +
3074 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
3075 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
3076 + ~(((u64)(CMD_SEQ_IN_PTR ^
3077 + CMD_SEQ_OUT_PTR)) << 32));
3078 + /* Append a JUMP command after the copied fields */
3079 + jumpback = CMD_JUMP | (char)-7;
3080 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3081 + LDST_SRCDST_WORD_DECO_MATH1 |
3082 + (4 << LDST_OFFSET_SHIFT));
3083 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3084 + /* Move the updated fields back to the Job Descriptor */
3085 + if (caam_little_end)
3086 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3087 + MOVE_DEST_DESCBUF |
3088 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
3089 + else
3090 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3091 + MOVE_DEST_DESCBUF |
3092 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
3093 +
3094 + /*
3095 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3096 + * and then jump back to the next command from the
3097 + * Shared Descriptor.
3098 + */
3099 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
3100 + }
3101 +
3102 + /* skip payload */
3103 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
3104 + /* check icv */
3105 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
3106 + FIFOLD_TYPE_LAST2 | authsize);
3107 +
3108 +#ifdef DEBUG
3109 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
3110 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
3111 + desc_bytes(desc), 1);
3112 +#endif
3113 +}
3114 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
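Editor's note: on the decrypt side the descriptor above decrypts the last cipher block first, reads its final byte as (padlen - 1) per TLS 1.0, and then subtracts icvlen + padlen from the record length to recover the plaintext payload length. A standalone sketch of that recovery step follows; it is not part of the patch, only the length math is shown, and the names are illustrative.

#include <stdio.h>

/* Illustrative only: length recovery done by cnstr_shdsc_tls_decap().
 * last_byte is the final byte of the decrypted last cipher block. */
static int tls_payloadlen(unsigned int reclen,     /* payload + icv + pad */
			  unsigned int icvsize,
			  unsigned char last_byte)
{
	unsigned int padlen = (unsigned int)last_byte + 1;  /* REG1 + 1 */
	unsigned int strip  = icvsize + padlen;             /* REG2 */

	if (strip > reclen)
		return -1;	/* malformed record */

	return (int)(reclen - strip);
}

int main(void)
{
	/* 128-byte record, HMAC-SHA1 ICV, pad bytes all 0x07 */
	printf("%d\n", tls_payloadlen(128, 20, 0x07));	/* -> 100 */
	return 0;
}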
3115 +
3116 +/**
3117 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
3118 * @desc: pointer to buffer used for descriptor construction
3119 * @cdata: pointer to block cipher transform definitions
3120 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3121 + * @ivsize: initialization vector size
3122 * @icvsize: integrity check value (ICV) size (truncated or full)
3123 + * @is_qi: true when called from caam/qi
3124 */
3125 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3126 - unsigned int icvsize)
3127 + unsigned int ivsize, unsigned int icvsize,
3128 + const bool is_qi)
3129 {
3130 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
3131 *zero_assoc_jump_cmd2;
3132 @@ -621,11 +1075,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3133 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3134 OP_ALG_ENCRYPT);
3135
3136 + if (is_qi) {
3137 + u32 *wait_load_cmd;
3138 +
3139 + /* REG3 = assoclen */
3140 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3141 + LDST_SRCDST_WORD_DECO_MATH3 |
3142 + (4 << LDST_OFFSET_SHIFT));
3143 +
3144 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3145 + JUMP_COND_CALM | JUMP_COND_NCP |
3146 + JUMP_COND_NOP | JUMP_COND_NIP |
3147 + JUMP_COND_NIFP);
3148 + set_jump_tgt_here(desc, wait_load_cmd);
3149 +
3150 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
3151 + ivsize);
3152 + } else {
3153 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3154 + CAAM_CMD_SZ);
3155 + }
3156 +
3157 /* if assoclen + cryptlen is ZERO, skip to ICV write */
3158 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3159 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
3160 JUMP_COND_MATH_Z);
3161
3162 + if (is_qi)
3163 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3164 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3165 +
3166 /* if assoclen is ZERO, skip reading the assoc data */
3167 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3168 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3169 @@ -657,8 +1135,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3170 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3171 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
3172
3173 - /* jump the zero-payload commands */
3174 - append_jump(desc, JUMP_TEST_ALL | 2);
3175 + /* jump to ICV writing */
3176 + if (is_qi)
3177 + append_jump(desc, JUMP_TEST_ALL | 4);
3178 + else
3179 + append_jump(desc, JUMP_TEST_ALL | 2);
3180
3181 /* zero-payload commands */
3182 set_jump_tgt_here(desc, zero_payload_jump_cmd);
3183 @@ -666,10 +1147,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3184 /* read assoc data */
3185 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3186 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
3187 + if (is_qi)
3188 + /* jump to ICV writing */
3189 + append_jump(desc, JUMP_TEST_ALL | 2);
3190
3191 /* There is no input data */
3192 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
3193
3194 + if (is_qi)
3195 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3196 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
3197 + FIFOLD_TYPE_LAST1);
3198 +
3199 /* write ICV */
3200 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
3201 LDST_SRCDST_BYTE_CONTEXT);
3202 @@ -686,10 +1175,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
3203 * @desc: pointer to buffer used for descriptor construction
3204 * @cdata: pointer to block cipher transform definitions
3205 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3206 + * @ivsize: initialization vector size
3207 * @icvsize: integrity check value (ICV) size (truncated or full)
3208 + * @is_qi: true when called from caam/qi
3209 */
3210 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3211 - unsigned int icvsize)
3212 + unsigned int ivsize, unsigned int icvsize,
3213 + const bool is_qi)
3214 {
3215 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
3216
3217 @@ -710,6 +1202,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
3218 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3219 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3220
3221 + if (is_qi) {
3222 + u32 *wait_load_cmd;
3223 +
3224 + /* REG3 = assoclen */
3225 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3226 + LDST_SRCDST_WORD_DECO_MATH3 |
3227 + (4 << LDST_OFFSET_SHIFT));
3228 +
3229 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3230 + JUMP_COND_CALM | JUMP_COND_NCP |
3231 + JUMP_COND_NOP | JUMP_COND_NIP |
3232 + JUMP_COND_NIFP);
3233 + set_jump_tgt_here(desc, wait_load_cmd);
3234 +
3235 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3236 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3237 + }
3238 +
3239 /* if assoclen is ZERO, skip reading the assoc data */
3240 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3241 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3242 @@ -762,10 +1272,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
3243 * @desc: pointer to buffer used for descriptor construction
3244 * @cdata: pointer to block cipher transform definitions
3245 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3246 + * @ivsize: initialization vector size
3247 * @icvsize: integrity check value (ICV) size (truncated or full)
3248 + * @is_qi: true when called from caam/qi
3249 */
3250 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3251 - unsigned int icvsize)
3252 + unsigned int ivsize, unsigned int icvsize,
3253 + const bool is_qi)
3254 {
3255 u32 *key_jump_cmd;
3256
3257 @@ -786,7 +1299,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3258 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3259 OP_ALG_ENCRYPT);
3260
3261 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3262 + if (is_qi) {
3263 + u32 *wait_load_cmd;
3264 +
3265 + /* REG3 = assoclen */
3266 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3267 + LDST_SRCDST_WORD_DECO_MATH3 |
3268 + (4 << LDST_OFFSET_SHIFT));
3269 +
3270 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3271 + JUMP_COND_CALM | JUMP_COND_NCP |
3272 + JUMP_COND_NOP | JUMP_COND_NIP |
3273 + JUMP_COND_NIFP);
3274 + set_jump_tgt_here(desc, wait_load_cmd);
3275 +
3276 + /* Read salt and IV */
3277 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3278 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3279 + FIFOLD_TYPE_IV);
3280 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3281 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3282 + }
3283 +
3284 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3285 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3286
3287 /* Read assoc data */
3288 @@ -794,7 +1329,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3289 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3290
3291 /* Skip IV */
3292 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3293 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3294
3295 /* Will read cryptlen bytes */
3296 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3297 @@ -833,10 +1368,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
3298 * @desc: pointer to buffer used for descriptor construction
3299 * @cdata: pointer to block cipher transform definitions
3300 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3301 + * @ivsize: initialization vector size
3302 * @icvsize: integrity check value (ICV) size (truncated or full)
3303 + * @is_qi: true when called from caam/qi
3304 */
3305 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3306 - unsigned int icvsize)
3307 + unsigned int ivsize, unsigned int icvsize,
3308 + const bool is_qi)
3309 {
3310 u32 *key_jump_cmd;
3311
3312 @@ -858,7 +1396,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3313 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3314 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3315
3316 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3317 + if (is_qi) {
3318 + u32 *wait_load_cmd;
3319 +
3320 + /* REG3 = assoclen */
3321 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3322 + LDST_SRCDST_WORD_DECO_MATH3 |
3323 + (4 << LDST_OFFSET_SHIFT));
3324 +
3325 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3326 + JUMP_COND_CALM | JUMP_COND_NCP |
3327 + JUMP_COND_NOP | JUMP_COND_NIP |
3328 + JUMP_COND_NIFP);
3329 + set_jump_tgt_here(desc, wait_load_cmd);
3330 +
3331 + /* Read salt and IV */
3332 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3333 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3334 + FIFOLD_TYPE_IV);
3335 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3336 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3337 + }
3338 +
3339 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3340 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3341
3342 /* Read assoc data */
3343 @@ -866,7 +1426,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3344 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3345
3346 /* Skip IV */
3347 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3348 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3349
3350 /* Will read cryptlen bytes */
3351 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
3352 @@ -905,10 +1465,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
3353 * @desc: pointer to buffer used for descriptor construction
3354 * @cdata: pointer to block cipher transform definitions
3355 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3356 + * @ivsize: initialization vector size
3357 * @icvsize: integrity check value (ICV) size (truncated or full)
3358 + * @is_qi: true when called from caam/qi
3359 */
3360 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3361 - unsigned int icvsize)
3362 + unsigned int ivsize, unsigned int icvsize,
3363 + const bool is_qi)
3364 {
3365 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3366
3367 @@ -929,6 +1492,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3368 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3369 OP_ALG_ENCRYPT);
3370
3371 + if (is_qi) {
3372 + /* assoclen is not needed, skip it */
3373 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3374 +
3375 + /* Read salt and IV */
3376 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3377 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3378 + FIFOLD_TYPE_IV);
3379 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3380 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3381 + }
3382 +
3383 /* assoclen + cryptlen = seqinlen */
3384 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
3385
3386 @@ -940,7 +1515,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3387 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3388 (0x6 << MOVE_LEN_SHIFT));
3389 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3390 - (0x8 << MOVE_LEN_SHIFT));
3391 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3392
3393 /* Will read assoclen + cryptlen bytes */
3394 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3395 @@ -975,10 +1550,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
3396 * @desc: pointer to buffer used for descriptor construction
3397 * @cdata: pointer to block cipher transform definitions
3398 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3399 + * @ivsize: initialization vector size
3400 * @icvsize: integrity check value (ICV) size (truncated or full)
3401 + * @is_qi: true when called from caam/qi
3402 */
3403 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3404 - unsigned int icvsize)
3405 + unsigned int ivsize, unsigned int icvsize,
3406 + const bool is_qi)
3407 {
3408 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3409
3410 @@ -999,6 +1577,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3411 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3412 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3413
3414 + if (is_qi) {
3415 + /* assoclen is not needed, skip it */
3416 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3417 +
3418 + /* Read salt and IV */
3419 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3420 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3421 + FIFOLD_TYPE_IV);
3422 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3423 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3424 + }
3425 +
3426 /* assoclen + cryptlen = seqoutlen */
3427 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3428
3429 @@ -1010,7 +1600,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3430 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3431 (0x6 << MOVE_LEN_SHIFT));
3432 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3433 - (0x8 << MOVE_LEN_SHIFT));
3434 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3435
3436 /* Will read assoclen + cryptlen bytes */
3437 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3438 @@ -1044,6 +1634,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3439 }
3440 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
3441
3442 +/**
3443 + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
3444 + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
3445 + * descriptor (non-protocol).
3446 + * @desc: pointer to buffer used for descriptor construction
3447 + * @cdata: pointer to block cipher transform definitions
3448 + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
3449 + * OP_ALG_AAI_AEAD.
3450 + * @adata: pointer to authentication transform definitions
3451 + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
3452 + * OP_ALG_AAI_AEAD.
3453 + * @ivsize: initialization vector size
3454 + * @icvsize: integrity check value (ICV) size (truncated or full)
3455 + * @encap: true if encapsulation, false if decapsulation
3456 + * @is_qi: true when called from caam/qi
3457 + */
3458 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3459 + struct alginfo *adata, unsigned int ivsize,
3460 + unsigned int icvsize, const bool encap,
3461 + const bool is_qi)
3462 +{
3463 + u32 *key_jump_cmd, *wait_cmd;
3464 + u32 nfifo;
3465 + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
3466 +
3467 + /* Note: Context registers are saved. */
3468 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
3469 +
3470 + /* skip key loading if they are loaded due to sharing */
3471 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3472 + JUMP_COND_SHRD);
3473 +
3474 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
3475 + CLASS_1 | KEY_DEST_CLASS_REG);
3476 +
3477 + /* For IPsec, load the salt from the key material into the context register */
3478 + if (is_ipsec)
3479 + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
3480 + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
3481 + 4 << LDST_OFFSET_SHIFT);
3482 +
3483 + set_jump_tgt_here(desc, key_jump_cmd);
3484 +
3485 + /* Class 2 and 1 operations: Poly & ChaCha */
3486 + if (encap) {
3487 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3488 + OP_ALG_ENCRYPT);
3489 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3490 + OP_ALG_ENCRYPT);
3491 + } else {
3492 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3493 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3494 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3495 + OP_ALG_DECRYPT);
3496 + }
3497 +
3498 + if (is_qi) {
3499 + u32 *wait_load_cmd;
3500 + u32 ctx1_iv_off = is_ipsec ? 8 : 4;
3501 +
3502 + /* REG3 = assoclen */
3503 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3504 + LDST_SRCDST_WORD_DECO_MATH3 |
3505 + 4 << LDST_OFFSET_SHIFT);
3506 +
3507 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3508 + JUMP_COND_CALM | JUMP_COND_NCP |
3509 + JUMP_COND_NOP | JUMP_COND_NIP |
3510 + JUMP_COND_NIFP);
3511 + set_jump_tgt_here(desc, wait_load_cmd);
3512 +
3513 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
3514 + LDST_SRCDST_BYTE_CONTEXT |
3515 + ctx1_iv_off << LDST_OFFSET_SHIFT);
3516 + }
3517 +
3518 + /*
3519 + * MAGIC with NFIFO
3520 + * Read associated data from the input and send them to class1 and
3521 + * class2 alignment blocks. From class1 send data to output fifo and
3522 + * then write it to memory since we don't need to encrypt AD.
3523 + */
3524 + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
3525 + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
3526 + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
3527 + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
3528 +
3529 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3530 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3531 + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
3532 + FIFOLD_CLASS_CLASS1 | LDST_VLF);
3533 + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
3534 + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
3535 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
3536 +
3537 + /* IPsec - copy IV to the output */
3538 + if (is_ipsec)
3539 + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
3540 + 0x2 << 25);
3541 +
3542 + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
3543 + JUMP_COND_NOP | JUMP_TEST_ALL);
3544 + set_jump_tgt_here(desc, wait_cmd);
3545 +
3546 + if (encap) {
3547 + /* Read and write cryptlen bytes */
3548 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3549 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3550 + CAAM_CMD_SZ);
3551 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
3552 +
3553 + /* Write ICV */
3554 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
3555 + LDST_SRCDST_BYTE_CONTEXT);
3556 + } else {
3557 + /* Read and write cryptlen bytes */
3558 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3559 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
3560 + CAAM_CMD_SZ);
3561 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
3562 +
3563 + /* Load ICV for verification */
3564 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
3565 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
3566 + }
3567 +
3568 + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
3569 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3570 + 1);
3571 +}
3572 +EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
3573 +
3574 /*
3575 * For ablkcipher encrypt and decrypt, read from req->src and
3576 * write to req->dst
3577 @@ -1062,7 +1784,8 @@ static inline void ablkcipher_append_src
3578 * @desc: pointer to buffer used for descriptor construction
3579 * @cdata: pointer to block cipher transform definitions
3580 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3581 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3582 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3583 + * - OP_ALG_ALGSEL_CHACHA20
3584 * @ivsize: initialization vector size
3585 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3586 * @ctx1_iv_off: IV offset in CONTEXT1 register
3587 @@ -1084,7 +1807,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
3588
3589 /* Load nonce into CONTEXT1 reg */
3590 if (is_rfc3686) {
3591 - u8 *nonce = cdata->key_virt + cdata->keylen;
3592 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3593
3594 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3595 LDST_CLASS_IND_CCB |
3596 @@ -1127,7 +1850,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
3597 * @desc: pointer to buffer used for descriptor construction
3598 * @cdata: pointer to block cipher transform definitions
3599 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3600 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3601 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3602 + * - OP_ALG_ALGSEL_CHACHA20
3603 * @ivsize: initialization vector size
3604 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3605 * @ctx1_iv_off: IV offset in CONTEXT1 register
3606 @@ -1149,7 +1873,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
3607
3608 /* Load nonce into CONTEXT1 reg */
3609 if (is_rfc3686) {
3610 - u8 *nonce = cdata->key_virt + cdata->keylen;
3611 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3612
3613 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3614 LDST_CLASS_IND_CCB |
3615 @@ -1218,7 +1942,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
3616
3617 /* Load Nonce into CONTEXT1 reg */
3618 if (is_rfc3686) {
3619 - u8 *nonce = cdata->key_virt + cdata->keylen;
3620 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3621
3622 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3623 LDST_CLASS_IND_CCB |
3624 --- a/drivers/crypto/caam/caamalg_desc.h
3625 +++ b/drivers/crypto/caam/caamalg_desc.h
3626 @@ -17,6 +17,9 @@
3627 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
3628 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
3629
3630 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
3631 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
3632 +
3633 /* Note: Nonce is counted in cdata.keylen */
3634 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
3635
3636 @@ -27,14 +30,20 @@
3637 #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
3638 #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
3639 #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
3640 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
3641 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
3642
3643 #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
3644 #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3645 #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3646 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
3647 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
3648
3649 #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
3650 #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
3651 #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
3652 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
3653 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
3654
3655 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
3656 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
3657 @@ -43,46 +52,67 @@
3658 15 * CAAM_CMD_SZ)
3659
3660 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
3661 - unsigned int icvsize);
3662 + unsigned int icvsize, int era);
3663
3664 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
3665 - unsigned int icvsize);
3666 + unsigned int icvsize, int era);
3667
3668 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
3669 struct alginfo *adata, unsigned int ivsize,
3670 unsigned int icvsize, const bool is_rfc3686,
3671 u32 *nonce, const u32 ctx1_iv_off,
3672 - const bool is_qi);
3673 + const bool is_qi, int era);
3674
3675 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
3676 struct alginfo *adata, unsigned int ivsize,
3677 unsigned int icvsize, const bool geniv,
3678 const bool is_rfc3686, u32 *nonce,
3679 - const u32 ctx1_iv_off, const bool is_qi);
3680 + const u32 ctx1_iv_off, const bool is_qi, int era);
3681
3682 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3683 struct alginfo *adata, unsigned int ivsize,
3684 unsigned int icvsize, const bool is_rfc3686,
3685 u32 *nonce, const u32 ctx1_iv_off,
3686 - const bool is_qi);
3687 + const bool is_qi, int era);
3688 +
3689 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
3690 + struct alginfo *adata, unsigned int assoclen,
3691 + unsigned int ivsize, unsigned int authsize,
3692 + unsigned int blocksize, int era);
3693 +
3694 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
3695 + struct alginfo *adata, unsigned int assoclen,
3696 + unsigned int ivsize, unsigned int authsize,
3697 + unsigned int blocksize, int era);
3698
3699 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3700 - unsigned int icvsize);
3701 + unsigned int ivsize, unsigned int icvsize,
3702 + const bool is_qi);
3703
3704 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3705 - unsigned int icvsize);
3706 + unsigned int ivsize, unsigned int icvsize,
3707 + const bool is_qi);
3708
3709 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3710 - unsigned int icvsize);
3711 + unsigned int ivsize, unsigned int icvsize,
3712 + const bool is_qi);
3713
3714 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3715 - unsigned int icvsize);
3716 + unsigned int ivsize, unsigned int icvsize,
3717 + const bool is_qi);
3718
3719 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3720 - unsigned int icvsize);
3721 + unsigned int ivsize, unsigned int icvsize,
3722 + const bool is_qi);
3723
3724 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3725 - unsigned int icvsize);
3726 + unsigned int ivsize, unsigned int icvsize,
3727 + const bool is_qi);
3728 +
3729 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3730 + struct alginfo *adata, unsigned int ivsize,
3731 + unsigned int icvsize, const bool encap,
3732 + const bool is_qi);
3733
3734 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3735 unsigned int ivsize, const bool is_rfc3686,
3736 --- a/drivers/crypto/caam/caamalg_qi.c
3737 +++ b/drivers/crypto/caam/caamalg_qi.c
3738 @@ -7,7 +7,7 @@
3739 */
3740
3741 #include "compat.h"
3742 -
3743 +#include "ctrl.h"
3744 #include "regs.h"
3745 #include "intern.h"
3746 #include "desc_constr.h"
3747 @@ -53,6 +53,7 @@ struct caam_ctx {
3748 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
3749 u8 key[CAAM_MAX_KEY_SIZE];
3750 dma_addr_t key_dma;
3751 + enum dma_data_direction dir;
3752 struct alginfo adata;
3753 struct alginfo cdata;
3754 unsigned int authsize;
3755 @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
3756 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3757 OP_ALG_AAI_CTR_MOD128);
3758 const bool is_rfc3686 = alg->caam.rfc3686;
3759 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3760
3761 if (!ctx->cdata.keylen || !ctx->authsize)
3762 return 0;
3763 @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
3764
3765 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3766 ivsize, ctx->authsize, is_rfc3686, nonce,
3767 - ctx1_iv_off, true);
3768 + ctx1_iv_off, true, ctrlpriv->era);
3769
3770 skip_enc:
3771 /* aead_decrypt shared descriptor */
3772 @@ -149,7 +151,8 @@ skip_enc:
3773
3774 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3775 ivsize, ctx->authsize, alg->caam.geniv,
3776 - is_rfc3686, nonce, ctx1_iv_off, true);
3777 + is_rfc3686, nonce, ctx1_iv_off, true,
3778 + ctrlpriv->era);
3779
3780 if (!alg->caam.geniv)
3781 goto skip_givenc;
3782 @@ -176,7 +179,7 @@ skip_enc:
3783
3784 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3785 ivsize, ctx->authsize, is_rfc3686, nonce,
3786 - ctx1_iv_off, true);
3787 + ctx1_iv_off, true, ctrlpriv->era);
3788
3789 skip_givenc:
3790 return 0;
3791 @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
3792 {
3793 struct caam_ctx *ctx = crypto_aead_ctx(aead);
3794 struct device *jrdev = ctx->jrdev;
3795 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3796 struct crypto_authenc_keys keys;
3797 int ret = 0;
3798
3799 @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
3800 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3801 #endif
3802
3803 + /*
3804 + * If DKP is supported, use it in the shared descriptor to generate
3805 + * the split key.
3806 + */
3807 + if (ctrlpriv->era >= 6) {
3808 + ctx->adata.keylen = keys.authkeylen;
3809 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3810 + OP_ALG_ALGSEL_MASK);
3811 +
3812 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3813 + goto badkey;
3814 +
3815 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3816 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3817 + keys.enckeylen);
3818 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
3819 + ctx->adata.keylen_pad +
3820 + keys.enckeylen, ctx->dir);
3821 + goto skip_split_key;
3822 + }
3823 +
3824 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3825 keys.authkeylen, CAAM_MAX_KEY_SIZE -
3826 keys.enckeylen);
3827 @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
3828 /* postpend encryption key to auth split key */
3829 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3830 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3831 - keys.enckeylen, DMA_TO_DEVICE);
3832 + keys.enckeylen, ctx->dir);
3833 #ifdef DEBUG
3834 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3835 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3836 ctx->adata.keylen_pad + keys.enckeylen, 1);
3837 #endif
3838
3839 +skip_split_key:
3840 ctx->cdata.keylen = keys.enckeylen;
3841
3842 ret = aead_set_sh_desc(aead);
3843 @@ -258,6 +284,468 @@ badkey:
3844 return -EINVAL;
3845 }
3846
3847 +static int tls_set_sh_desc(struct crypto_aead *tls)
3848 +{
3849 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3850 + unsigned int ivsize = crypto_aead_ivsize(tls);
3851 + unsigned int blocksize = crypto_aead_blocksize(tls);
3852 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
3853 + unsigned int data_len[2];
3854 + u32 inl_mask;
3855 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3856 +
3857 + if (!ctx->cdata.keylen || !ctx->authsize)
3858 + return 0;
3859 +
3860 + /*
3861 + * TLS 1.0 encrypt shared descriptor
3862 + * Job Descriptor and Shared Descriptor
3863 + * must fit into the 64-word Descriptor h/w Buffer
3864 + */
3865 + data_len[0] = ctx->adata.keylen_pad;
3866 + data_len[1] = ctx->cdata.keylen;
3867 +
3868 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
3869 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
3870 + return -EINVAL;
3871 +
3872 + if (inl_mask & 1)
3873 + ctx->adata.key_virt = ctx->key;
3874 + else
3875 + ctx->adata.key_dma = ctx->key_dma;
3876 +
3877 + if (inl_mask & 2)
3878 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
3879 + else
3880 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3881 +
3882 + ctx->adata.key_inline = !!(inl_mask & 1);
3883 + ctx->cdata.key_inline = !!(inl_mask & 2);
3884 +
3885 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3886 + assoclen, ivsize, ctx->authsize, blocksize,
3887 + ctrlpriv->era);
3888 +
3889 + /*
3890 + * TLS 1.0 decrypt shared descriptor
3891 + * Keys do not fit inline, regardless of algorithms used
3892 + */
3893 + ctx->adata.key_inline = false;
3894 + ctx->adata.key_dma = ctx->key_dma;
3895 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3896 +
3897 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3898 + assoclen, ivsize, ctx->authsize, blocksize,
3899 + ctrlpriv->era);
3900 +
3901 + return 0;
3902 +}
3903 +
3904 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
3905 +{
3906 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3907 +
3908 + ctx->authsize = authsize;
3909 + tls_set_sh_desc(tls);
3910 +
3911 + return 0;
3912 +}
3913 +
3914 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
3915 + unsigned int keylen)
3916 +{
3917 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3918 + struct device *jrdev = ctx->jrdev;
3919 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3920 + struct crypto_authenc_keys keys;
3921 + int ret = 0;
3922 +
3923 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3924 + goto badkey;
3925 +
3926 +#ifdef DEBUG
3927 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
3928 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
3929 + keys.authkeylen);
3930 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3931 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3932 +#endif
3933 +
3934 + /*
3935 + * If DKP is supported, use it in the shared descriptor to generate
3936 + * the split key.
3937 + */
3938 + if (ctrlpriv->era >= 6) {
3939 + ctx->adata.keylen = keys.authkeylen;
3940 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3941 + OP_ALG_ALGSEL_MASK);
3942 +
3943 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3944 + goto badkey;
3945 +
3946 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3947 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3948 + keys.enckeylen);
3949 + dma_sync_single_for_device(jrdev, ctx->key_dma,
3950 + ctx->adata.keylen_pad +
3951 + keys.enckeylen, ctx->dir);
3952 + goto skip_split_key;
3953 + }
3954 +
3955 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3956 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
3957 + keys.enckeylen);
3958 + if (ret)
3959 + goto badkey;
3960 +
3961 + /* append encryption key after the auth split key */
3962 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3963 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3964 + keys.enckeylen, ctx->dir);
3965 +
3966 +#ifdef DEBUG
3967 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
3968 + ctx->adata.keylen, ctx->adata.keylen_pad);
3969 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3970 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3971 + ctx->adata.keylen_pad + keys.enckeylen, 1);
3972 +#endif
3973 +
3974 +skip_split_key:
3975 + ctx->cdata.keylen = keys.enckeylen;
3976 +
3977 + ret = tls_set_sh_desc(tls);
3978 + if (ret)
3979 + goto badkey;
3980 +
3981 + /* Now update the driver contexts with the new shared descriptor */
3982 + if (ctx->drv_ctx[ENCRYPT]) {
3983 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3984 + ctx->sh_desc_enc);
3985 + if (ret) {
3986 + dev_err(jrdev, "driver enc context update failed\n");
3987 + goto badkey;
3988 + }
3989 + }
3990 +
3991 + if (ctx->drv_ctx[DECRYPT]) {
3992 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3993 + ctx->sh_desc_dec);
3994 + if (ret) {
3995 + dev_err(jrdev, "driver dec context update failed\n");
3996 + goto badkey;
3997 + }
3998 + }
3999 +
4000 + return ret;
4001 +badkey:
4002 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
4003 + return -EINVAL;
4004 +}
4005 +
4006 +static int gcm_set_sh_desc(struct crypto_aead *aead)
4007 +{
4008 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4009 + unsigned int ivsize = crypto_aead_ivsize(aead);
4010 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4011 + ctx->cdata.keylen;
4012 +
4013 + if (!ctx->cdata.keylen || !ctx->authsize)
4014 + return 0;
4015 +
4016 + /*
4017 + * Job Descriptor and Shared Descriptor
4018 + * must fit into the 64-word Descriptor h/w Buffer
4019 + */
4020 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
4021 + ctx->cdata.key_inline = true;
4022 + ctx->cdata.key_virt = ctx->key;
4023 + } else {
4024 + ctx->cdata.key_inline = false;
4025 + ctx->cdata.key_dma = ctx->key_dma;
4026 + }
4027 +
4028 + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4029 + ctx->authsize, true);
4030 +
4031 + /*
4032 + * Job Descriptor and Shared Descriptor
4033 + * must fit into the 64-word Descriptor h/w Buffer
4034 + */
4035 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
4036 + ctx->cdata.key_inline = true;
4037 + ctx->cdata.key_virt = ctx->key;
4038 + } else {
4039 + ctx->cdata.key_inline = false;
4040 + ctx->cdata.key_dma = ctx->key_dma;
4041 + }
4042 +
4043 + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4044 + ctx->authsize, true);
4045 +
4046 + return 0;
4047 +}
4048 +
4049 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
4050 +{
4051 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4052 +
4053 + ctx->authsize = authsize;
4054 + gcm_set_sh_desc(authenc);
4055 +
4056 + return 0;
4057 +}
4058 +
4059 +static int gcm_setkey(struct crypto_aead *aead,
4060 + const u8 *key, unsigned int keylen)
4061 +{
4062 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4063 + struct device *jrdev = ctx->jrdev;
4064 + int ret;
4065 +
4066 +#ifdef DEBUG
4067 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4068 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4069 +#endif
4070 +
4071 + memcpy(ctx->key, key, keylen);
4072 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
4073 + ctx->cdata.keylen = keylen;
4074 +
4075 + ret = gcm_set_sh_desc(aead);
4076 + if (ret)
4077 + return ret;
4078 +
4079 + /* Now update the driver contexts with the new shared descriptor */
4080 + if (ctx->drv_ctx[ENCRYPT]) {
4081 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4082 + ctx->sh_desc_enc);
4083 + if (ret) {
4084 + dev_err(jrdev, "driver enc context update failed\n");
4085 + return ret;
4086 + }
4087 + }
4088 +
4089 + if (ctx->drv_ctx[DECRYPT]) {
4090 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4091 + ctx->sh_desc_dec);
4092 + if (ret) {
4093 + dev_err(jrdev, "driver dec context update failed\n");
4094 + return ret;
4095 + }
4096 + }
4097 +
4098 + return 0;
4099 +}
4100 +
4101 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
4102 +{
4103 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4104 + unsigned int ivsize = crypto_aead_ivsize(aead);
4105 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4106 + ctx->cdata.keylen;
4107 +
4108 + if (!ctx->cdata.keylen || !ctx->authsize)
4109 + return 0;
4110 +
4111 + ctx->cdata.key_virt = ctx->key;
4112 +
4113 + /*
4114 + * Job Descriptor and Shared Descriptor
4115 + * must fit into the 64-word Descriptor h/w Buffer
4116 + */
4117 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
4118 + ctx->cdata.key_inline = true;
4119 + } else {
4120 + ctx->cdata.key_inline = false;
4121 + ctx->cdata.key_dma = ctx->key_dma;
4122 + }
4123 +
4124 + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4125 + ctx->authsize, true);
4126 +
4127 + /*
4128 + * Job Descriptor and Shared Descriptor
4129 + * must fit into the 64-word Descriptor h/w Buffer
4130 + */
4131 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
4132 + ctx->cdata.key_inline = true;
4133 + } else {
4134 + ctx->cdata.key_inline = false;
4135 + ctx->cdata.key_dma = ctx->key_dma;
4136 + }
4137 +
4138 + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4139 + ctx->authsize, true);
4140 +
4141 + return 0;
4142 +}
4143 +
4144 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
4145 + unsigned int authsize)
4146 +{
4147 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4148 +
4149 + ctx->authsize = authsize;
4150 + rfc4106_set_sh_desc(authenc);
4151 +
4152 + return 0;
4153 +}
4154 +
4155 +static int rfc4106_setkey(struct crypto_aead *aead,
4156 + const u8 *key, unsigned int keylen)
4157 +{
4158 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4159 + struct device *jrdev = ctx->jrdev;
4160 + int ret;
4161 +
4162 + if (keylen < 4)
4163 + return -EINVAL;
4164 +
4165 +#ifdef DEBUG
4166 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4167 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4168 +#endif
4169 +
4170 + memcpy(ctx->key, key, keylen);
4171 + /*
4172 + * The last four bytes of the key material are used as the salt value
4173 + * in the nonce. Update the AES key length.
4174 + */
4175 + ctx->cdata.keylen = keylen - 4;
4176 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4177 + ctx->dir);
4178 +
4179 + ret = rfc4106_set_sh_desc(aead);
4180 + if (ret)
4181 + return ret;
4182 +
4183 + /* Now update the driver contexts with the new shared descriptor */
4184 + if (ctx->drv_ctx[ENCRYPT]) {
4185 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4186 + ctx->sh_desc_enc);
4187 + if (ret) {
4188 + dev_err(jrdev, "driver enc context update failed\n");
4189 + return ret;
4190 + }
4191 + }
4192 +
4193 + if (ctx->drv_ctx[DECRYPT]) {
4194 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4195 + ctx->sh_desc_dec);
4196 + if (ret) {
4197 + dev_err(jrdev, "driver dec context update failed\n");
4198 + return ret;
4199 + }
4200 + }
4201 +
4202 + return 0;
4203 +}
4204 +
4205 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
4206 +{
4207 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4208 + unsigned int ivsize = crypto_aead_ivsize(aead);
4209 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4210 + ctx->cdata.keylen;
4211 +
4212 + if (!ctx->cdata.keylen || !ctx->authsize)
4213 + return 0;
4214 +
4215 + ctx->cdata.key_virt = ctx->key;
4216 +
4217 + /*
4218 + * Job Descriptor and Shared Descriptor
4219 + * must fit into the 64-word Descriptor h/w Buffer
4220 + */
4221 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
4222 + ctx->cdata.key_inline = true;
4223 + } else {
4224 + ctx->cdata.key_inline = false;
4225 + ctx->cdata.key_dma = ctx->key_dma;
4226 + }
4227 +
4228 + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4229 + ctx->authsize, true);
4230 +
4231 + /*
4232 + * Job Descriptor and Shared Descriptor
4233 + * must fit into the 64-word Descriptor h/w Buffer
4234 + */
4235 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
4236 + ctx->cdata.key_inline = true;
4237 + } else {
4238 + ctx->cdata.key_inline = false;
4239 + ctx->cdata.key_dma = ctx->key_dma;
4240 + }
4241 +
4242 + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4243 + ctx->authsize, true);
4244 +
4245 + return 0;
4246 +}
4247 +
4248 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
4249 + unsigned int authsize)
4250 +{
4251 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4252 +
4253 + ctx->authsize = authsize;
4254 + rfc4543_set_sh_desc(authenc);
4255 +
4256 + return 0;
4257 +}
4258 +
4259 +static int rfc4543_setkey(struct crypto_aead *aead,
4260 + const u8 *key, unsigned int keylen)
4261 +{
4262 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4263 + struct device *jrdev = ctx->jrdev;
4264 + int ret;
4265 +
4266 + if (keylen < 4)
4267 + return -EINVAL;
4268 +
4269 +#ifdef DEBUG
4270 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4271 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4272 +#endif
4273 +
4274 + memcpy(ctx->key, key, keylen);
4275 + /*
4276 + * The last four bytes of the key material are used as the salt value
4277 + * in the nonce. Update the AES key length.
4278 + */
4279 + ctx->cdata.keylen = keylen - 4;
4280 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4281 + ctx->dir);
4282 +
4283 + ret = rfc4543_set_sh_desc(aead);
4284 + if (ret)
4285 + return ret;
4286 +
4287 + /* Now update the driver contexts with the new shared descriptor */
4288 + if (ctx->drv_ctx[ENCRYPT]) {
4289 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4290 + ctx->sh_desc_enc);
4291 + if (ret) {
4292 + dev_err(jrdev, "driver enc context update failed\n");
4293 + return ret;
4294 + }
4295 + }
4296 +
4297 + if (ctx->drv_ctx[DECRYPT]) {
4298 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4299 + ctx->sh_desc_dec);
4300 + if (ret) {
4301 + dev_err(jrdev, "driver dec context update failed\n");
4302 + return ret;
4303 + }
4304 + }
4305 +
4306 + return 0;
4307 +}
4308 +
4309 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
4310 const u8 *key, unsigned int keylen)
4311 {
4312 @@ -414,6 +902,29 @@ struct aead_edesc {
4313 };
4314
4315 /*
4316 + * tls_edesc - s/w-extended tls descriptor
4317 + * @src_nents: number of segments in input scatterlist
4318 + * @dst_nents: number of segments in output scatterlist
4319 + * @iv_dma: dma address of iv for checking continuity and link table
4320 + * @qm_sg_bytes: length of dma mapped h/w link table
4321 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
4322 + * @qm_sg_dma: bus physical mapped address of h/w link table
4323 + * @drv_req: driver-specific request structure
4324 + * @sgt: the h/w link table, followed by IV
4325 + */
4326 +struct tls_edesc {
4327 + int src_nents;
4328 + int dst_nents;
4329 + dma_addr_t iv_dma;
4330 + int qm_sg_bytes;
4331 + dma_addr_t qm_sg_dma;
4332 + struct scatterlist tmp[2];
4333 + struct scatterlist *dst;
4334 + struct caam_drv_req drv_req;
4335 + struct qm_sg_entry sgt[0];
4336 +};
4337 +
4338 +/*
4339 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
4340 * @src_nents: number of segments in input scatterlist
4341 * @dst_nents: number of segments in output scatterlist
4342 @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
4343 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
4344 }
4345
4346 +static void tls_unmap(struct device *dev,
4347 + struct tls_edesc *edesc,
4348 + struct aead_request *req)
4349 +{
4350 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4351 + int ivsize = crypto_aead_ivsize(aead);
4352 +
4353 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
4354 + edesc->dst_nents, edesc->iv_dma, ivsize,
4355 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
4356 + edesc->qm_sg_bytes);
4357 +}
4358 +
4359 static void ablkcipher_unmap(struct device *dev,
4360 struct ablkcipher_edesc *edesc,
4361 struct ablkcipher_request *req)
4362 @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
4363 qidev = caam_ctx->qidev;
4364
4365 if (unlikely(status)) {
4366 + u32 ssrc = status & JRSTA_SSRC_MASK;
4367 + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
4368 +
4369 caam_jr_strstatus(qidev, status);
4370 - ecode = -EIO;
4371 + /*
4372 + * verify hw auth check passed else return -EBADMSG
4373 + */
4374 + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
4375 + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
4376 + ecode = -EBADMSG;
4377 + else
4378 + ecode = -EIO;
4379 }
4380
4381 edesc = container_of(drv_req, typeof(*edesc), drv_req);
4382 @@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
4383 /*
4384 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
4385 * Input is not contiguous.
4386 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4387 + * the end of the table by allocating more S/G entries. Logic:
4388 + * if (src != dst && output S/G)
4389 + * pad output S/G, if needed
4390 + * else if (src == dst && S/G)
4391 + * overlapping S/Gs; pad one of them
4392 + * else if (input S/G) ...
4393 + * pad input S/G, if needed
4394 */
4395 - qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
4396 - (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4397 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
4398 + if (mapped_dst_nents > 1)
4399 + qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4400 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
4401 + qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4402 + 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
4403 + else
4404 + qm_sg_ents = ALIGN(qm_sg_ents, 4);
4405 +
4406 sg_table = &edesc->sgt[0];
4407 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4408 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
4409 @@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
4410 return aead_crypt(req, false);
4411 }
4412
4413 +static int ipsec_gcm_encrypt(struct aead_request *req)
4414 +{
4415 + if (req->assoclen < 8)
4416 + return -EINVAL;
4417 +
4418 + return aead_crypt(req, true);
4419 +}
4420 +
4421 +static int ipsec_gcm_decrypt(struct aead_request *req)
4422 +{
4423 + if (req->assoclen < 8)
4424 + return -EINVAL;
4425 +
4426 + return aead_crypt(req, false);
4427 +}
4428 +
4429 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
4430 +{
4431 + struct device *qidev;
4432 + struct tls_edesc *edesc;
4433 + struct aead_request *aead_req = drv_req->app_ctx;
4434 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
4435 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
4436 + int ecode = 0;
4437 +
4438 + qidev = caam_ctx->qidev;
4439 +
4440 + if (unlikely(status)) {
4441 + caam_jr_strstatus(qidev, status);
4442 + ecode = -EIO;
4443 + }
4444 +
4445 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
4446 + tls_unmap(qidev, edesc, aead_req);
4447 +
4448 + aead_request_complete(aead_req, ecode);
4449 + qi_cache_free(edesc);
4450 +}
4451 +
4452 +/*
4453 + * allocate and map the tls extended descriptor
4454 + */
4455 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
4456 +{
4457 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4458 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4459 + unsigned int blocksize = crypto_aead_blocksize(aead);
4460 + unsigned int padsize, authsize;
4461 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
4462 + typeof(*alg), aead);
4463 + struct device *qidev = ctx->qidev;
4464 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4465 + GFP_KERNEL : GFP_ATOMIC;
4466 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
4467 + struct tls_edesc *edesc;
4468 + dma_addr_t qm_sg_dma, iv_dma = 0;
4469 + int ivsize = 0;
4470 + u8 *iv;
4471 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
4472 + int in_len, out_len;
4473 + struct qm_sg_entry *sg_table, *fd_sgt;
4474 + struct caam_drv_ctx *drv_ctx;
4475 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
4476 + struct scatterlist *dst;
4477 +
4478 + if (encrypt) {
4479 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
4480 + blocksize);
4481 + authsize = ctx->authsize + padsize;
4482 + } else {
4483 + authsize = ctx->authsize;
4484 + }
4485 +
4486 + drv_ctx = get_drv_ctx(ctx, op_type);
4487 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
4488 + return (struct tls_edesc *)drv_ctx;
4489 +
4490 + /* allocate space for base edesc, link tables and IV */
4491 + edesc = qi_cache_alloc(GFP_DMA | flags);
4492 + if (unlikely(!edesc)) {
4493 + dev_err(qidev, "could not allocate extended descriptor\n");
4494 + return ERR_PTR(-ENOMEM);
4495 + }
4496 +
4497 + if (likely(req->src == req->dst)) {
4498 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4499 + req->cryptlen +
4500 + (encrypt ? authsize : 0));
4501 + if (unlikely(src_nents < 0)) {
4502 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4503 + req->assoclen + req->cryptlen +
4504 + (encrypt ? authsize : 0));
4505 + qi_cache_free(edesc);
4506 + return ERR_PTR(src_nents);
4507 + }
4508 +
4509 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
4510 + DMA_BIDIRECTIONAL);
4511 + if (unlikely(!mapped_src_nents)) {
4512 + dev_err(qidev, "unable to map source\n");
4513 + qi_cache_free(edesc);
4514 + return ERR_PTR(-ENOMEM);
4515 + }
4516 + dst = req->dst;
4517 + } else {
4518 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4519 + req->cryptlen);
4520 + if (unlikely(src_nents < 0)) {
4521 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4522 + req->assoclen + req->cryptlen);
4523 + qi_cache_free(edesc);
4524 + return ERR_PTR(src_nents);
4525 + }
4526 +
4527 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
4528 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
4529 + (encrypt ? authsize : 0));
4530 + if (unlikely(dst_nents < 0)) {
4531 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
4532 + req->cryptlen +
4533 + (encrypt ? authsize : 0));
4534 + qi_cache_free(edesc);
4535 + return ERR_PTR(dst_nents);
4536 + }
4537 +
4538 + if (src_nents) {
4539 + mapped_src_nents = dma_map_sg(qidev, req->src,
4540 + src_nents, DMA_TO_DEVICE);
4541 + if (unlikely(!mapped_src_nents)) {
4542 + dev_err(qidev, "unable to map source\n");
4543 + qi_cache_free(edesc);
4544 + return ERR_PTR(-ENOMEM);
4545 + }
4546 + } else {
4547 + mapped_src_nents = 0;
4548 + }
4549 +
4550 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
4551 + DMA_FROM_DEVICE);
4552 + if (unlikely(!mapped_dst_nents)) {
4553 + dev_err(qidev, "unable to map destination\n");
4554 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
4555 + qi_cache_free(edesc);
4556 + return ERR_PTR(-ENOMEM);
4557 + }
4558 + }
4559 +
4560 + /*
4561 + * Create S/G table: IV, src, dst.
4562 + * Input is not contiguous.
4563 + */
4564 + qm_sg_ents = 1 + mapped_src_nents +
4565 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4566 + sg_table = &edesc->sgt[0];
4567 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4568 +
4569 + ivsize = crypto_aead_ivsize(aead);
4570 + iv = (u8 *)(sg_table + qm_sg_ents);
4571 + /* Make sure IV is located in a DMAable area */
4572 + memcpy(iv, req->iv, ivsize);
4573 + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
4574 + if (dma_mapping_error(qidev, iv_dma)) {
4575 + dev_err(qidev, "unable to map IV\n");
4576 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
4577 + 0, 0);
4578 + qi_cache_free(edesc);
4579 + return ERR_PTR(-ENOMEM);
4580 + }
4581 +
4582 + edesc->src_nents = src_nents;
4583 + edesc->dst_nents = dst_nents;
4584 + edesc->dst = dst;
4585 + edesc->iv_dma = iv_dma;
4586 + edesc->drv_req.app_ctx = req;
4587 + edesc->drv_req.cbk = tls_done;
4588 + edesc->drv_req.drv_ctx = drv_ctx;
4589 +
4590 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
4591 + qm_sg_index = 1;
4592 +
4593 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
4594 + qm_sg_index += mapped_src_nents;
4595 +
4596 + if (mapped_dst_nents > 1)
4597 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
4598 + qm_sg_index, 0);
4599 +
4600 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
4601 + if (dma_mapping_error(qidev, qm_sg_dma)) {
4602 + dev_err(qidev, "unable to map S/G table\n");
4603 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
4604 + ivsize, op_type, 0, 0);
4605 + qi_cache_free(edesc);
4606 + return ERR_PTR(-ENOMEM);
4607 + }
4608 +
4609 + edesc->qm_sg_dma = qm_sg_dma;
4610 + edesc->qm_sg_bytes = qm_sg_bytes;
4611 +
4612 + out_len = req->cryptlen + (encrypt ? authsize : 0);
4613 + in_len = ivsize + req->assoclen + req->cryptlen;
4614 +
4615 + fd_sgt = &edesc->drv_req.fd_sgt[0];
4616 +
4617 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
4618 +
4619 + if (req->dst == req->src)
4620 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
4621 + (sg_nents_for_len(req->src, req->assoclen) +
4622 + 1) * sizeof(*sg_table), out_len, 0);
4623 + else if (mapped_dst_nents == 1)
4624 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
4625 + else
4626 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
4627 + qm_sg_index, out_len, 0);
4628 +
4629 + return edesc;
4630 +}
4631 +
4632 +static int tls_crypt(struct aead_request *req, bool encrypt)
4633 +{
4634 + struct tls_edesc *edesc;
4635 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4636 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4637 + int ret;
4638 +
4639 + if (unlikely(caam_congested))
4640 + return -EAGAIN;
4641 +
4642 + edesc = tls_edesc_alloc(req, encrypt);
4643 + if (IS_ERR_OR_NULL(edesc))
4644 + return PTR_ERR(edesc);
4645 +
4646 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
4647 + if (!ret) {
4648 + ret = -EINPROGRESS;
4649 + } else {
4650 + tls_unmap(ctx->qidev, edesc, req);
4651 + qi_cache_free(edesc);
4652 + }
4653 +
4654 + return ret;
4655 +}
4656 +
4657 +static int tls_encrypt(struct aead_request *req)
4658 +{
4659 + return tls_crypt(req, true);
4660 +}
4661 +
4662 +static int tls_decrypt(struct aead_request *req)
4663 +{
4664 + return tls_crypt(req, false);
4665 +}
4666 +
4667 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
4668 {
4669 struct ablkcipher_edesc *edesc;
4670 @@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
4671 qm_sg_ents = 1 + mapped_src_nents;
4672 dst_sg_idx = qm_sg_ents;
4673
4674 - qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
4675 + /*
4676 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4677 + * the end of the table by allocating more S/G entries. Logic:
4678 + * if (src != dst && output S/G)
4679 + * pad output S/G, if needed
4680 + * else if (src == dst && S/G)
4681 + * overlapping S/Gs; pad one of them
4682 + * else if (input S/G) ...
4683 + * pad input S/G, if needed
4684 + */
4685 + if (mapped_dst_nents > 1)
4686 + qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4687 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
4688 + qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4689 + 1 + ALIGN(mapped_src_nents, 4));
4690 + else
4691 + qm_sg_ents = ALIGN(qm_sg_ents, 4);
4692 +
4693 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
4694 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
4695 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
4696 @@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
4697 };
4698
4699 static struct caam_aead_alg driver_aeads[] = {
4700 + {
4701 + .aead = {
4702 + .base = {
4703 + .cra_name = "rfc4106(gcm(aes))",
4704 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
4705 + .cra_blocksize = 1,
4706 + },
4707 + .setkey = rfc4106_setkey,
4708 + .setauthsize = rfc4106_setauthsize,
4709 + .encrypt = ipsec_gcm_encrypt,
4710 + .decrypt = ipsec_gcm_decrypt,
4711 + .ivsize = 8,
4712 + .maxauthsize = AES_BLOCK_SIZE,
4713 + },
4714 + .caam = {
4715 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4716 + },
4717 + },
4718 + {
4719 + .aead = {
4720 + .base = {
4721 + .cra_name = "rfc4543(gcm(aes))",
4722 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
4723 + .cra_blocksize = 1,
4724 + },
4725 + .setkey = rfc4543_setkey,
4726 + .setauthsize = rfc4543_setauthsize,
4727 + .encrypt = ipsec_gcm_encrypt,
4728 + .decrypt = ipsec_gcm_decrypt,
4729 + .ivsize = 8,
4730 + .maxauthsize = AES_BLOCK_SIZE,
4731 + },
4732 + .caam = {
4733 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4734 + },
4735 + },
4736 + /* Galois Counter Mode */
4737 + {
4738 + .aead = {
4739 + .base = {
4740 + .cra_name = "gcm(aes)",
4741 + .cra_driver_name = "gcm-aes-caam-qi",
4742 + .cra_blocksize = 1,
4743 + },
4744 + .setkey = gcm_setkey,
4745 + .setauthsize = gcm_setauthsize,
4746 + .encrypt = aead_encrypt,
4747 + .decrypt = aead_decrypt,
4748 + .ivsize = 12,
4749 + .maxauthsize = AES_BLOCK_SIZE,
4750 + },
4751 + .caam = {
4752 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4753 + }
4754 + },
4755 /* single-pass ipsec_esp descriptor */
4756 {
4757 .aead = {
4758 @@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
4759 .geniv = true,
4760 }
4761 },
4762 + {
4763 + .aead = {
4764 + .base = {
4765 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
4766 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
4767 + .cra_blocksize = AES_BLOCK_SIZE,
4768 + },
4769 + .setkey = tls_setkey,
4770 + .setauthsize = tls_setauthsize,
4771 + .encrypt = tls_encrypt,
4772 + .decrypt = tls_decrypt,
4773 + .ivsize = AES_BLOCK_SIZE,
4774 + .maxauthsize = SHA1_DIGEST_SIZE,
4775 + },
4776 + .caam = {
4777 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
4778 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4779 + OP_ALG_AAI_HMAC_PRECOMP,
4780 + }
4781 + }
4782 };
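Editor's note: once the tls10 template above is registered, a kernel user can reach it by name through the generic AEAD API. A hedged sketch (not part of the patch; the function name is illustrative and error paths are trimmed):

	static int tls10_aead_probe(void)
	{
		struct crypto_aead *tfm;

		tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* ICV length of the HMAC-SHA1 tag carried in each TLS record */
		crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);

		crypto_free_aead(tfm);
		return 0;
	}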
4783
4784 struct caam_crypto_alg {
4785 @@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
4786 struct caam_alg_entry caam;
4787 };
4788
4789 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4790 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
4791 + bool uses_dkp)
4792 {
4793 struct caam_drv_private *priv;
4794 + struct device *dev;
4795 +	/* Digest sizes for MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 */
4796 + static const u8 digest_size[] = {
4797 + MD5_DIGEST_SIZE,
4798 + SHA1_DIGEST_SIZE,
4799 + SHA224_DIGEST_SIZE,
4800 + SHA256_DIGEST_SIZE,
4801 + SHA384_DIGEST_SIZE,
4802 + SHA512_DIGEST_SIZE
4803 + };
4804 + u8 op_id;
4805
4806 /*
4807 * distribute tfms across job rings to ensure in-order
4808 @@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
4809 return PTR_ERR(ctx->jrdev);
4810 }
4811
4812 - ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
4813 - DMA_TO_DEVICE);
4814 - if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
4815 - dev_err(ctx->jrdev, "unable to map key\n");
4816 + priv = dev_get_drvdata(ctx->jrdev->parent);
4817 + if (priv->era >= 6 && uses_dkp) {
4818 + ctx->dir = DMA_BIDIRECTIONAL;
4819 + dev = ctx->jrdev->parent;
4820 + } else {
4821 + ctx->dir = DMA_TO_DEVICE;
4822 + dev = ctx->jrdev;
4823 + }
4824 +
4825 + ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
4826 + ctx->dir);
4827 + if (dma_mapping_error(dev, ctx->key_dma)) {
4828 + dev_err(dev, "unable to map key\n");
4829 caam_jr_free(ctx->jrdev);
4830 return -ENOMEM;
4831 }
4832 @@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
4833 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4834 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4835
4836 - priv = dev_get_drvdata(ctx->jrdev->parent);
4837 - ctx->qidev = priv->qidev;
4838 + if (ctx->adata.algtype) {
4839 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
4840 + >> OP_ALG_ALGSEL_SHIFT;
4841 + if (op_id < ARRAY_SIZE(digest_size)) {
4842 + ctx->authsize = digest_size[op_id];
4843 + } else {
4844 + dev_err(ctx->jrdev,
4845 + "incorrect op_id %d; must be less than %zu\n",
4846 + op_id, ARRAY_SIZE(digest_size));
4847 + caam_jr_free(ctx->jrdev);
4848 + return -EINVAL;
4849 + }
4850 + } else {
4851 + ctx->authsize = 0;
4852 + }
4853 +
4854 + ctx->qidev = ctx->jrdev->parent;
4855
4856 spin_lock_init(&ctx->lock);
4857 ctx->drv_ctx[ENCRYPT] = NULL;
4858 @@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
4859 crypto_alg);
4860 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4861
4862 - return caam_init_common(ctx, &caam_alg->caam);
4863 + return caam_init_common(ctx, &caam_alg->caam, false);
4864 }
4865
4866 static int caam_aead_init(struct crypto_aead *tfm)
4867 @@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
4868 aead);
4869 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4870
4871 - return caam_init_common(ctx, &caam_alg->caam);
4872 + return caam_init_common(ctx, &caam_alg->caam,
4873 + (alg->setkey == aead_setkey) ||
4874 + (alg->setkey == tls_setkey));
4875 }
4876
4877 static void caam_exit_common(struct caam_ctx *ctx)
4878 {
4879 + struct device *dev;
4880 +
4881 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
4882 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
4883 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
4884
4885 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
4886 - DMA_TO_DEVICE);
4887 + if (ctx->dir == DMA_BIDIRECTIONAL)
4888 + dev = ctx->jrdev->parent;
4889 + else
4890 + dev = ctx->jrdev;
4891 +
4892 + dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
4893
4894 caam_jr_free(ctx->jrdev);
4895 }
4896 @@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
4897 }
4898
4899 static struct list_head alg_list;
4900 -static void __exit caam_qi_algapi_exit(void)
4901 +void caam_qi_algapi_exit(void)
4902 {
4903 struct caam_crypto_alg *t_alg, *n;
4904 int i;
4905 @@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
4906 alg->exit = caam_aead_exit;
4907 }
4908
4909 -static int __init caam_qi_algapi_init(void)
4910 +int caam_qi_algapi_init(struct device *ctrldev)
4911 {
4912 - struct device_node *dev_node;
4913 - struct platform_device *pdev;
4914 - struct device *ctrldev;
4915 - struct caam_drv_private *priv;
4916 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
4917 int i = 0, err = 0;
4918 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4919 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
4920 unsigned int md_limit = SHA512_DIGEST_SIZE;
4921 bool registered = false;
4922
4923 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4924 - if (!dev_node) {
4925 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4926 - if (!dev_node)
4927 - return -ENODEV;
4928 - }
4929 -
4930 - pdev = of_find_device_by_node(dev_node);
4931 - of_node_put(dev_node);
4932 - if (!pdev)
4933 - return -ENODEV;
4934 -
4935 - ctrldev = &pdev->dev;
4936 - priv = dev_get_drvdata(ctrldev);
4937 -
4938 - /*
4939 - * If priv is NULL, it's probably because the caam driver wasn't
4940 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4941 - */
4942 - if (!priv || !priv->qi_present)
4943 - return -ENODEV;
4944 -
4945 INIT_LIST_HEAD(&alg_list);
4946
4947 /*
4948 * Register crypto algorithms the device supports.
4949 * First, detect presence and attributes of DES, AES, and MD blocks.
4950 */
4951 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4952 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4953 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4954 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4955 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4956 + if (priv->era < 10) {
4957 + u32 cha_vid, cha_inst;
4958 +
4959 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4960 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
4961 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4962 +
4963 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4964 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
4965 + CHA_ID_LS_DES_SHIFT;
4966 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
4967 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4968 + } else {
4969 + u32 aesa, mdha;
4970 +
4971 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
4972 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
4973 +
4974 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4975 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4976 +
4977 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
4978 + aes_inst = aesa & CHA_VER_NUM_MASK;
4979 + md_inst = mdha & CHA_VER_NUM_MASK;
4980 + }
4981
4982 /* If MD is present, limit digest size based on LP256 */
4983 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4984 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
4985 md_limit = SHA256_DIGEST_SIZE;
4986
4987 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4988 @@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
4989 t_alg = caam_alg_alloc(alg);
4990 if (IS_ERR(t_alg)) {
4991 err = PTR_ERR(t_alg);
4992 - dev_warn(priv->qidev, "%s alg allocation failed\n",
4993 + dev_warn(ctrldev, "%s alg allocation failed\n",
4994 alg->driver_name);
4995 continue;
4996 }
4997
4998 err = crypto_register_alg(&t_alg->crypto_alg);
4999 if (err) {
5000 - dev_warn(priv->qidev, "%s alg registration failed\n",
5001 + dev_warn(ctrldev, "%s alg registration failed\n",
5002 t_alg->crypto_alg.cra_driver_name);
5003 kfree(t_alg);
5004 continue;
5005 @@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
5006 * Check support for AES algorithms not available
5007 * on LP devices.
5008 */
5009 - if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
5010 - (alg_aai == OP_ALG_AAI_GCM))
5011 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
5012 continue;
5013
5014 /*
5015 @@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
5016 }
5017
5018 if (registered)
5019 - dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
5020 + dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
5021
5022 return err;
5023 }
5024 -
5025 -module_init(caam_qi_algapi_init);
5026 -module_exit(caam_qi_algapi_exit);
5027 -
5028 -MODULE_LICENSE("GPL");
5029 -MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
5030 -MODULE_AUTHOR("Freescale Semiconductor");
5031 --- /dev/null
5032 +++ b/drivers/crypto/caam/caamalg_qi2.c
5033 @@ -0,0 +1,5843 @@
5034 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
5035 +/*
5036 + * Copyright 2015-2016 Freescale Semiconductor Inc.
5037 + * Copyright 2017-2018 NXP
5038 + */
5039 +
5040 +#include <linux/fsl/mc.h>
5041 +#include "compat.h"
5042 +#include "regs.h"
5043 +#include "caamalg_qi2.h"
5044 +#include "dpseci_cmd.h"
5045 +#include "desc_constr.h"
5046 +#include "error.h"
5047 +#include "sg_sw_sec4.h"
5048 +#include "sg_sw_qm2.h"
5049 +#include "key_gen.h"
5050 +#include "caamalg_desc.h"
5051 +#include "caamhash_desc.h"
5052 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
5053 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
5054 +
5055 +#define CAAM_CRA_PRIORITY 2000
5056 +
5057 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
5058 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
5059 + SHA512_DIGEST_SIZE * 2)
5060 +
5061 +/*
5062 + * This is a cache of buffers, from which users of the CAAM QI driver
5063 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
5064 + * NOTE: A more elegant solution would be to have some headroom in the frames
5065 + * being processed. This can be added by the dpaa2-eth driver. This would
5066 + * pose a problem for userspace application processing which cannot
5067 + * know of this limitation. So for now, this will work.
5068 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
5069 + */
5070 +static struct kmem_cache *qi_cache;
5071 +
5072 +struct caam_alg_entry {
5073 + struct device *dev;
5074 + int class1_alg_type;
5075 + int class2_alg_type;
5076 + bool rfc3686;
5077 + bool geniv;
5078 +};
5079 +
5080 +struct caam_aead_alg {
5081 + struct aead_alg aead;
5082 + struct caam_alg_entry caam;
5083 + bool registered;
5084 +};
5085 +
5086 +struct caam_skcipher_alg {
5087 + struct skcipher_alg skcipher;
5088 + struct caam_alg_entry caam;
5089 + bool registered;
5090 +};
5091 +
5092 +/**
5093 + * caam_ctx - per-session context
5094 + * @flc: Flow Contexts array
5095 + * @key: virtual address of the key(s): [authentication key], encryption key
5096 + * @flc_dma: I/O virtual addresses of the Flow Contexts
5097 + * @key_dma: I/O virtual address of the key
5098 + * @dir: DMA direction for mapping key and Flow Contexts
5099 + * @dev: dpseci device
5100 + * @adata: authentication algorithm details
5101 + * @cdata: encryption algorithm details
5102 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
5103 + */
5104 +struct caam_ctx {
5105 + struct caam_flc flc[NUM_OP];
5106 + u8 key[CAAM_MAX_KEY_SIZE];
5107 + dma_addr_t flc_dma[NUM_OP];
5108 + dma_addr_t key_dma;
5109 + enum dma_data_direction dir;
5110 + struct device *dev;
5111 + struct alginfo adata;
5112 + struct alginfo cdata;
5113 + unsigned int authsize;
5114 +};
5115 +
5116 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
5117 + dma_addr_t iova_addr)
5118 +{
5119 + phys_addr_t phys_addr;
5120 +
5121 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
5122 + iova_addr;
5123 +
5124 + return phys_to_virt(phys_addr);
5125 +}
5126 +
5127 +/*
5128 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
5129 + *
5130 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
5131 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
5132 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
5133 + * hosting 16 SG entries.
5134 + *
5135 + * @flags - flags that would be used for the equivalent kmalloc(..) call
5136 + *
5137 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
5138 + */
5139 +static inline void *qi_cache_zalloc(gfp_t flags)
5140 +{
5141 + return kmem_cache_zalloc(qi_cache, flags);
5142 +}
5143 +
5144 +/*
5145 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
5146 + *
5147 + * @obj - buffer previously allocated by qi_cache_zalloc
5148 + *
5149 + * No checking is done; the call is a pass-through to
5150 + * kmem_cache_free(...)
5151 + */
5152 +static inline void qi_cache_free(void *obj)
5153 +{
5154 + kmem_cache_free(qi_cache, obj);
5155 +}
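Editor's note: for context, qi_cache itself is a plain kmem_cache created once at probe time. A minimal sketch of that setup; the cache name and flags here are assumptions, not taken from this hunk:

	static int qi_cache_init(void)
	{
		/* one cache of CAAM_QI_MEMCACHE_SIZE-byte, DMA-able buffers */
		qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
					     0, SLAB_CACHE_DMA, NULL);
		return qi_cache ? 0 : -ENOMEM;
	}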
5156 +
5157 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
5158 +{
5159 + switch (crypto_tfm_alg_type(areq->tfm)) {
5160 + case CRYPTO_ALG_TYPE_SKCIPHER:
5161 + return skcipher_request_ctx(skcipher_request_cast(areq));
5162 + case CRYPTO_ALG_TYPE_AEAD:
5163 + return aead_request_ctx(container_of(areq, struct aead_request,
5164 + base));
5165 + case CRYPTO_ALG_TYPE_AHASH:
5166 + return ahash_request_ctx(ahash_request_cast(areq));
5167 + default:
5168 + return ERR_PTR(-EINVAL);
5169 + }
5170 +}
5171 +
5172 +static void caam_unmap(struct device *dev, struct scatterlist *src,
5173 + struct scatterlist *dst, int src_nents,
5174 + int dst_nents, dma_addr_t iv_dma, int ivsize,
5175 + dma_addr_t qm_sg_dma, int qm_sg_bytes)
5176 +{
5177 + if (dst != src) {
5178 + if (src_nents)
5179 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
5180 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
5181 + } else {
5182 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
5183 + }
5184 +
5185 + if (iv_dma)
5186 + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
5187 +
5188 + if (qm_sg_bytes)
5189 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
5190 +}
5191 +
5192 +static int aead_set_sh_desc(struct crypto_aead *aead)
5193 +{
5194 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5195 + typeof(*alg), aead);
5196 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5197 + unsigned int ivsize = crypto_aead_ivsize(aead);
5198 + struct device *dev = ctx->dev;
5199 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5200 + struct caam_flc *flc;
5201 + u32 *desc;
5202 + u32 ctx1_iv_off = 0;
5203 + u32 *nonce = NULL;
5204 + unsigned int data_len[2];
5205 + u32 inl_mask;
5206 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
5207 + OP_ALG_AAI_CTR_MOD128);
5208 + const bool is_rfc3686 = alg->caam.rfc3686;
5209 +
5210 + if (!ctx->cdata.keylen || !ctx->authsize)
5211 + return 0;
5212 +
5213 + /*
5214 + * AES-CTR needs to load IV in CONTEXT1 reg
5215 + * at an offset of 128bits (16bytes)
5216 + * CONTEXT1[255:128] = IV
5217 + */
5218 + if (ctr_mode)
5219 + ctx1_iv_off = 16;
5220 +
5221 + /*
5222 + * RFC3686 specific:
5223 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
5224 + */
5225 + if (is_rfc3686) {
5226 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
5227 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
5228 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
5229 + }
5230 +
5231 + data_len[0] = ctx->adata.keylen_pad;
5232 + data_len[1] = ctx->cdata.keylen;
5233 +
5234 + /* aead_encrypt shared descriptor */
5235 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
5236 + DESC_QI_AEAD_ENC_LEN) +
5237 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5238 + DESC_JOB_IO_LEN, data_len, &inl_mask,
5239 + ARRAY_SIZE(data_len)) < 0)
5240 + return -EINVAL;
5241 +
5242 + if (inl_mask & 1)
5243 + ctx->adata.key_virt = ctx->key;
5244 + else
5245 + ctx->adata.key_dma = ctx->key_dma;
5246 +
5247 + if (inl_mask & 2)
5248 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5249 + else
5250 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5251 +
5252 + ctx->adata.key_inline = !!(inl_mask & 1);
5253 + ctx->cdata.key_inline = !!(inl_mask & 2);
5254 +
5255 + flc = &ctx->flc[ENCRYPT];
5256 + desc = flc->sh_desc;
5257 +
5258 + if (alg->caam.geniv)
5259 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
5260 + ivsize, ctx->authsize, is_rfc3686,
5261 + nonce, ctx1_iv_off, true,
5262 + priv->sec_attr.era);
5263 + else
5264 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
5265 + ivsize, ctx->authsize, is_rfc3686, nonce,
5266 + ctx1_iv_off, true, priv->sec_attr.era);
5267 +
5268 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5269 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5270 + sizeof(flc->flc) + desc_bytes(desc),
5271 + ctx->dir);
5272 +
5273 + /* aead_decrypt shared descriptor */
5274 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
5275 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5276 + DESC_JOB_IO_LEN, data_len, &inl_mask,
5277 + ARRAY_SIZE(data_len)) < 0)
5278 + return -EINVAL;
5279 +
5280 + if (inl_mask & 1)
5281 + ctx->adata.key_virt = ctx->key;
5282 + else
5283 + ctx->adata.key_dma = ctx->key_dma;
5284 +
5285 + if (inl_mask & 2)
5286 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5287 + else
5288 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5289 +
5290 + ctx->adata.key_inline = !!(inl_mask & 1);
5291 + ctx->cdata.key_inline = !!(inl_mask & 2);
5292 +
5293 + flc = &ctx->flc[DECRYPT];
5294 + desc = flc->sh_desc;
5295 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
5296 + ivsize, ctx->authsize, alg->caam.geniv,
5297 + is_rfc3686, nonce, ctx1_iv_off, true,
5298 + priv->sec_attr.era);
5299 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5300 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5301 + sizeof(flc->flc) + desc_bytes(desc),
5302 + ctx->dir);
5303 +
5304 + return 0;
5305 +}
5306 +
5307 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
5308 +{
5309 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5310 +
5311 + ctx->authsize = authsize;
5312 + aead_set_sh_desc(authenc);
5313 +
5314 + return 0;
5315 +}
5316 +
5317 +struct split_key_sh_result {
5318 + struct completion completion;
5319 + int err;
5320 + struct device *dev;
5321 +};
5322 +
5323 +static void split_key_sh_done(void *cbk_ctx, u32 err)
5324 +{
5325 + struct split_key_sh_result *res = cbk_ctx;
5326 +
5327 +#ifdef DEBUG
5328 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
5329 +#endif
5330 +
5331 + if (err)
5332 + caam_qi2_strstatus(res->dev, err);
5333 +
5334 + res->err = err;
5335 + complete(&res->completion);
5336 +}
5337 +
5338 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
5339 + unsigned int keylen)
5340 +{
5341 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5342 + struct device *dev = ctx->dev;
5343 + struct crypto_authenc_keys keys;
5344 +
5345 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5346 + goto badkey;
5347 +
5348 +#ifdef DEBUG
5349 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5350 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
5351 + keys.authkeylen);
5352 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5353 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5354 +#endif
5355 +
5356 + ctx->adata.keylen = keys.authkeylen;
5357 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5358 + OP_ALG_ALGSEL_MASK);
5359 +
5360 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5361 + goto badkey;
5362 +
5363 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
5364 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5365 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5366 + keys.enckeylen, ctx->dir);
5367 +#ifdef DEBUG
5368 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5369 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5370 + ctx->adata.keylen_pad + keys.enckeylen, 1);
5371 +#endif
5372 +
5373 + ctx->cdata.keylen = keys.enckeylen;
5374 +
5375 + return aead_set_sh_desc(aead);
5376 +badkey:
5377 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5378 + return -EINVAL;
5379 +}
5380 +
5381 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
5382 + bool encrypt)
5383 +{
5384 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5385 + struct caam_request *req_ctx = aead_request_ctx(req);
5386 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5387 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5388 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5389 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5390 + typeof(*alg), aead);
5391 + struct device *dev = ctx->dev;
5392 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5393 + GFP_KERNEL : GFP_ATOMIC;
5394 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5395 + struct aead_edesc *edesc;
5396 + dma_addr_t qm_sg_dma, iv_dma = 0;
5397 + int ivsize = 0;
5398 + unsigned int authsize = ctx->authsize;
5399 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
5400 + int in_len, out_len;
5401 + struct dpaa2_sg_entry *sg_table;
5402 +
5403 + /* allocate space for base edesc, link tables and IV */
5404 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5405 + if (unlikely(!edesc)) {
5406 + dev_err(dev, "could not allocate extended descriptor\n");
5407 + return ERR_PTR(-ENOMEM);
5408 + }
5409 +
5410 + if (unlikely(req->dst != req->src)) {
5411 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5412 + req->cryptlen);
5413 + if (unlikely(src_nents < 0)) {
5414 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5415 + req->assoclen + req->cryptlen);
5416 + qi_cache_free(edesc);
5417 + return ERR_PTR(src_nents);
5418 + }
5419 +
5420 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
5421 + req->cryptlen +
5422 + (encrypt ? authsize :
5423 + (-authsize)));
5424 + if (unlikely(dst_nents < 0)) {
5425 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5426 + req->assoclen + req->cryptlen +
5427 + (encrypt ? authsize : (-authsize)));
5428 + qi_cache_free(edesc);
5429 + return ERR_PTR(dst_nents);
5430 + }
5431 +
5432 + if (src_nents) {
5433 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5434 + DMA_TO_DEVICE);
5435 + if (unlikely(!mapped_src_nents)) {
5436 + dev_err(dev, "unable to map source\n");
5437 + qi_cache_free(edesc);
5438 + return ERR_PTR(-ENOMEM);
5439 + }
5440 + } else {
5441 + mapped_src_nents = 0;
5442 + }
5443 +
5444 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
5445 + DMA_FROM_DEVICE);
5446 + if (unlikely(!mapped_dst_nents)) {
5447 + dev_err(dev, "unable to map destination\n");
5448 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5449 + qi_cache_free(edesc);
5450 + return ERR_PTR(-ENOMEM);
5451 + }
5452 + } else {
5453 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5454 + req->cryptlen +
5455 + (encrypt ? authsize : 0));
5456 + if (unlikely(src_nents < 0)) {
5457 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5458 + req->assoclen + req->cryptlen +
5459 + (encrypt ? authsize : 0));
5460 + qi_cache_free(edesc);
5461 + return ERR_PTR(src_nents);
5462 + }
5463 +
5464 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5465 + DMA_BIDIRECTIONAL);
5466 + if (unlikely(!mapped_src_nents)) {
5467 + dev_err(dev, "unable to map source\n");
5468 + qi_cache_free(edesc);
5469 + return ERR_PTR(-ENOMEM);
5470 + }
5471 + }
5472 +
5473 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
5474 + ivsize = crypto_aead_ivsize(aead);
5475 +
5476 + /*
5477 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
5478 + * Input is not contiguous.
5479 + */
5480 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
5481 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5482 + sg_table = &edesc->sgt[0];
5483 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
5484 + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
5485 + CAAM_QI_MEMCACHE_SIZE)) {
5486 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
5487 + qm_sg_nents, ivsize);
5488 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5489 + 0, 0, 0);
5490 + qi_cache_free(edesc);
5491 + return ERR_PTR(-ENOMEM);
5492 + }
5493 +
5494 + if (ivsize) {
5495 + u8 *iv = (u8 *)(sg_table + qm_sg_nents);
5496 +
5497 + /* Make sure IV is located in a DMAable area */
5498 + memcpy(iv, req->iv, ivsize);
5499 +
5500 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5501 + if (dma_mapping_error(dev, iv_dma)) {
5502 + dev_err(dev, "unable to map IV\n");
5503 + caam_unmap(dev, req->src, req->dst, src_nents,
5504 + dst_nents, 0, 0, 0, 0);
5505 + qi_cache_free(edesc);
5506 + return ERR_PTR(-ENOMEM);
5507 + }
5508 + }
5509 +
5510 + edesc->src_nents = src_nents;
5511 + edesc->dst_nents = dst_nents;
5512 + edesc->iv_dma = iv_dma;
5513 +
5514 + if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
5515 + OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
5516 + /*
5517 +		 * The associated data already includes the IV, but we need
5518 +		 * to skip it when we authenticate or encrypt.
5519 + */
5520 + edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
5521 + else
5522 + edesc->assoclen = cpu_to_caam32(req->assoclen);
5523 + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
5524 + DMA_TO_DEVICE);
5525 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
5526 + dev_err(dev, "unable to map assoclen\n");
5527 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5528 + iv_dma, ivsize, 0, 0);
5529 + qi_cache_free(edesc);
5530 + return ERR_PTR(-ENOMEM);
5531 + }
5532 +
5533 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
5534 + qm_sg_index++;
5535 + if (ivsize) {
5536 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
5537 + qm_sg_index++;
5538 + }
5539 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5540 + qm_sg_index += mapped_src_nents;
5541 +
5542 + if (mapped_dst_nents > 1)
5543 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
5544 + qm_sg_index, 0);
5545 +
5546 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5547 + if (dma_mapping_error(dev, qm_sg_dma)) {
5548 + dev_err(dev, "unable to map S/G table\n");
5549 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
5550 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5551 + iv_dma, ivsize, 0, 0);
5552 + qi_cache_free(edesc);
5553 + return ERR_PTR(-ENOMEM);
5554 + }
5555 +
5556 + edesc->qm_sg_dma = qm_sg_dma;
5557 + edesc->qm_sg_bytes = qm_sg_bytes;
5558 +
5559 + out_len = req->assoclen + req->cryptlen +
5560 + (encrypt ? ctx->authsize : (-ctx->authsize));
5561 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
5562 +
5563 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5564 + dpaa2_fl_set_final(in_fle, true);
5565 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5566 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5567 + dpaa2_fl_set_len(in_fle, in_len);
5568 +
5569 + if (req->dst == req->src) {
5570 + if (mapped_src_nents == 1) {
5571 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5572 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
5573 + } else {
5574 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5575 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5576 + (1 + !!ivsize) * sizeof(*sg_table));
5577 + }
5578 + } else if (mapped_dst_nents == 1) {
5579 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5580 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
5581 + } else {
5582 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5583 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5584 + sizeof(*sg_table));
5585 + }
5586 +
5587 + dpaa2_fl_set_len(out_fle, out_len);
5588 +
5589 + return edesc;
5590 +}
5591 +
5592 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
5593 +{
5594 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5595 + unsigned int ivsize = crypto_aead_ivsize(aead);
5596 + struct device *dev = ctx->dev;
5597 + struct caam_flc *flc;
5598 + u32 *desc;
5599 +
5600 + if (!ctx->cdata.keylen || !ctx->authsize)
5601 + return 0;
5602 +
5603 + flc = &ctx->flc[ENCRYPT];
5604 + desc = flc->sh_desc;
5605 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5606 + ctx->authsize, true, true);
5607 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5608 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5609 + sizeof(flc->flc) + desc_bytes(desc),
5610 + ctx->dir);
5611 +
5612 + flc = &ctx->flc[DECRYPT];
5613 + desc = flc->sh_desc;
5614 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5615 + ctx->authsize, false, true);
5616 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5617 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5618 + sizeof(flc->flc) + desc_bytes(desc),
5619 + ctx->dir);
5620 +
5621 + return 0;
5622 +}
5623 +
5624 +static int chachapoly_setauthsize(struct crypto_aead *aead,
5625 + unsigned int authsize)
5626 +{
5627 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5628 +
5629 + if (authsize != POLY1305_DIGEST_SIZE)
5630 + return -EINVAL;
5631 +
5632 + ctx->authsize = authsize;
5633 + return chachapoly_set_sh_desc(aead);
5634 +}
5635 +
5636 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
5637 + unsigned int keylen)
5638 +{
5639 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5640 + unsigned int ivsize = crypto_aead_ivsize(aead);
5641 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
5642 +
5643 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
5644 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5645 + return -EINVAL;
5646 + }
5647 +
5648 + ctx->cdata.key_virt = key;
5649 + ctx->cdata.keylen = keylen - saltlen;
5650 +
5651 + return chachapoly_set_sh_desc(aead);
5652 +}
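Editor's note: the saltlen arithmetic above distinguishes the two ChaCha20-Poly1305 variants. A short illustrative check (the helper name is hypothetical):

	static bool chachapoly_keylen_ok(unsigned int keylen, unsigned int ivsize)
	{
		/* rfc7539: ivsize 12 -> no salt, 32-byte key
		 * rfc7539esp: ivsize 8 -> 32-byte key followed by a 4-byte salt */
		unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

		return keylen == CHACHA20_KEY_SIZE + saltlen;
	}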
5653 +
5654 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
5655 + bool encrypt)
5656 +{
5657 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5658 + unsigned int blocksize = crypto_aead_blocksize(tls);
5659 + unsigned int padsize, authsize;
5660 + struct caam_request *req_ctx = aead_request_ctx(req);
5661 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5662 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5663 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5664 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
5665 + typeof(*alg), aead);
5666 + struct device *dev = ctx->dev;
5667 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5668 + GFP_KERNEL : GFP_ATOMIC;
5669 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5670 + struct tls_edesc *edesc;
5671 + dma_addr_t qm_sg_dma, iv_dma = 0;
5672 + int ivsize = 0;
5673 + u8 *iv;
5674 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
5675 + int in_len, out_len;
5676 + struct dpaa2_sg_entry *sg_table;
5677 + struct scatterlist *dst;
5678 +
5679 + if (encrypt) {
5680 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
5681 + blocksize);
5682 + authsize = ctx->authsize + padsize;
5683 + } else {
5684 + authsize = ctx->authsize;
5685 + }
5686 +
5687 + /* allocate space for base edesc, link tables and IV */
5688 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5689 + if (unlikely(!edesc)) {
5690 + dev_err(dev, "could not allocate extended descriptor\n");
5691 + return ERR_PTR(-ENOMEM);
5692 + }
5693 +
5694 + if (likely(req->src == req->dst)) {
5695 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5696 + req->cryptlen +
5697 + (encrypt ? authsize : 0));
5698 + if (unlikely(src_nents < 0)) {
5699 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5700 + req->assoclen + req->cryptlen +
5701 + (encrypt ? authsize : 0));
5702 + qi_cache_free(edesc);
5703 + return ERR_PTR(src_nents);
5704 + }
5705 +
5706 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5707 + DMA_BIDIRECTIONAL);
5708 + if (unlikely(!mapped_src_nents)) {
5709 + dev_err(dev, "unable to map source\n");
5710 + qi_cache_free(edesc);
5711 + return ERR_PTR(-ENOMEM);
5712 + }
5713 + dst = req->dst;
5714 + } else {
5715 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5716 + req->cryptlen);
5717 + if (unlikely(src_nents < 0)) {
5718 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5719 + req->assoclen + req->cryptlen);
5720 + qi_cache_free(edesc);
5721 + return ERR_PTR(src_nents);
5722 + }
5723 +
5724 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
5725 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
5726 + (encrypt ? authsize : 0));
5727 + if (unlikely(dst_nents < 0)) {
5728 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5729 + req->cryptlen +
5730 + (encrypt ? authsize : 0));
5731 + qi_cache_free(edesc);
5732 + return ERR_PTR(dst_nents);
5733 + }
5734 +
5735 + if (src_nents) {
5736 + mapped_src_nents = dma_map_sg(dev, req->src,
5737 + src_nents, DMA_TO_DEVICE);
5738 + if (unlikely(!mapped_src_nents)) {
5739 + dev_err(dev, "unable to map source\n");
5740 + qi_cache_free(edesc);
5741 + return ERR_PTR(-ENOMEM);
5742 + }
5743 + } else {
5744 + mapped_src_nents = 0;
5745 + }
5746 +
5747 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
5748 + DMA_FROM_DEVICE);
5749 + if (unlikely(!mapped_dst_nents)) {
5750 + dev_err(dev, "unable to map destination\n");
5751 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5752 + qi_cache_free(edesc);
5753 + return ERR_PTR(-ENOMEM);
5754 + }
5755 + }
5756 +
5757 + /*
5758 + * Create S/G table: IV, src, dst.
5759 + * Input is not contiguous.
5760 + */
5761 + qm_sg_ents = 1 + mapped_src_nents +
5762 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5763 + sg_table = &edesc->sgt[0];
5764 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
5765 +
5766 + ivsize = crypto_aead_ivsize(tls);
5767 + iv = (u8 *)(sg_table + qm_sg_ents);
5768 + /* Make sure IV is located in a DMAable area */
5769 + memcpy(iv, req->iv, ivsize);
5770 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5771 + if (dma_mapping_error(dev, iv_dma)) {
5772 + dev_err(dev, "unable to map IV\n");
5773 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
5774 + 0);
5775 + qi_cache_free(edesc);
5776 + return ERR_PTR(-ENOMEM);
5777 + }
5778 +
5779 + edesc->src_nents = src_nents;
5780 + edesc->dst_nents = dst_nents;
5781 + edesc->dst = dst;
5782 + edesc->iv_dma = iv_dma;
5783 +
5784 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
5785 + qm_sg_index = 1;
5786 +
5787 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5788 + qm_sg_index += mapped_src_nents;
5789 +
5790 + if (mapped_dst_nents > 1)
5791 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
5792 + qm_sg_index, 0);
5793 +
5794 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5795 + if (dma_mapping_error(dev, qm_sg_dma)) {
5796 + dev_err(dev, "unable to map S/G table\n");
5797 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
5798 + ivsize, 0, 0);
5799 + qi_cache_free(edesc);
5800 + return ERR_PTR(-ENOMEM);
5801 + }
5802 +
5803 + edesc->qm_sg_dma = qm_sg_dma;
5804 + edesc->qm_sg_bytes = qm_sg_bytes;
5805 +
5806 + out_len = req->cryptlen + (encrypt ? authsize : 0);
5807 + in_len = ivsize + req->assoclen + req->cryptlen;
5808 +
5809 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5810 + dpaa2_fl_set_final(in_fle, true);
5811 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5812 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5813 + dpaa2_fl_set_len(in_fle, in_len);
5814 +
5815 + if (req->dst == req->src) {
5816 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5817 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5818 + (sg_nents_for_len(req->src, req->assoclen) +
5819 + 1) * sizeof(*sg_table));
5820 + } else if (mapped_dst_nents == 1) {
5821 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5822 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
5823 + } else {
5824 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5825 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5826 + sizeof(*sg_table));
5827 + }
5828 +
5829 + dpaa2_fl_set_len(out_fle, out_len);
5830 +
5831 + return edesc;
5832 +}
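Editor's note: the padsize computed at the top of tls_edesc_alloc() follows the TLS 1.0 CBC rule that at least one padding byte is always added. Restated as a standalone helper purely for illustration:

	static unsigned int tls10_padsize(unsigned int cryptlen, unsigned int authsize,
					  unsigned int blocksize)
	{
		/* e.g. a 32-byte record + 20-byte SHA-1 ICV with AES (16): 12 pad bytes;
		 * an exact multiple of the block size gets a full block of padding */
		return blocksize - ((cryptlen + authsize) % blocksize);
	}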
5833 +
5834 +static int tls_set_sh_desc(struct crypto_aead *tls)
5835 +{
5836 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5837 + unsigned int ivsize = crypto_aead_ivsize(tls);
5838 + unsigned int blocksize = crypto_aead_blocksize(tls);
5839 + struct device *dev = ctx->dev;
5840 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5841 + struct caam_flc *flc;
5842 + u32 *desc;
5843 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
5844 + unsigned int data_len[2];
5845 + u32 inl_mask;
5846 +
5847 + if (!ctx->cdata.keylen || !ctx->authsize)
5848 + return 0;
5849 +
5850 + /*
5851 + * TLS 1.0 encrypt shared descriptor
5852 + * Job Descriptor and Shared Descriptor
5853 + * must fit into the 64-word Descriptor h/w Buffer
5854 + */
5855 + data_len[0] = ctx->adata.keylen_pad;
5856 + data_len[1] = ctx->cdata.keylen;
5857 +
5858 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
5859 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
5860 + return -EINVAL;
5861 +
5862 + if (inl_mask & 1)
5863 + ctx->adata.key_virt = ctx->key;
5864 + else
5865 + ctx->adata.key_dma = ctx->key_dma;
5866 +
5867 + if (inl_mask & 2)
5868 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5869 + else
5870 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5871 +
5872 + ctx->adata.key_inline = !!(inl_mask & 1);
5873 + ctx->cdata.key_inline = !!(inl_mask & 2);
5874 +
5875 + flc = &ctx->flc[ENCRYPT];
5876 + desc = flc->sh_desc;
5877 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
5878 + assoclen, ivsize, ctx->authsize, blocksize,
5879 + priv->sec_attr.era);
5880 + flc->flc[1] = cpu_to_caam32(desc_len(desc));
5881 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5882 + sizeof(flc->flc) + desc_bytes(desc),
5883 + ctx->dir);
5884 +
5885 + /*
5886 + * TLS 1.0 decrypt shared descriptor
5887 + * Keys do not fit inline, regardless of algorithms used
5888 + */
5889 + ctx->adata.key_inline = false;
5890 + ctx->adata.key_dma = ctx->key_dma;
5891 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5892 +
5893 + flc = &ctx->flc[DECRYPT];
5894 + desc = flc->sh_desc;
5895 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
5896 + ctx->authsize, blocksize, priv->sec_attr.era);
5897 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5898 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5899 + sizeof(flc->flc) + desc_bytes(desc),
5900 + ctx->dir);
5901 +
5902 + return 0;
5903 +}
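Editor's note: the 13-byte assoclen assumed above is the TLS 1.0 MAC header. Its layout, shown as a hedged sketch (struct name is illustrative):

	struct tls10_aad {
		__be64 seq;		/* implicit record sequence number */
		u8 type;		/* ContentType, e.g. 23 for application data */
		__be16 version;		/* 0x0301 for TLS 1.0 */
		__be16 len;		/* plaintext length */
	} __packed;			/* 8 + 1 + 2 + 2 = 13 bytes */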
5904 +
5905 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
5906 + unsigned int keylen)
5907 +{
5908 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5909 + struct device *dev = ctx->dev;
5910 + struct crypto_authenc_keys keys;
5911 +
5912 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5913 + goto badkey;
5914 +
5915 +#ifdef DEBUG
5916 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5917 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
5918 + keys.authkeylen);
5919 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5920 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5921 +#endif
5922 +
5923 + ctx->adata.keylen = keys.authkeylen;
5924 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5925 + OP_ALG_ALGSEL_MASK);
5926 +
5927 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5928 + goto badkey;
5929 +
5930 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
5931 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5932 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5933 + keys.enckeylen, ctx->dir);
5934 +#ifdef DEBUG
5935 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5936 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5937 + ctx->adata.keylen_pad + keys.enckeylen, 1);
5938 +#endif
5939 +
5940 + ctx->cdata.keylen = keys.enckeylen;
5941 +
5942 + return tls_set_sh_desc(tls);
5943 +badkey:
5944 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
5945 + return -EINVAL;
5946 +}
5947 +
5948 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
5949 +{
5950 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5951 +
5952 + ctx->authsize = authsize;
5953 + tls_set_sh_desc(tls);
5954 +
5955 + return 0;
5956 +}
5957 +
5958 +static int gcm_set_sh_desc(struct crypto_aead *aead)
5959 +{
5960 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5961 + struct device *dev = ctx->dev;
5962 + unsigned int ivsize = crypto_aead_ivsize(aead);
5963 + struct caam_flc *flc;
5964 + u32 *desc;
5965 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5966 + ctx->cdata.keylen;
5967 +
5968 + if (!ctx->cdata.keylen || !ctx->authsize)
5969 + return 0;
5970 +
5971 + /*
5972 + * AES GCM encrypt shared descriptor
5973 + * Job Descriptor and Shared Descriptor
5974 + * must fit into the 64-word Descriptor h/w Buffer
5975 + */
5976 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
5977 + ctx->cdata.key_inline = true;
5978 + ctx->cdata.key_virt = ctx->key;
5979 + } else {
5980 + ctx->cdata.key_inline = false;
5981 + ctx->cdata.key_dma = ctx->key_dma;
5982 + }
5983 +
5984 + flc = &ctx->flc[ENCRYPT];
5985 + desc = flc->sh_desc;
5986 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
5987 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5988 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5989 + sizeof(flc->flc) + desc_bytes(desc),
5990 + ctx->dir);
5991 +
5992 + /*
5993 + * Job Descriptor and Shared Descriptors
5994 + * must all fit into the 64-word Descriptor h/w Buffer
5995 + */
5996 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
5997 + ctx->cdata.key_inline = true;
5998 + ctx->cdata.key_virt = ctx->key;
5999 + } else {
6000 + ctx->cdata.key_inline = false;
6001 + ctx->cdata.key_dma = ctx->key_dma;
6002 + }
6003 +
6004 + flc = &ctx->flc[DECRYPT];
6005 + desc = flc->sh_desc;
6006 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
6007 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6008 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6009 + sizeof(flc->flc) + desc_bytes(desc),
6010 + ctx->dir);
6011 +
6012 + return 0;
6013 +}
6014 +
6015 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
6016 +{
6017 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6018 +
6019 + ctx->authsize = authsize;
6020 + gcm_set_sh_desc(authenc);
6021 +
6022 + return 0;
6023 +}
6024 +
6025 +static int gcm_setkey(struct crypto_aead *aead,
6026 + const u8 *key, unsigned int keylen)
6027 +{
6028 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6029 + struct device *dev = ctx->dev;
6030 +
6031 +#ifdef DEBUG
6032 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6033 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6034 +#endif
6035 +
6036 + memcpy(ctx->key, key, keylen);
6037 + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
6038 + ctx->cdata.keylen = keylen;
6039 +
6040 + return gcm_set_sh_desc(aead);
6041 +}
6042 +
6043 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
6044 +{
6045 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6046 + struct device *dev = ctx->dev;
6047 + unsigned int ivsize = crypto_aead_ivsize(aead);
6048 + struct caam_flc *flc;
6049 + u32 *desc;
6050 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6051 + ctx->cdata.keylen;
6052 +
6053 + if (!ctx->cdata.keylen || !ctx->authsize)
6054 + return 0;
6055 +
6056 + ctx->cdata.key_virt = ctx->key;
6057 +
6058 + /*
6059 + * RFC4106 encrypt shared descriptor
6060 + * Job Descriptor and Shared Descriptor
6061 + * must fit into the 64-word Descriptor h/w Buffer
6062 + */
6063 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
6064 + ctx->cdata.key_inline = true;
6065 + } else {
6066 + ctx->cdata.key_inline = false;
6067 + ctx->cdata.key_dma = ctx->key_dma;
6068 + }
6069 +
6070 + flc = &ctx->flc[ENCRYPT];
6071 + desc = flc->sh_desc;
6072 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6073 + true);
6074 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6075 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6076 + sizeof(flc->flc) + desc_bytes(desc),
6077 + ctx->dir);
6078 +
6079 + /*
6080 + * Job Descriptor and Shared Descriptors
6081 + * must all fit into the 64-word Descriptor h/w Buffer
6082 + */
6083 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
6084 + ctx->cdata.key_inline = true;
6085 + } else {
6086 + ctx->cdata.key_inline = false;
6087 + ctx->cdata.key_dma = ctx->key_dma;
6088 + }
6089 +
6090 + flc = &ctx->flc[DECRYPT];
6091 + desc = flc->sh_desc;
6092 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6093 + true);
6094 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6095 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6096 + sizeof(flc->flc) + desc_bytes(desc),
6097 + ctx->dir);
6098 +
6099 + return 0;
6100 +}
6101 +
6102 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
6103 + unsigned int authsize)
6104 +{
6105 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6106 +
6107 + ctx->authsize = authsize;
6108 + rfc4106_set_sh_desc(authenc);
6109 +
6110 + return 0;
6111 +}
6112 +
6113 +static int rfc4106_setkey(struct crypto_aead *aead,
6114 + const u8 *key, unsigned int keylen)
6115 +{
6116 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6117 + struct device *dev = ctx->dev;
6118 +
6119 + if (keylen < 4)
6120 + return -EINVAL;
6121 +
6122 +#ifdef DEBUG
6123 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6124 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6125 +#endif
6126 +
6127 + memcpy(ctx->key, key, keylen);
6128 + /*
6129 + * The last four bytes of the key material are used as the salt value
6130 + * in the nonce. Update the AES key length.
6131 + */
6132 + ctx->cdata.keylen = keylen - 4;
6133 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6134 + ctx->dir);
6135 +
6136 + return rfc4106_set_sh_desc(aead);
6137 +}
6138 +
6139 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
6140 +{
6141 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6142 + struct device *dev = ctx->dev;
6143 + unsigned int ivsize = crypto_aead_ivsize(aead);
6144 + struct caam_flc *flc;
6145 + u32 *desc;
6146 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6147 + ctx->cdata.keylen;
6148 +
6149 + if (!ctx->cdata.keylen || !ctx->authsize)
6150 + return 0;
6151 +
6152 + ctx->cdata.key_virt = ctx->key;
6153 +
6154 + /*
6155 + * RFC4543 encrypt shared descriptor
6156 + * Job Descriptor and Shared Descriptor
6157 + * must fit into the 64-word Descriptor h/w Buffer
6158 + */
6159 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
6160 + ctx->cdata.key_inline = true;
6161 + } else {
6162 + ctx->cdata.key_inline = false;
6163 + ctx->cdata.key_dma = ctx->key_dma;
6164 + }
6165 +
6166 + flc = &ctx->flc[ENCRYPT];
6167 + desc = flc->sh_desc;
6168 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6169 + true);
6170 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6171 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6172 + sizeof(flc->flc) + desc_bytes(desc),
6173 + ctx->dir);
6174 +
6175 + /*
6176 + * Job Descriptor and Shared Descriptors
6177 + * must all fit into the 64-word Descriptor h/w Buffer
6178 + */
6179 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
6180 + ctx->cdata.key_inline = true;
6181 + } else {
6182 + ctx->cdata.key_inline = false;
6183 + ctx->cdata.key_dma = ctx->key_dma;
6184 + }
6185 +
6186 + flc = &ctx->flc[DECRYPT];
6187 + desc = flc->sh_desc;
6188 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6189 + true);
6190 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6191 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6192 + sizeof(flc->flc) + desc_bytes(desc),
6193 + ctx->dir);
6194 +
6195 + return 0;
6196 +}
6197 +
6198 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
6199 + unsigned int authsize)
6200 +{
6201 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6202 +
6203 + ctx->authsize = authsize;
6204 + rfc4543_set_sh_desc(authenc);
6205 +
6206 + return 0;
6207 +}
6208 +
6209 +static int rfc4543_setkey(struct crypto_aead *aead,
6210 + const u8 *key, unsigned int keylen)
6211 +{
6212 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6213 + struct device *dev = ctx->dev;
6214 +
6215 + if (keylen < 4)
6216 + return -EINVAL;
6217 +
6218 +#ifdef DEBUG
6219 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6220 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6221 +#endif
6222 +
6223 + memcpy(ctx->key, key, keylen);
6224 + /*
6225 + * The last four bytes of the key material are used as the salt value
6226 + * in the nonce. Update the AES key length.
6227 + */
6228 + ctx->cdata.keylen = keylen - 4;
6229 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6230 + ctx->dir);
6231 +
6232 + return rfc4543_set_sh_desc(aead);
6233 +}
6234 +
6235 +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6236 + unsigned int keylen)
6237 +{
6238 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6239 + struct caam_skcipher_alg *alg =
6240 + container_of(crypto_skcipher_alg(skcipher),
6241 + struct caam_skcipher_alg, skcipher);
6242 + struct device *dev = ctx->dev;
6243 + struct caam_flc *flc;
6244 + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
6245 + u32 *desc;
6246 + u32 ctx1_iv_off = 0;
6247 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
6248 + OP_ALG_AAI_CTR_MOD128) &&
6249 + ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
6250 + OP_ALG_ALGSEL_CHACHA20);
6251 + const bool is_rfc3686 = alg->caam.rfc3686;
6252 +
6253 +#ifdef DEBUG
6254 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6255 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6256 +#endif
6257 + /*
6258 + * AES-CTR needs to load IV in CONTEXT1 reg
6259 + * at an offset of 128bits (16bytes)
6260 + * CONTEXT1[255:128] = IV
6261 + */
6262 + if (ctr_mode)
6263 + ctx1_iv_off = 16;
6264 +
6265 + /*
6266 + * RFC3686 specific:
6267 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
6268 + * | *key = {KEY, NONCE}
6269 + */
6270 + if (is_rfc3686) {
6271 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
6272 + keylen -= CTR_RFC3686_NONCE_SIZE;
6273 + }
6274 +
6275 + ctx->cdata.keylen = keylen;
6276 + ctx->cdata.key_virt = key;
6277 + ctx->cdata.key_inline = true;
6278 +
6279 + /* skcipher_encrypt shared descriptor */
6280 + flc = &ctx->flc[ENCRYPT];
6281 + desc = flc->sh_desc;
6282 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
6283 + is_rfc3686, ctx1_iv_off);
6284 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6285 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6286 + sizeof(flc->flc) + desc_bytes(desc),
6287 + ctx->dir);
6288 +
6289 + /* skcipher_decrypt shared descriptor */
6290 + flc = &ctx->flc[DECRYPT];
6291 + desc = flc->sh_desc;
6292 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
6293 + is_rfc3686, ctx1_iv_off);
6294 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6295 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6296 + sizeof(flc->flc) + desc_bytes(desc),
6297 + ctx->dir);
6298 +
6299 + return 0;
6300 +}
6301 +
6302 +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6303 + unsigned int keylen)
6304 +{
6305 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6306 + struct device *dev = ctx->dev;
6307 + struct caam_flc *flc;
6308 + u32 *desc;
6309 +
6310 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
6311 + dev_err(dev, "key size mismatch\n");
6312 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
6313 + return -EINVAL;
6314 + }
6315 +
6316 + ctx->cdata.keylen = keylen;
6317 + ctx->cdata.key_virt = key;
6318 + ctx->cdata.key_inline = true;
6319 +
6320 + /* xts_skcipher_encrypt shared descriptor */
6321 + flc = &ctx->flc[ENCRYPT];
6322 + desc = flc->sh_desc;
6323 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
6324 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6325 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6326 + sizeof(flc->flc) + desc_bytes(desc),
6327 + ctx->dir);
6328 +
6329 + /* xts_skcipher_decrypt shared descriptor */
6330 + flc = &ctx->flc[DECRYPT];
6331 + desc = flc->sh_desc;
6332 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
6333 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6334 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6335 + sizeof(flc->flc) + desc_bytes(desc),
6336 + ctx->dir);
6337 +
6338 + return 0;
6339 +}
6340 +
6341 +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
6342 +{
6343 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6344 + struct caam_request *req_ctx = skcipher_request_ctx(req);
6345 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
6346 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
6347 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6348 + struct device *dev = ctx->dev;
6349 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
6350 + GFP_KERNEL : GFP_ATOMIC;
6351 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
6352 + struct skcipher_edesc *edesc;
6353 + dma_addr_t iv_dma;
6354 + u8 *iv;
6355 + int ivsize = crypto_skcipher_ivsize(skcipher);
6356 + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
6357 + struct dpaa2_sg_entry *sg_table;
6358 +
6359 + src_nents = sg_nents_for_len(req->src, req->cryptlen);
6360 + if (unlikely(src_nents < 0)) {
6361 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
6362 + req->cryptlen);
6363 + return ERR_PTR(src_nents);
6364 + }
6365 +
6366 + if (unlikely(req->dst != req->src)) {
6367 + dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
6368 + if (unlikely(dst_nents < 0)) {
6369 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
6370 + req->cryptlen);
6371 + return ERR_PTR(dst_nents);
6372 + }
6373 +
6374 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6375 + DMA_TO_DEVICE);
6376 + if (unlikely(!mapped_src_nents)) {
6377 + dev_err(dev, "unable to map source\n");
6378 + return ERR_PTR(-ENOMEM);
6379 + }
6380 +
6381 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
6382 + DMA_FROM_DEVICE);
6383 + if (unlikely(!mapped_dst_nents)) {
6384 + dev_err(dev, "unable to map destination\n");
6385 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
6386 + return ERR_PTR(-ENOMEM);
6387 + }
6388 + } else {
6389 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6390 + DMA_BIDIRECTIONAL);
6391 + if (unlikely(!mapped_src_nents)) {
6392 + dev_err(dev, "unable to map source\n");
6393 + return ERR_PTR(-ENOMEM);
6394 + }
6395 + }
6396 +
6397 + qm_sg_ents = 1 + mapped_src_nents;
6398 + dst_sg_idx = qm_sg_ents;
6399 +
6400 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
6401 + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
6402 + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
6403 + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
6404 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
6405 + qm_sg_ents, ivsize);
6406 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6407 + 0, 0, 0);
6408 + return ERR_PTR(-ENOMEM);
6409 + }
6410 +
6411 + /* allocate space for base edesc, link tables and IV */
6412 + edesc = qi_cache_zalloc(GFP_DMA | flags);
6413 + if (unlikely(!edesc)) {
6414 + dev_err(dev, "could not allocate extended descriptor\n");
6415 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6416 + 0, 0, 0);
6417 + return ERR_PTR(-ENOMEM);
6418 + }
6419 +
6420 + /* Make sure IV is located in a DMAable area */
6421 + sg_table = &edesc->sgt[0];
6422 + iv = (u8 *)(sg_table + qm_sg_ents);
6423 + memcpy(iv, req->iv, ivsize);
6424 +
6425 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
6426 + if (dma_mapping_error(dev, iv_dma)) {
6427 + dev_err(dev, "unable to map IV\n");
6428 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6429 + 0, 0, 0);
6430 + qi_cache_free(edesc);
6431 + return ERR_PTR(-ENOMEM);
6432 + }
6433 +
6434 + edesc->src_nents = src_nents;
6435 + edesc->dst_nents = dst_nents;
6436 + edesc->iv_dma = iv_dma;
6437 + edesc->qm_sg_bytes = qm_sg_bytes;
6438 +
6439 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
6440 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
6441 +
6442 + if (mapped_dst_nents > 1)
6443 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
6444 + dst_sg_idx, 0);
6445 +
6446 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
6447 + DMA_TO_DEVICE);
6448 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
6449 + dev_err(dev, "unable to map S/G table\n");
6450 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
6451 + iv_dma, ivsize, 0, 0);
6452 + qi_cache_free(edesc);
6453 + return ERR_PTR(-ENOMEM);
6454 + }
6455 +
6456 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
6457 + dpaa2_fl_set_final(in_fle, true);
6458 + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
6459 + dpaa2_fl_set_len(out_fle, req->cryptlen);
6460 +
6461 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
6462 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
6463 +
6464 + if (req->src == req->dst) {
6465 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6466 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
6467 + sizeof(*sg_table));
6468 + } else if (mapped_dst_nents > 1) {
6469 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6470 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
6471 + sizeof(*sg_table));
6472 + } else {
6473 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
6474 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
6475 + }
6476 +
6477 + return edesc;
6478 +}
6479 +
6480 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
6481 + struct aead_request *req)
6482 +{
6483 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6484 + int ivsize = crypto_aead_ivsize(aead);
6485 +
6486 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6487 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6488 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
6489 +}
6490 +
6491 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
6492 + struct aead_request *req)
6493 +{
6494 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6495 + int ivsize = crypto_aead_ivsize(tls);
6496 +
6497 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
6498 + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
6499 + edesc->qm_sg_bytes);
6500 +}
6501 +
6502 +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
6503 + struct skcipher_request *req)
6504 +{
6505 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6506 + int ivsize = crypto_skcipher_ivsize(skcipher);
6507 +
6508 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6509 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6510 +}
6511 +
6512 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
6513 +{
6514 + struct crypto_async_request *areq = cbk_ctx;
6515 + struct aead_request *req = container_of(areq, struct aead_request,
6516 + base);
6517 + struct caam_request *req_ctx = to_caam_req(areq);
6518 + struct aead_edesc *edesc = req_ctx->edesc;
6519 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6520 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6521 + int ecode = 0;
6522 +
6523 +#ifdef DEBUG
6524 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6525 +#endif
6526 +
6527 + if (unlikely(status)) {
6528 + caam_qi2_strstatus(ctx->dev, status);
6529 + ecode = -EIO;
6530 + }
6531 +
6532 + aead_unmap(ctx->dev, edesc, req);
6533 + qi_cache_free(edesc);
6534 + aead_request_complete(req, ecode);
6535 +}
6536 +
6537 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
6538 +{
6539 + struct crypto_async_request *areq = cbk_ctx;
6540 + struct aead_request *req = container_of(areq, struct aead_request,
6541 + base);
6542 + struct caam_request *req_ctx = to_caam_req(areq);
6543 + struct aead_edesc *edesc = req_ctx->edesc;
6544 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6545 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6546 + int ecode = 0;
6547 +
6548 +#ifdef DEBUG
6549 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6550 +#endif
6551 +
6552 + if (unlikely(status)) {
6553 + caam_qi2_strstatus(ctx->dev, status);
6554 + /*
6555 +		 * Return -EBADMSG if the hardware authentication (ICV) check failed
6556 + */
6557 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6558 + JRSTA_CCBERR_ERRID_ICVCHK)
6559 + ecode = -EBADMSG;
6560 + else
6561 + ecode = -EIO;
6562 + }
6563 +
6564 + aead_unmap(ctx->dev, edesc, req);
6565 + qi_cache_free(edesc);
6566 + aead_request_complete(req, ecode);
6567 +}
6568 +
6569 +static int aead_encrypt(struct aead_request *req)
6570 +{
6571 + struct aead_edesc *edesc;
6572 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6573 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6574 + struct caam_request *caam_req = aead_request_ctx(req);
6575 + int ret;
6576 +
6577 + /* allocate extended descriptor */
6578 + edesc = aead_edesc_alloc(req, true);
6579 + if (IS_ERR(edesc))
6580 + return PTR_ERR(edesc);
6581 +
6582 + caam_req->flc = &ctx->flc[ENCRYPT];
6583 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6584 + caam_req->cbk = aead_encrypt_done;
6585 + caam_req->ctx = &req->base;
6586 + caam_req->edesc = edesc;
6587 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6588 + if (ret != -EINPROGRESS &&
6589 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6590 + aead_unmap(ctx->dev, edesc, req);
6591 + qi_cache_free(edesc);
6592 + }
6593 +
6594 + return ret;
6595 +}
6596 +
6597 +static int aead_decrypt(struct aead_request *req)
6598 +{
6599 + struct aead_edesc *edesc;
6600 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6601 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6602 + struct caam_request *caam_req = aead_request_ctx(req);
6603 + int ret;
6604 +
6605 + /* allocate extended descriptor */
6606 + edesc = aead_edesc_alloc(req, false);
6607 + if (IS_ERR(edesc))
6608 + return PTR_ERR(edesc);
6609 +
6610 + caam_req->flc = &ctx->flc[DECRYPT];
6611 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6612 + caam_req->cbk = aead_decrypt_done;
6613 + caam_req->ctx = &req->base;
6614 + caam_req->edesc = edesc;
6615 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6616 + if (ret != -EINPROGRESS &&
6617 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6618 + aead_unmap(ctx->dev, edesc, req);
6619 + qi_cache_free(edesc);
6620 + }
6621 +
6622 + return ret;
6623 +}
6624 +
6625 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
6626 +{
6627 + struct crypto_async_request *areq = cbk_ctx;
6628 + struct aead_request *req = container_of(areq, struct aead_request,
6629 + base);
6630 + struct caam_request *req_ctx = to_caam_req(areq);
6631 + struct tls_edesc *edesc = req_ctx->edesc;
6632 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6633 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6634 + int ecode = 0;
6635 +
6636 +#ifdef DEBUG
6637 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6638 +#endif
6639 +
6640 + if (unlikely(status)) {
6641 + caam_qi2_strstatus(ctx->dev, status);
6642 + ecode = -EIO;
6643 + }
6644 +
6645 + tls_unmap(ctx->dev, edesc, req);
6646 + qi_cache_free(edesc);
6647 + aead_request_complete(req, ecode);
6648 +}
6649 +
6650 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
6651 +{
6652 + struct crypto_async_request *areq = cbk_ctx;
6653 + struct aead_request *req = container_of(areq, struct aead_request,
6654 + base);
6655 + struct caam_request *req_ctx = to_caam_req(areq);
6656 + struct tls_edesc *edesc = req_ctx->edesc;
6657 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6658 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6659 + int ecode = 0;
6660 +
6661 +#ifdef DEBUG
6662 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6663 +#endif
6664 +
6665 + if (unlikely(status)) {
6666 + caam_qi2_strstatus(ctx->dev, status);
6667 + /*
6668 +		 * Return -EBADMSG if the hardware authentication (ICV) check failed
6669 + */
6670 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6671 + JRSTA_CCBERR_ERRID_ICVCHK)
6672 + ecode = -EBADMSG;
6673 + else
6674 + ecode = -EIO;
6675 + }
6676 +
6677 + tls_unmap(ctx->dev, edesc, req);
6678 + qi_cache_free(edesc);
6679 + aead_request_complete(req, ecode);
6680 +}
6681 +
6682 +static int tls_encrypt(struct aead_request *req)
6683 +{
6684 + struct tls_edesc *edesc;
6685 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6686 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6687 + struct caam_request *caam_req = aead_request_ctx(req);
6688 + int ret;
6689 +
6690 + /* allocate extended descriptor */
6691 + edesc = tls_edesc_alloc(req, true);
6692 + if (IS_ERR(edesc))
6693 + return PTR_ERR(edesc);
6694 +
6695 + caam_req->flc = &ctx->flc[ENCRYPT];
6696 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6697 + caam_req->cbk = tls_encrypt_done;
6698 + caam_req->ctx = &req->base;
6699 + caam_req->edesc = edesc;
6700 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6701 + if (ret != -EINPROGRESS &&
6702 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6703 + tls_unmap(ctx->dev, edesc, req);
6704 + qi_cache_free(edesc);
6705 + }
6706 +
6707 + return ret;
6708 +}
6709 +
6710 +static int tls_decrypt(struct aead_request *req)
6711 +{
6712 + struct tls_edesc *edesc;
6713 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6714 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6715 + struct caam_request *caam_req = aead_request_ctx(req);
6716 + int ret;
6717 +
6718 + /* allocate extended descriptor */
6719 + edesc = tls_edesc_alloc(req, false);
6720 + if (IS_ERR(edesc))
6721 + return PTR_ERR(edesc);
6722 +
6723 + caam_req->flc = &ctx->flc[DECRYPT];
6724 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6725 + caam_req->cbk = tls_decrypt_done;
6726 + caam_req->ctx = &req->base;
6727 + caam_req->edesc = edesc;
6728 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6729 + if (ret != -EINPROGRESS &&
6730 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6731 + tls_unmap(ctx->dev, edesc, req);
6732 + qi_cache_free(edesc);
6733 + }
6734 +
6735 + return ret;
6736 +}
6737 +
6738 +static int ipsec_gcm_encrypt(struct aead_request *req)
6739 +{
6740 + if (req->assoclen < 8)
6741 + return -EINVAL;
6742 +
6743 + return aead_encrypt(req);
6744 +}
6745 +
6746 +static int ipsec_gcm_decrypt(struct aead_request *req)
6747 +{
6748 + if (req->assoclen < 8)
6749 + return -EINVAL;
6750 +
6751 + return aead_decrypt(req);
6752 +}
6753 +
6754 +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
6755 +{
6756 + struct crypto_async_request *areq = cbk_ctx;
6757 + struct skcipher_request *req = skcipher_request_cast(areq);
6758 + struct caam_request *req_ctx = to_caam_req(areq);
6759 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6760 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6761 + struct skcipher_edesc *edesc = req_ctx->edesc;
6762 + int ecode = 0;
6763 + int ivsize = crypto_skcipher_ivsize(skcipher);
6764 +
6765 +#ifdef DEBUG
6766 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6767 +#endif
6768 +
6769 + if (unlikely(status)) {
6770 + caam_qi2_strstatus(ctx->dev, status);
6771 + ecode = -EIO;
6772 + }
6773 +
6774 +#ifdef DEBUG
6775 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
6776 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6777 + edesc->src_nents > 1 ? 100 : ivsize, 1);
6778 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
6779 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6780 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6781 +#endif
6782 +
6783 + skcipher_unmap(ctx->dev, edesc, req);
6784 +
6785 + /*
6786 + * The crypto API expects us to set the IV (req->iv) to the last
6787 + * ciphertext block. This is used e.g. by the CTS mode.
6788 + */
6789 + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
6790 + ivsize, 0);
6791 +
6792 + qi_cache_free(edesc);
6793 + skcipher_request_complete(req, ecode);
6794 +}
6795 +
6796 +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
6797 +{
6798 + struct crypto_async_request *areq = cbk_ctx;
6799 + struct skcipher_request *req = skcipher_request_cast(areq);
6800 + struct caam_request *req_ctx = to_caam_req(areq);
6801 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6802 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6803 + struct skcipher_edesc *edesc = req_ctx->edesc;
6804 + int ecode = 0;
6805 +#ifdef DEBUG
6806 + int ivsize = crypto_skcipher_ivsize(skcipher);
6807 +
6808 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6809 +#endif
6810 +
6811 + if (unlikely(status)) {
6812 + caam_qi2_strstatus(ctx->dev, status);
6813 + ecode = -EIO;
6814 + }
6815 +
6816 +#ifdef DEBUG
6817 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
6818 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6819 + edesc->src_nents > 1 ? 100 : ivsize, 1);
6820 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
6821 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6822 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6823 +#endif
6824 +
6825 + skcipher_unmap(ctx->dev, edesc, req);
6826 + qi_cache_free(edesc);
6827 + skcipher_request_complete(req, ecode);
6828 +}
6829 +
6830 +static int skcipher_encrypt(struct skcipher_request *req)
6831 +{
6832 + struct skcipher_edesc *edesc;
6833 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6834 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6835 + struct caam_request *caam_req = skcipher_request_ctx(req);
6836 + int ret;
6837 +
6838 + /* allocate extended descriptor */
6839 + edesc = skcipher_edesc_alloc(req);
6840 + if (IS_ERR(edesc))
6841 + return PTR_ERR(edesc);
6842 +
6843 + caam_req->flc = &ctx->flc[ENCRYPT];
6844 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6845 + caam_req->cbk = skcipher_encrypt_done;
6846 + caam_req->ctx = &req->base;
6847 + caam_req->edesc = edesc;
6848 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6849 + if (ret != -EINPROGRESS &&
6850 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6851 + skcipher_unmap(ctx->dev, edesc, req);
6852 + qi_cache_free(edesc);
6853 + }
6854 +
6855 + return ret;
6856 +}
6857 +
6858 +static int skcipher_decrypt(struct skcipher_request *req)
6859 +{
6860 + struct skcipher_edesc *edesc;
6861 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6862 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6863 + struct caam_request *caam_req = skcipher_request_ctx(req);
6864 + int ivsize = crypto_skcipher_ivsize(skcipher);
6865 + int ret;
6866 +
6867 + /* allocate extended descriptor */
6868 + edesc = skcipher_edesc_alloc(req);
6869 + if (IS_ERR(edesc))
6870 + return PTR_ERR(edesc);
6871 +
6872 + /*
6873 + * The crypto API expects us to set the IV (req->iv) to the last
6874 + * ciphertext block.
6875 + */
6876 + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
6877 + ivsize, 0);
6878 +
6879 + caam_req->flc = &ctx->flc[DECRYPT];
6880 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6881 + caam_req->cbk = skcipher_decrypt_done;
6882 + caam_req->ctx = &req->base;
6883 + caam_req->edesc = edesc;
6884 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6885 + if (ret != -EINPROGRESS &&
6886 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6887 + skcipher_unmap(ctx->dev, edesc, req);
6888 + qi_cache_free(edesc);
6889 + }
6890 +
6891 + return ret;
6892 +}
6893 +
6894 +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
6895 + bool uses_dkp)
6896 +{
6897 + dma_addr_t dma_addr;
6898 + int i;
6899 +
6900 + /* copy descriptor header template value */
6901 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
6902 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
6903 +
6904 + ctx->dev = caam->dev;
6905 + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
6906 +
6907 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
6908 + offsetof(struct caam_ctx, flc_dma),
6909 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
6910 + if (dma_mapping_error(ctx->dev, dma_addr)) {
6911 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
6912 + return -ENOMEM;
6913 + }
6914 +
6915 + for (i = 0; i < NUM_OP; i++)
6916 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
6917 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
6918 +
6919 + return 0;
6920 +}
6921 +
6922 +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
6923 +{
6924 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
6925 + struct caam_skcipher_alg *caam_alg =
6926 + container_of(alg, typeof(*caam_alg), skcipher);
6927 +
6928 + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
6929 + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
6930 +}
6931 +
6932 +static int caam_cra_init_aead(struct crypto_aead *tfm)
6933 +{
6934 + struct aead_alg *alg = crypto_aead_alg(tfm);
6935 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
6936 + aead);
6937 +
6938 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
6939 + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
6940 + (alg->setkey == aead_setkey) ||
6941 + (alg->setkey == tls_setkey));
6942 +}
6943 +
6944 +static void caam_exit_common(struct caam_ctx *ctx)
6945 +{
6946 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
6947 + offsetof(struct caam_ctx, flc_dma), ctx->dir,
6948 + DMA_ATTR_SKIP_CPU_SYNC);
6949 +}
6950 +
6951 +static void caam_cra_exit(struct crypto_skcipher *tfm)
6952 +{
6953 + caam_exit_common(crypto_skcipher_ctx(tfm));
6954 +}
6955 +
6956 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
6957 +{
6958 + caam_exit_common(crypto_aead_ctx(tfm));
6959 +}
6960 +
6961 +static struct caam_skcipher_alg driver_algs[] = {
6962 + {
6963 + .skcipher = {
6964 + .base = {
6965 + .cra_name = "cbc(aes)",
6966 + .cra_driver_name = "cbc-aes-caam-qi2",
6967 + .cra_blocksize = AES_BLOCK_SIZE,
6968 + },
6969 + .setkey = skcipher_setkey,
6970 + .encrypt = skcipher_encrypt,
6971 + .decrypt = skcipher_decrypt,
6972 + .min_keysize = AES_MIN_KEY_SIZE,
6973 + .max_keysize = AES_MAX_KEY_SIZE,
6974 + .ivsize = AES_BLOCK_SIZE,
6975 + },
6976 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6977 + },
6978 + {
6979 + .skcipher = {
6980 + .base = {
6981 + .cra_name = "cbc(des3_ede)",
6982 + .cra_driver_name = "cbc-3des-caam-qi2",
6983 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6984 + },
6985 + .setkey = skcipher_setkey,
6986 + .encrypt = skcipher_encrypt,
6987 + .decrypt = skcipher_decrypt,
6988 + .min_keysize = DES3_EDE_KEY_SIZE,
6989 + .max_keysize = DES3_EDE_KEY_SIZE,
6990 + .ivsize = DES3_EDE_BLOCK_SIZE,
6991 + },
6992 + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6993 + },
6994 + {
6995 + .skcipher = {
6996 + .base = {
6997 + .cra_name = "cbc(des)",
6998 + .cra_driver_name = "cbc-des-caam-qi2",
6999 + .cra_blocksize = DES_BLOCK_SIZE,
7000 + },
7001 + .setkey = skcipher_setkey,
7002 + .encrypt = skcipher_encrypt,
7003 + .decrypt = skcipher_decrypt,
7004 + .min_keysize = DES_KEY_SIZE,
7005 + .max_keysize = DES_KEY_SIZE,
7006 + .ivsize = DES_BLOCK_SIZE,
7007 + },
7008 + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7009 + },
7010 + {
7011 + .skcipher = {
7012 + .base = {
7013 + .cra_name = "ctr(aes)",
7014 + .cra_driver_name = "ctr-aes-caam-qi2",
7015 + .cra_blocksize = 1,
7016 + },
7017 + .setkey = skcipher_setkey,
7018 + .encrypt = skcipher_encrypt,
7019 + .decrypt = skcipher_decrypt,
7020 + .min_keysize = AES_MIN_KEY_SIZE,
7021 + .max_keysize = AES_MAX_KEY_SIZE,
7022 + .ivsize = AES_BLOCK_SIZE,
7023 + .chunksize = AES_BLOCK_SIZE,
7024 + },
7025 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
7026 + OP_ALG_AAI_CTR_MOD128,
7027 + },
7028 + {
7029 + .skcipher = {
7030 + .base = {
7031 + .cra_name = "rfc3686(ctr(aes))",
7032 + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
7033 + .cra_blocksize = 1,
7034 + },
7035 + .setkey = skcipher_setkey,
7036 + .encrypt = skcipher_encrypt,
7037 + .decrypt = skcipher_decrypt,
7038 + .min_keysize = AES_MIN_KEY_SIZE +
7039 + CTR_RFC3686_NONCE_SIZE,
7040 + .max_keysize = AES_MAX_KEY_SIZE +
7041 + CTR_RFC3686_NONCE_SIZE,
7042 + .ivsize = CTR_RFC3686_IV_SIZE,
7043 + .chunksize = AES_BLOCK_SIZE,
7044 + },
7045 + .caam = {
7046 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7047 + OP_ALG_AAI_CTR_MOD128,
7048 + .rfc3686 = true,
7049 + },
7050 + },
7051 + {
7052 + .skcipher = {
7053 + .base = {
7054 + .cra_name = "xts(aes)",
7055 + .cra_driver_name = "xts-aes-caam-qi2",
7056 + .cra_blocksize = AES_BLOCK_SIZE,
7057 + },
7058 + .setkey = xts_skcipher_setkey,
7059 + .encrypt = skcipher_encrypt,
7060 + .decrypt = skcipher_decrypt,
7061 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
7062 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
7063 + .ivsize = AES_BLOCK_SIZE,
7064 + },
7065 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
7066 + },
7067 + {
7068 + .skcipher = {
7069 + .base = {
7070 + .cra_name = "chacha20",
7071 + .cra_driver_name = "chacha20-caam-qi2",
7072 + .cra_blocksize = 1,
7073 + },
7074 + .setkey = skcipher_setkey,
7075 + .encrypt = skcipher_encrypt,
7076 + .decrypt = skcipher_decrypt,
7077 + .min_keysize = CHACHA20_KEY_SIZE,
7078 + .max_keysize = CHACHA20_KEY_SIZE,
7079 + .ivsize = CHACHA20_IV_SIZE,
7080 + },
7081 + .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
7082 + },
7083 +};
7084 +
7085 +static struct caam_aead_alg driver_aeads[] = {
7086 + {
7087 + .aead = {
7088 + .base = {
7089 + .cra_name = "rfc4106(gcm(aes))",
7090 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
7091 + .cra_blocksize = 1,
7092 + },
7093 + .setkey = rfc4106_setkey,
7094 + .setauthsize = rfc4106_setauthsize,
7095 + .encrypt = ipsec_gcm_encrypt,
7096 + .decrypt = ipsec_gcm_decrypt,
7097 + .ivsize = 8,
7098 + .maxauthsize = AES_BLOCK_SIZE,
7099 + },
7100 + .caam = {
7101 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7102 + },
7103 + },
7104 + {
7105 + .aead = {
7106 + .base = {
7107 + .cra_name = "rfc4543(gcm(aes))",
7108 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
7109 + .cra_blocksize = 1,
7110 + },
7111 + .setkey = rfc4543_setkey,
7112 + .setauthsize = rfc4543_setauthsize,
7113 + .encrypt = ipsec_gcm_encrypt,
7114 + .decrypt = ipsec_gcm_decrypt,
7115 + .ivsize = 8,
7116 + .maxauthsize = AES_BLOCK_SIZE,
7117 + },
7118 + .caam = {
7119 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7120 + },
7121 + },
7122 + /* Galois Counter Mode */
7123 + {
7124 + .aead = {
7125 + .base = {
7126 + .cra_name = "gcm(aes)",
7127 + .cra_driver_name = "gcm-aes-caam-qi2",
7128 + .cra_blocksize = 1,
7129 + },
7130 + .setkey = gcm_setkey,
7131 + .setauthsize = gcm_setauthsize,
7132 + .encrypt = aead_encrypt,
7133 + .decrypt = aead_decrypt,
7134 + .ivsize = 12,
7135 + .maxauthsize = AES_BLOCK_SIZE,
7136 + },
7137 + .caam = {
7138 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7139 + }
7140 + },
7141 + /* single-pass ipsec_esp descriptor */
7142 + {
7143 + .aead = {
7144 + .base = {
7145 + .cra_name = "authenc(hmac(md5),cbc(aes))",
7146 + .cra_driver_name = "authenc-hmac-md5-"
7147 + "cbc-aes-caam-qi2",
7148 + .cra_blocksize = AES_BLOCK_SIZE,
7149 + },
7150 + .setkey = aead_setkey,
7151 + .setauthsize = aead_setauthsize,
7152 + .encrypt = aead_encrypt,
7153 + .decrypt = aead_decrypt,
7154 + .ivsize = AES_BLOCK_SIZE,
7155 + .maxauthsize = MD5_DIGEST_SIZE,
7156 + },
7157 + .caam = {
7158 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7159 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7160 + OP_ALG_AAI_HMAC_PRECOMP,
7161 + }
7162 + },
7163 + {
7164 + .aead = {
7165 + .base = {
7166 + .cra_name = "echainiv(authenc(hmac(md5),"
7167 + "cbc(aes)))",
7168 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7169 + "cbc-aes-caam-qi2",
7170 + .cra_blocksize = AES_BLOCK_SIZE,
7171 + },
7172 + .setkey = aead_setkey,
7173 + .setauthsize = aead_setauthsize,
7174 + .encrypt = aead_encrypt,
7175 + .decrypt = aead_decrypt,
7176 + .ivsize = AES_BLOCK_SIZE,
7177 + .maxauthsize = MD5_DIGEST_SIZE,
7178 + },
7179 + .caam = {
7180 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7181 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7182 + OP_ALG_AAI_HMAC_PRECOMP,
7183 + .geniv = true,
7184 + }
7185 + },
7186 + {
7187 + .aead = {
7188 + .base = {
7189 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
7190 + .cra_driver_name = "authenc-hmac-sha1-"
7191 + "cbc-aes-caam-qi2",
7192 + .cra_blocksize = AES_BLOCK_SIZE,
7193 + },
7194 + .setkey = aead_setkey,
7195 + .setauthsize = aead_setauthsize,
7196 + .encrypt = aead_encrypt,
7197 + .decrypt = aead_decrypt,
7198 + .ivsize = AES_BLOCK_SIZE,
7199 + .maxauthsize = SHA1_DIGEST_SIZE,
7200 + },
7201 + .caam = {
7202 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7203 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7204 + OP_ALG_AAI_HMAC_PRECOMP,
7205 + }
7206 + },
7207 + {
7208 + .aead = {
7209 + .base = {
7210 + .cra_name = "echainiv(authenc(hmac(sha1),"
7211 + "cbc(aes)))",
7212 + .cra_driver_name = "echainiv-authenc-"
7213 + "hmac-sha1-cbc-aes-caam-qi2",
7214 + .cra_blocksize = AES_BLOCK_SIZE,
7215 + },
7216 + .setkey = aead_setkey,
7217 + .setauthsize = aead_setauthsize,
7218 + .encrypt = aead_encrypt,
7219 + .decrypt = aead_decrypt,
7220 + .ivsize = AES_BLOCK_SIZE,
7221 + .maxauthsize = SHA1_DIGEST_SIZE,
7222 + },
7223 + .caam = {
7224 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7225 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7226 + OP_ALG_AAI_HMAC_PRECOMP,
7227 + .geniv = true,
7228 + },
7229 + },
7230 + {
7231 + .aead = {
7232 + .base = {
7233 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
7234 + .cra_driver_name = "authenc-hmac-sha224-"
7235 + "cbc-aes-caam-qi2",
7236 + .cra_blocksize = AES_BLOCK_SIZE,
7237 + },
7238 + .setkey = aead_setkey,
7239 + .setauthsize = aead_setauthsize,
7240 + .encrypt = aead_encrypt,
7241 + .decrypt = aead_decrypt,
7242 + .ivsize = AES_BLOCK_SIZE,
7243 + .maxauthsize = SHA224_DIGEST_SIZE,
7244 + },
7245 + .caam = {
7246 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7247 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7248 + OP_ALG_AAI_HMAC_PRECOMP,
7249 + }
7250 + },
7251 + {
7252 + .aead = {
7253 + .base = {
7254 + .cra_name = "echainiv(authenc(hmac(sha224),"
7255 + "cbc(aes)))",
7256 + .cra_driver_name = "echainiv-authenc-"
7257 + "hmac-sha224-cbc-aes-caam-qi2",
7258 + .cra_blocksize = AES_BLOCK_SIZE,
7259 + },
7260 + .setkey = aead_setkey,
7261 + .setauthsize = aead_setauthsize,
7262 + .encrypt = aead_encrypt,
7263 + .decrypt = aead_decrypt,
7264 + .ivsize = AES_BLOCK_SIZE,
7265 + .maxauthsize = SHA224_DIGEST_SIZE,
7266 + },
7267 + .caam = {
7268 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7269 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7270 + OP_ALG_AAI_HMAC_PRECOMP,
7271 + .geniv = true,
7272 + }
7273 + },
7274 + {
7275 + .aead = {
7276 + .base = {
7277 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
7278 + .cra_driver_name = "authenc-hmac-sha256-"
7279 + "cbc-aes-caam-qi2",
7280 + .cra_blocksize = AES_BLOCK_SIZE,
7281 + },
7282 + .setkey = aead_setkey,
7283 + .setauthsize = aead_setauthsize,
7284 + .encrypt = aead_encrypt,
7285 + .decrypt = aead_decrypt,
7286 + .ivsize = AES_BLOCK_SIZE,
7287 + .maxauthsize = SHA256_DIGEST_SIZE,
7288 + },
7289 + .caam = {
7290 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7291 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7292 + OP_ALG_AAI_HMAC_PRECOMP,
7293 + }
7294 + },
7295 + {
7296 + .aead = {
7297 + .base = {
7298 + .cra_name = "echainiv(authenc(hmac(sha256),"
7299 + "cbc(aes)))",
7300 + .cra_driver_name = "echainiv-authenc-"
7301 + "hmac-sha256-cbc-aes-"
7302 + "caam-qi2",
7303 + .cra_blocksize = AES_BLOCK_SIZE,
7304 + },
7305 + .setkey = aead_setkey,
7306 + .setauthsize = aead_setauthsize,
7307 + .encrypt = aead_encrypt,
7308 + .decrypt = aead_decrypt,
7309 + .ivsize = AES_BLOCK_SIZE,
7310 + .maxauthsize = SHA256_DIGEST_SIZE,
7311 + },
7312 + .caam = {
7313 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7314 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7315 + OP_ALG_AAI_HMAC_PRECOMP,
7316 + .geniv = true,
7317 + }
7318 + },
7319 + {
7320 + .aead = {
7321 + .base = {
7322 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
7323 + .cra_driver_name = "authenc-hmac-sha384-"
7324 + "cbc-aes-caam-qi2",
7325 + .cra_blocksize = AES_BLOCK_SIZE,
7326 + },
7327 + .setkey = aead_setkey,
7328 + .setauthsize = aead_setauthsize,
7329 + .encrypt = aead_encrypt,
7330 + .decrypt = aead_decrypt,
7331 + .ivsize = AES_BLOCK_SIZE,
7332 + .maxauthsize = SHA384_DIGEST_SIZE,
7333 + },
7334 + .caam = {
7335 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7336 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7337 + OP_ALG_AAI_HMAC_PRECOMP,
7338 + }
7339 + },
7340 + {
7341 + .aead = {
7342 + .base = {
7343 + .cra_name = "echainiv(authenc(hmac(sha384),"
7344 + "cbc(aes)))",
7345 + .cra_driver_name = "echainiv-authenc-"
7346 + "hmac-sha384-cbc-aes-"
7347 + "caam-qi2",
7348 + .cra_blocksize = AES_BLOCK_SIZE,
7349 + },
7350 + .setkey = aead_setkey,
7351 + .setauthsize = aead_setauthsize,
7352 + .encrypt = aead_encrypt,
7353 + .decrypt = aead_decrypt,
7354 + .ivsize = AES_BLOCK_SIZE,
7355 + .maxauthsize = SHA384_DIGEST_SIZE,
7356 + },
7357 + .caam = {
7358 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7359 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7360 + OP_ALG_AAI_HMAC_PRECOMP,
7361 + .geniv = true,
7362 + }
7363 + },
7364 + {
7365 + .aead = {
7366 + .base = {
7367 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
7368 + .cra_driver_name = "authenc-hmac-sha512-"
7369 + "cbc-aes-caam-qi2",
7370 + .cra_blocksize = AES_BLOCK_SIZE,
7371 + },
7372 + .setkey = aead_setkey,
7373 + .setauthsize = aead_setauthsize,
7374 + .encrypt = aead_encrypt,
7375 + .decrypt = aead_decrypt,
7376 + .ivsize = AES_BLOCK_SIZE,
7377 + .maxauthsize = SHA512_DIGEST_SIZE,
7378 + },
7379 + .caam = {
7380 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7381 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7382 + OP_ALG_AAI_HMAC_PRECOMP,
7383 + }
7384 + },
7385 + {
7386 + .aead = {
7387 + .base = {
7388 + .cra_name = "echainiv(authenc(hmac(sha512),"
7389 + "cbc(aes)))",
7390 + .cra_driver_name = "echainiv-authenc-"
7391 + "hmac-sha512-cbc-aes-"
7392 + "caam-qi2",
7393 + .cra_blocksize = AES_BLOCK_SIZE,
7394 + },
7395 + .setkey = aead_setkey,
7396 + .setauthsize = aead_setauthsize,
7397 + .encrypt = aead_encrypt,
7398 + .decrypt = aead_decrypt,
7399 + .ivsize = AES_BLOCK_SIZE,
7400 + .maxauthsize = SHA512_DIGEST_SIZE,
7401 + },
7402 + .caam = {
7403 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7404 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7405 + OP_ALG_AAI_HMAC_PRECOMP,
7406 + .geniv = true,
7407 + }
7408 + },
7409 + {
7410 + .aead = {
7411 + .base = {
7412 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
7413 + .cra_driver_name = "authenc-hmac-md5-"
7414 + "cbc-des3_ede-caam-qi2",
7415 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7416 + },
7417 + .setkey = aead_setkey,
7418 + .setauthsize = aead_setauthsize,
7419 + .encrypt = aead_encrypt,
7420 + .decrypt = aead_decrypt,
7421 + .ivsize = DES3_EDE_BLOCK_SIZE,
7422 + .maxauthsize = MD5_DIGEST_SIZE,
7423 + },
7424 + .caam = {
7425 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7426 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7427 + OP_ALG_AAI_HMAC_PRECOMP,
7428 + }
7429 + },
7430 + {
7431 + .aead = {
7432 + .base = {
7433 + .cra_name = "echainiv(authenc(hmac(md5),"
7434 + "cbc(des3_ede)))",
7435 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7436 + "cbc-des3_ede-caam-qi2",
7437 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7438 + },
7439 + .setkey = aead_setkey,
7440 + .setauthsize = aead_setauthsize,
7441 + .encrypt = aead_encrypt,
7442 + .decrypt = aead_decrypt,
7443 + .ivsize = DES3_EDE_BLOCK_SIZE,
7444 + .maxauthsize = MD5_DIGEST_SIZE,
7445 + },
7446 + .caam = {
7447 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7448 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7449 + OP_ALG_AAI_HMAC_PRECOMP,
7450 + .geniv = true,
7451 + }
7452 + },
7453 + {
7454 + .aead = {
7455 + .base = {
7456 + .cra_name = "authenc(hmac(sha1),"
7457 + "cbc(des3_ede))",
7458 + .cra_driver_name = "authenc-hmac-sha1-"
7459 + "cbc-des3_ede-caam-qi2",
7460 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7461 + },
7462 + .setkey = aead_setkey,
7463 + .setauthsize = aead_setauthsize,
7464 + .encrypt = aead_encrypt,
7465 + .decrypt = aead_decrypt,
7466 + .ivsize = DES3_EDE_BLOCK_SIZE,
7467 + .maxauthsize = SHA1_DIGEST_SIZE,
7468 + },
7469 + .caam = {
7470 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7471 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7472 + OP_ALG_AAI_HMAC_PRECOMP,
7473 + },
7474 + },
7475 + {
7476 + .aead = {
7477 + .base = {
7478 + .cra_name = "echainiv(authenc(hmac(sha1),"
7479 + "cbc(des3_ede)))",
7480 + .cra_driver_name = "echainiv-authenc-"
7481 + "hmac-sha1-"
7482 + "cbc-des3_ede-caam-qi2",
7483 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7484 + },
7485 + .setkey = aead_setkey,
7486 + .setauthsize = aead_setauthsize,
7487 + .encrypt = aead_encrypt,
7488 + .decrypt = aead_decrypt,
7489 + .ivsize = DES3_EDE_BLOCK_SIZE,
7490 + .maxauthsize = SHA1_DIGEST_SIZE,
7491 + },
7492 + .caam = {
7493 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7494 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7495 + OP_ALG_AAI_HMAC_PRECOMP,
7496 + .geniv = true,
7497 + }
7498 + },
7499 + {
7500 + .aead = {
7501 + .base = {
7502 + .cra_name = "authenc(hmac(sha224),"
7503 + "cbc(des3_ede))",
7504 + .cra_driver_name = "authenc-hmac-sha224-"
7505 + "cbc-des3_ede-caam-qi2",
7506 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7507 + },
7508 + .setkey = aead_setkey,
7509 + .setauthsize = aead_setauthsize,
7510 + .encrypt = aead_encrypt,
7511 + .decrypt = aead_decrypt,
7512 + .ivsize = DES3_EDE_BLOCK_SIZE,
7513 + .maxauthsize = SHA224_DIGEST_SIZE,
7514 + },
7515 + .caam = {
7516 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7517 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7518 + OP_ALG_AAI_HMAC_PRECOMP,
7519 + },
7520 + },
7521 + {
7522 + .aead = {
7523 + .base = {
7524 + .cra_name = "echainiv(authenc(hmac(sha224),"
7525 + "cbc(des3_ede)))",
7526 + .cra_driver_name = "echainiv-authenc-"
7527 + "hmac-sha224-"
7528 + "cbc-des3_ede-caam-qi2",
7529 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7530 + },
7531 + .setkey = aead_setkey,
7532 + .setauthsize = aead_setauthsize,
7533 + .encrypt = aead_encrypt,
7534 + .decrypt = aead_decrypt,
7535 + .ivsize = DES3_EDE_BLOCK_SIZE,
7536 + .maxauthsize = SHA224_DIGEST_SIZE,
7537 + },
7538 + .caam = {
7539 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7540 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7541 + OP_ALG_AAI_HMAC_PRECOMP,
7542 + .geniv = true,
7543 + }
7544 + },
7545 + {
7546 + .aead = {
7547 + .base = {
7548 + .cra_name = "authenc(hmac(sha256),"
7549 + "cbc(des3_ede))",
7550 + .cra_driver_name = "authenc-hmac-sha256-"
7551 + "cbc-des3_ede-caam-qi2",
7552 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7553 + },
7554 + .setkey = aead_setkey,
7555 + .setauthsize = aead_setauthsize,
7556 + .encrypt = aead_encrypt,
7557 + .decrypt = aead_decrypt,
7558 + .ivsize = DES3_EDE_BLOCK_SIZE,
7559 + .maxauthsize = SHA256_DIGEST_SIZE,
7560 + },
7561 + .caam = {
7562 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7563 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7564 + OP_ALG_AAI_HMAC_PRECOMP,
7565 + },
7566 + },
7567 + {
7568 + .aead = {
7569 + .base = {
7570 + .cra_name = "echainiv(authenc(hmac(sha256),"
7571 + "cbc(des3_ede)))",
7572 + .cra_driver_name = "echainiv-authenc-"
7573 + "hmac-sha256-"
7574 + "cbc-des3_ede-caam-qi2",
7575 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7576 + },
7577 + .setkey = aead_setkey,
7578 + .setauthsize = aead_setauthsize,
7579 + .encrypt = aead_encrypt,
7580 + .decrypt = aead_decrypt,
7581 + .ivsize = DES3_EDE_BLOCK_SIZE,
7582 + .maxauthsize = SHA256_DIGEST_SIZE,
7583 + },
7584 + .caam = {
7585 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7586 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7587 + OP_ALG_AAI_HMAC_PRECOMP,
7588 + .geniv = true,
7589 + }
7590 + },
7591 + {
7592 + .aead = {
7593 + .base = {
7594 + .cra_name = "authenc(hmac(sha384),"
7595 + "cbc(des3_ede))",
7596 + .cra_driver_name = "authenc-hmac-sha384-"
7597 + "cbc-des3_ede-caam-qi2",
7598 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7599 + },
7600 + .setkey = aead_setkey,
7601 + .setauthsize = aead_setauthsize,
7602 + .encrypt = aead_encrypt,
7603 + .decrypt = aead_decrypt,
7604 + .ivsize = DES3_EDE_BLOCK_SIZE,
7605 + .maxauthsize = SHA384_DIGEST_SIZE,
7606 + },
7607 + .caam = {
7608 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7609 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7610 + OP_ALG_AAI_HMAC_PRECOMP,
7611 + },
7612 + },
7613 + {
7614 + .aead = {
7615 + .base = {
7616 + .cra_name = "echainiv(authenc(hmac(sha384),"
7617 + "cbc(des3_ede)))",
7618 + .cra_driver_name = "echainiv-authenc-"
7619 + "hmac-sha384-"
7620 + "cbc-des3_ede-caam-qi2",
7621 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7622 + },
7623 + .setkey = aead_setkey,
7624 + .setauthsize = aead_setauthsize,
7625 + .encrypt = aead_encrypt,
7626 + .decrypt = aead_decrypt,
7627 + .ivsize = DES3_EDE_BLOCK_SIZE,
7628 + .maxauthsize = SHA384_DIGEST_SIZE,
7629 + },
7630 + .caam = {
7631 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7632 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7633 + OP_ALG_AAI_HMAC_PRECOMP,
7634 + .geniv = true,
7635 + }
7636 + },
7637 + {
7638 + .aead = {
7639 + .base = {
7640 + .cra_name = "authenc(hmac(sha512),"
7641 + "cbc(des3_ede))",
7642 + .cra_driver_name = "authenc-hmac-sha512-"
7643 + "cbc-des3_ede-caam-qi2",
7644 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7645 + },
7646 + .setkey = aead_setkey,
7647 + .setauthsize = aead_setauthsize,
7648 + .encrypt = aead_encrypt,
7649 + .decrypt = aead_decrypt,
7650 + .ivsize = DES3_EDE_BLOCK_SIZE,
7651 + .maxauthsize = SHA512_DIGEST_SIZE,
7652 + },
7653 + .caam = {
7654 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7655 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7656 + OP_ALG_AAI_HMAC_PRECOMP,
7657 + },
7658 + },
7659 + {
7660 + .aead = {
7661 + .base = {
7662 + .cra_name = "echainiv(authenc(hmac(sha512),"
7663 + "cbc(des3_ede)))",
7664 + .cra_driver_name = "echainiv-authenc-"
7665 + "hmac-sha512-"
7666 + "cbc-des3_ede-caam-qi2",
7667 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7668 + },
7669 + .setkey = aead_setkey,
7670 + .setauthsize = aead_setauthsize,
7671 + .encrypt = aead_encrypt,
7672 + .decrypt = aead_decrypt,
7673 + .ivsize = DES3_EDE_BLOCK_SIZE,
7674 + .maxauthsize = SHA512_DIGEST_SIZE,
7675 + },
7676 + .caam = {
7677 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7678 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7679 + OP_ALG_AAI_HMAC_PRECOMP,
7680 + .geniv = true,
7681 + }
7682 + },
7683 + {
7684 + .aead = {
7685 + .base = {
7686 + .cra_name = "authenc(hmac(md5),cbc(des))",
7687 + .cra_driver_name = "authenc-hmac-md5-"
7688 + "cbc-des-caam-qi2",
7689 + .cra_blocksize = DES_BLOCK_SIZE,
7690 + },
7691 + .setkey = aead_setkey,
7692 + .setauthsize = aead_setauthsize,
7693 + .encrypt = aead_encrypt,
7694 + .decrypt = aead_decrypt,
7695 + .ivsize = DES_BLOCK_SIZE,
7696 + .maxauthsize = MD5_DIGEST_SIZE,
7697 + },
7698 + .caam = {
7699 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7700 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7701 + OP_ALG_AAI_HMAC_PRECOMP,
7702 + },
7703 + },
7704 + {
7705 + .aead = {
7706 + .base = {
7707 + .cra_name = "echainiv(authenc(hmac(md5),"
7708 + "cbc(des)))",
7709 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7710 + "cbc-des-caam-qi2",
7711 + .cra_blocksize = DES_BLOCK_SIZE,
7712 + },
7713 + .setkey = aead_setkey,
7714 + .setauthsize = aead_setauthsize,
7715 + .encrypt = aead_encrypt,
7716 + .decrypt = aead_decrypt,
7717 + .ivsize = DES_BLOCK_SIZE,
7718 + .maxauthsize = MD5_DIGEST_SIZE,
7719 + },
7720 + .caam = {
7721 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7722 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7723 + OP_ALG_AAI_HMAC_PRECOMP,
7724 + .geniv = true,
7725 + }
7726 + },
7727 + {
7728 + .aead = {
7729 + .base = {
7730 + .cra_name = "authenc(hmac(sha1),cbc(des))",
7731 + .cra_driver_name = "authenc-hmac-sha1-"
7732 + "cbc-des-caam-qi2",
7733 + .cra_blocksize = DES_BLOCK_SIZE,
7734 + },
7735 + .setkey = aead_setkey,
7736 + .setauthsize = aead_setauthsize,
7737 + .encrypt = aead_encrypt,
7738 + .decrypt = aead_decrypt,
7739 + .ivsize = DES_BLOCK_SIZE,
7740 + .maxauthsize = SHA1_DIGEST_SIZE,
7741 + },
7742 + .caam = {
7743 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7744 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7745 + OP_ALG_AAI_HMAC_PRECOMP,
7746 + },
7747 + },
7748 + {
7749 + .aead = {
7750 + .base = {
7751 + .cra_name = "echainiv(authenc(hmac(sha1),"
7752 + "cbc(des)))",
7753 + .cra_driver_name = "echainiv-authenc-"
7754 + "hmac-sha1-cbc-des-caam-qi2",
7755 + .cra_blocksize = DES_BLOCK_SIZE,
7756 + },
7757 + .setkey = aead_setkey,
7758 + .setauthsize = aead_setauthsize,
7759 + .encrypt = aead_encrypt,
7760 + .decrypt = aead_decrypt,
7761 + .ivsize = DES_BLOCK_SIZE,
7762 + .maxauthsize = SHA1_DIGEST_SIZE,
7763 + },
7764 + .caam = {
7765 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7766 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7767 + OP_ALG_AAI_HMAC_PRECOMP,
7768 + .geniv = true,
7769 + }
7770 + },
7771 + {
7772 + .aead = {
7773 + .base = {
7774 + .cra_name = "authenc(hmac(sha224),cbc(des))",
7775 + .cra_driver_name = "authenc-hmac-sha224-"
7776 + "cbc-des-caam-qi2",
7777 + .cra_blocksize = DES_BLOCK_SIZE,
7778 + },
7779 + .setkey = aead_setkey,
7780 + .setauthsize = aead_setauthsize,
7781 + .encrypt = aead_encrypt,
7782 + .decrypt = aead_decrypt,
7783 + .ivsize = DES_BLOCK_SIZE,
7784 + .maxauthsize = SHA224_DIGEST_SIZE,
7785 + },
7786 + .caam = {
7787 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7788 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7789 + OP_ALG_AAI_HMAC_PRECOMP,
7790 + },
7791 + },
7792 + {
7793 + .aead = {
7794 + .base = {
7795 + .cra_name = "echainiv(authenc(hmac(sha224),"
7796 + "cbc(des)))",
7797 + .cra_driver_name = "echainiv-authenc-"
7798 + "hmac-sha224-cbc-des-"
7799 + "caam-qi2",
7800 + .cra_blocksize = DES_BLOCK_SIZE,
7801 + },
7802 + .setkey = aead_setkey,
7803 + .setauthsize = aead_setauthsize,
7804 + .encrypt = aead_encrypt,
7805 + .decrypt = aead_decrypt,
7806 + .ivsize = DES_BLOCK_SIZE,
7807 + .maxauthsize = SHA224_DIGEST_SIZE,
7808 + },
7809 + .caam = {
7810 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7811 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7812 + OP_ALG_AAI_HMAC_PRECOMP,
7813 + .geniv = true,
7814 + }
7815 + },
7816 + {
7817 + .aead = {
7818 + .base = {
7819 + .cra_name = "authenc(hmac(sha256),cbc(des))",
7820 + .cra_driver_name = "authenc-hmac-sha256-"
7821 + "cbc-des-caam-qi2",
7822 + .cra_blocksize = DES_BLOCK_SIZE,
7823 + },
7824 + .setkey = aead_setkey,
7825 + .setauthsize = aead_setauthsize,
7826 + .encrypt = aead_encrypt,
7827 + .decrypt = aead_decrypt,
7828 + .ivsize = DES_BLOCK_SIZE,
7829 + .maxauthsize = SHA256_DIGEST_SIZE,
7830 + },
7831 + .caam = {
7832 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7833 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7834 + OP_ALG_AAI_HMAC_PRECOMP,
7835 + },
7836 + },
7837 + {
7838 + .aead = {
7839 + .base = {
7840 + .cra_name = "echainiv(authenc(hmac(sha256),"
7841 + "cbc(des)))",
7842 + .cra_driver_name = "echainiv-authenc-"
7843 +						   "hmac-sha256-cbc-des-"
7844 + "caam-qi2",
7845 + .cra_blocksize = DES_BLOCK_SIZE,
7846 + },
7847 + .setkey = aead_setkey,
7848 + .setauthsize = aead_setauthsize,
7849 + .encrypt = aead_encrypt,
7850 + .decrypt = aead_decrypt,
7851 + .ivsize = DES_BLOCK_SIZE,
7852 + .maxauthsize = SHA256_DIGEST_SIZE,
7853 + },
7854 + .caam = {
7855 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7856 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7857 + OP_ALG_AAI_HMAC_PRECOMP,
7858 + .geniv = true,
7859 + },
7860 + },
7861 + {
7862 + .aead = {
7863 + .base = {
7864 + .cra_name = "authenc(hmac(sha384),cbc(des))",
7865 + .cra_driver_name = "authenc-hmac-sha384-"
7866 + "cbc-des-caam-qi2",
7867 + .cra_blocksize = DES_BLOCK_SIZE,
7868 + },
7869 + .setkey = aead_setkey,
7870 + .setauthsize = aead_setauthsize,
7871 + .encrypt = aead_encrypt,
7872 + .decrypt = aead_decrypt,
7873 + .ivsize = DES_BLOCK_SIZE,
7874 + .maxauthsize = SHA384_DIGEST_SIZE,
7875 + },
7876 + .caam = {
7877 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7878 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7879 + OP_ALG_AAI_HMAC_PRECOMP,
7880 + },
7881 + },
7882 + {
7883 + .aead = {
7884 + .base = {
7885 + .cra_name = "echainiv(authenc(hmac(sha384),"
7886 + "cbc(des)))",
7887 + .cra_driver_name = "echainiv-authenc-"
7888 + "hmac-sha384-cbc-des-"
7889 + "caam-qi2",
7890 + .cra_blocksize = DES_BLOCK_SIZE,
7891 + },
7892 + .setkey = aead_setkey,
7893 + .setauthsize = aead_setauthsize,
7894 + .encrypt = aead_encrypt,
7895 + .decrypt = aead_decrypt,
7896 + .ivsize = DES_BLOCK_SIZE,
7897 + .maxauthsize = SHA384_DIGEST_SIZE,
7898 + },
7899 + .caam = {
7900 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7901 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7902 + OP_ALG_AAI_HMAC_PRECOMP,
7903 + .geniv = true,
7904 + }
7905 + },
7906 + {
7907 + .aead = {
7908 + .base = {
7909 + .cra_name = "authenc(hmac(sha512),cbc(des))",
7910 + .cra_driver_name = "authenc-hmac-sha512-"
7911 + "cbc-des-caam-qi2",
7912 + .cra_blocksize = DES_BLOCK_SIZE,
7913 + },
7914 + .setkey = aead_setkey,
7915 + .setauthsize = aead_setauthsize,
7916 + .encrypt = aead_encrypt,
7917 + .decrypt = aead_decrypt,
7918 + .ivsize = DES_BLOCK_SIZE,
7919 + .maxauthsize = SHA512_DIGEST_SIZE,
7920 + },
7921 + .caam = {
7922 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7923 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7924 + OP_ALG_AAI_HMAC_PRECOMP,
7925 + }
7926 + },
7927 + {
7928 + .aead = {
7929 + .base = {
7930 + .cra_name = "echainiv(authenc(hmac(sha512),"
7931 + "cbc(des)))",
7932 + .cra_driver_name = "echainiv-authenc-"
7933 + "hmac-sha512-cbc-des-"
7934 + "caam-qi2",
7935 + .cra_blocksize = DES_BLOCK_SIZE,
7936 + },
7937 + .setkey = aead_setkey,
7938 + .setauthsize = aead_setauthsize,
7939 + .encrypt = aead_encrypt,
7940 + .decrypt = aead_decrypt,
7941 + .ivsize = DES_BLOCK_SIZE,
7942 + .maxauthsize = SHA512_DIGEST_SIZE,
7943 + },
7944 + .caam = {
7945 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7946 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7947 + OP_ALG_AAI_HMAC_PRECOMP,
7948 + .geniv = true,
7949 + }
7950 + },
7951 + {
7952 + .aead = {
7953 + .base = {
7954 + .cra_name = "authenc(hmac(md5),"
7955 + "rfc3686(ctr(aes)))",
7956 + .cra_driver_name = "authenc-hmac-md5-"
7957 + "rfc3686-ctr-aes-caam-qi2",
7958 + .cra_blocksize = 1,
7959 + },
7960 + .setkey = aead_setkey,
7961 + .setauthsize = aead_setauthsize,
7962 + .encrypt = aead_encrypt,
7963 + .decrypt = aead_decrypt,
7964 + .ivsize = CTR_RFC3686_IV_SIZE,
7965 + .maxauthsize = MD5_DIGEST_SIZE,
7966 + },
7967 + .caam = {
7968 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7969 + OP_ALG_AAI_CTR_MOD128,
7970 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7971 + OP_ALG_AAI_HMAC_PRECOMP,
7972 + .rfc3686 = true,
7973 + },
7974 + },
7975 + {
7976 + .aead = {
7977 + .base = {
7978 + .cra_name = "seqiv(authenc("
7979 + "hmac(md5),rfc3686(ctr(aes))))",
7980 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
7981 + "rfc3686-ctr-aes-caam-qi2",
7982 + .cra_blocksize = 1,
7983 + },
7984 + .setkey = aead_setkey,
7985 + .setauthsize = aead_setauthsize,
7986 + .encrypt = aead_encrypt,
7987 + .decrypt = aead_decrypt,
7988 + .ivsize = CTR_RFC3686_IV_SIZE,
7989 + .maxauthsize = MD5_DIGEST_SIZE,
7990 + },
7991 + .caam = {
7992 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7993 + OP_ALG_AAI_CTR_MOD128,
7994 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7995 + OP_ALG_AAI_HMAC_PRECOMP,
7996 + .rfc3686 = true,
7997 + .geniv = true,
7998 + },
7999 + },
8000 + {
8001 + .aead = {
8002 + .base = {
8003 + .cra_name = "authenc(hmac(sha1),"
8004 + "rfc3686(ctr(aes)))",
8005 + .cra_driver_name = "authenc-hmac-sha1-"
8006 + "rfc3686-ctr-aes-caam-qi2",
8007 + .cra_blocksize = 1,
8008 + },
8009 + .setkey = aead_setkey,
8010 + .setauthsize = aead_setauthsize,
8011 + .encrypt = aead_encrypt,
8012 + .decrypt = aead_decrypt,
8013 + .ivsize = CTR_RFC3686_IV_SIZE,
8014 + .maxauthsize = SHA1_DIGEST_SIZE,
8015 + },
8016 + .caam = {
8017 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8018 + OP_ALG_AAI_CTR_MOD128,
8019 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8020 + OP_ALG_AAI_HMAC_PRECOMP,
8021 + .rfc3686 = true,
8022 + },
8023 + },
8024 + {
8025 + .aead = {
8026 + .base = {
8027 + .cra_name = "seqiv(authenc("
8028 + "hmac(sha1),rfc3686(ctr(aes))))",
8029 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
8030 + "rfc3686-ctr-aes-caam-qi2",
8031 + .cra_blocksize = 1,
8032 + },
8033 + .setkey = aead_setkey,
8034 + .setauthsize = aead_setauthsize,
8035 + .encrypt = aead_encrypt,
8036 + .decrypt = aead_decrypt,
8037 + .ivsize = CTR_RFC3686_IV_SIZE,
8038 + .maxauthsize = SHA1_DIGEST_SIZE,
8039 + },
8040 + .caam = {
8041 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8042 + OP_ALG_AAI_CTR_MOD128,
8043 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8044 + OP_ALG_AAI_HMAC_PRECOMP,
8045 + .rfc3686 = true,
8046 + .geniv = true,
8047 + },
8048 + },
8049 + {
8050 + .aead = {
8051 + .base = {
8052 + .cra_name = "authenc(hmac(sha224),"
8053 + "rfc3686(ctr(aes)))",
8054 + .cra_driver_name = "authenc-hmac-sha224-"
8055 + "rfc3686-ctr-aes-caam-qi2",
8056 + .cra_blocksize = 1,
8057 + },
8058 + .setkey = aead_setkey,
8059 + .setauthsize = aead_setauthsize,
8060 + .encrypt = aead_encrypt,
8061 + .decrypt = aead_decrypt,
8062 + .ivsize = CTR_RFC3686_IV_SIZE,
8063 + .maxauthsize = SHA224_DIGEST_SIZE,
8064 + },
8065 + .caam = {
8066 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8067 + OP_ALG_AAI_CTR_MOD128,
8068 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8069 + OP_ALG_AAI_HMAC_PRECOMP,
8070 + .rfc3686 = true,
8071 + },
8072 + },
8073 + {
8074 + .aead = {
8075 + .base = {
8076 + .cra_name = "seqiv(authenc("
8077 + "hmac(sha224),rfc3686(ctr(aes))))",
8078 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
8079 + "rfc3686-ctr-aes-caam-qi2",
8080 + .cra_blocksize = 1,
8081 + },
8082 + .setkey = aead_setkey,
8083 + .setauthsize = aead_setauthsize,
8084 + .encrypt = aead_encrypt,
8085 + .decrypt = aead_decrypt,
8086 + .ivsize = CTR_RFC3686_IV_SIZE,
8087 + .maxauthsize = SHA224_DIGEST_SIZE,
8088 + },
8089 + .caam = {
8090 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8091 + OP_ALG_AAI_CTR_MOD128,
8092 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8093 + OP_ALG_AAI_HMAC_PRECOMP,
8094 + .rfc3686 = true,
8095 + .geniv = true,
8096 + },
8097 + },
8098 + {
8099 + .aead = {
8100 + .base = {
8101 + .cra_name = "authenc(hmac(sha256),"
8102 + "rfc3686(ctr(aes)))",
8103 + .cra_driver_name = "authenc-hmac-sha256-"
8104 + "rfc3686-ctr-aes-caam-qi2",
8105 + .cra_blocksize = 1,
8106 + },
8107 + .setkey = aead_setkey,
8108 + .setauthsize = aead_setauthsize,
8109 + .encrypt = aead_encrypt,
8110 + .decrypt = aead_decrypt,
8111 + .ivsize = CTR_RFC3686_IV_SIZE,
8112 + .maxauthsize = SHA256_DIGEST_SIZE,
8113 + },
8114 + .caam = {
8115 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8116 + OP_ALG_AAI_CTR_MOD128,
8117 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8118 + OP_ALG_AAI_HMAC_PRECOMP,
8119 + .rfc3686 = true,
8120 + },
8121 + },
8122 + {
8123 + .aead = {
8124 + .base = {
8125 + .cra_name = "seqiv(authenc(hmac(sha256),"
8126 + "rfc3686(ctr(aes))))",
8127 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
8128 + "rfc3686-ctr-aes-caam-qi2",
8129 + .cra_blocksize = 1,
8130 + },
8131 + .setkey = aead_setkey,
8132 + .setauthsize = aead_setauthsize,
8133 + .encrypt = aead_encrypt,
8134 + .decrypt = aead_decrypt,
8135 + .ivsize = CTR_RFC3686_IV_SIZE,
8136 + .maxauthsize = SHA256_DIGEST_SIZE,
8137 + },
8138 + .caam = {
8139 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8140 + OP_ALG_AAI_CTR_MOD128,
8141 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8142 + OP_ALG_AAI_HMAC_PRECOMP,
8143 + .rfc3686 = true,
8144 + .geniv = true,
8145 + },
8146 + },
8147 + {
8148 + .aead = {
8149 + .base = {
8150 + .cra_name = "authenc(hmac(sha384),"
8151 + "rfc3686(ctr(aes)))",
8152 + .cra_driver_name = "authenc-hmac-sha384-"
8153 + "rfc3686-ctr-aes-caam-qi2",
8154 + .cra_blocksize = 1,
8155 + },
8156 + .setkey = aead_setkey,
8157 + .setauthsize = aead_setauthsize,
8158 + .encrypt = aead_encrypt,
8159 + .decrypt = aead_decrypt,
8160 + .ivsize = CTR_RFC3686_IV_SIZE,
8161 + .maxauthsize = SHA384_DIGEST_SIZE,
8162 + },
8163 + .caam = {
8164 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8165 + OP_ALG_AAI_CTR_MOD128,
8166 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8167 + OP_ALG_AAI_HMAC_PRECOMP,
8168 + .rfc3686 = true,
8169 + },
8170 + },
8171 + {
8172 + .aead = {
8173 + .base = {
8174 + .cra_name = "seqiv(authenc(hmac(sha384),"
8175 + "rfc3686(ctr(aes))))",
8176 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
8177 + "rfc3686-ctr-aes-caam-qi2",
8178 + .cra_blocksize = 1,
8179 + },
8180 + .setkey = aead_setkey,
8181 + .setauthsize = aead_setauthsize,
8182 + .encrypt = aead_encrypt,
8183 + .decrypt = aead_decrypt,
8184 + .ivsize = CTR_RFC3686_IV_SIZE,
8185 + .maxauthsize = SHA384_DIGEST_SIZE,
8186 + },
8187 + .caam = {
8188 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8189 + OP_ALG_AAI_CTR_MOD128,
8190 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8191 + OP_ALG_AAI_HMAC_PRECOMP,
8192 + .rfc3686 = true,
8193 + .geniv = true,
8194 + },
8195 + },
8196 + {
8197 + .aead = {
8198 + .base = {
8199 + .cra_name = "rfc7539(chacha20,poly1305)",
8200 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
8201 + "caam-qi2",
8202 + .cra_blocksize = 1,
8203 + },
8204 + .setkey = chachapoly_setkey,
8205 + .setauthsize = chachapoly_setauthsize,
8206 + .encrypt = aead_encrypt,
8207 + .decrypt = aead_decrypt,
8208 + .ivsize = CHACHAPOLY_IV_SIZE,
8209 + .maxauthsize = POLY1305_DIGEST_SIZE,
8210 + },
8211 + .caam = {
8212 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8213 + OP_ALG_AAI_AEAD,
8214 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8215 + OP_ALG_AAI_AEAD,
8216 + },
8217 + },
8218 + {
8219 + .aead = {
8220 + .base = {
8221 + .cra_name = "rfc7539esp(chacha20,poly1305)",
8222 + .cra_driver_name = "rfc7539esp-chacha20-"
8223 + "poly1305-caam-qi2",
8224 + .cra_blocksize = 1,
8225 + },
8226 + .setkey = chachapoly_setkey,
8227 + .setauthsize = chachapoly_setauthsize,
8228 + .encrypt = aead_encrypt,
8229 + .decrypt = aead_decrypt,
8230 + .ivsize = 8,
8231 + .maxauthsize = POLY1305_DIGEST_SIZE,
8232 + },
8233 + .caam = {
8234 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8235 + OP_ALG_AAI_AEAD,
8236 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8237 + OP_ALG_AAI_AEAD,
8238 + },
8239 + },
8240 + {
8241 + .aead = {
8242 + .base = {
8243 + .cra_name = "authenc(hmac(sha512),"
8244 + "rfc3686(ctr(aes)))",
8245 + .cra_driver_name = "authenc-hmac-sha512-"
8246 + "rfc3686-ctr-aes-caam-qi2",
8247 + .cra_blocksize = 1,
8248 + },
8249 + .setkey = aead_setkey,
8250 + .setauthsize = aead_setauthsize,
8251 + .encrypt = aead_encrypt,
8252 + .decrypt = aead_decrypt,
8253 + .ivsize = CTR_RFC3686_IV_SIZE,
8254 + .maxauthsize = SHA512_DIGEST_SIZE,
8255 + },
8256 + .caam = {
8257 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8258 + OP_ALG_AAI_CTR_MOD128,
8259 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8260 + OP_ALG_AAI_HMAC_PRECOMP,
8261 + .rfc3686 = true,
8262 + },
8263 + },
8264 + {
8265 + .aead = {
8266 + .base = {
8267 + .cra_name = "seqiv(authenc(hmac(sha512),"
8268 + "rfc3686(ctr(aes))))",
8269 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
8270 + "rfc3686-ctr-aes-caam-qi2",
8271 + .cra_blocksize = 1,
8272 + },
8273 + .setkey = aead_setkey,
8274 + .setauthsize = aead_setauthsize,
8275 + .encrypt = aead_encrypt,
8276 + .decrypt = aead_decrypt,
8277 + .ivsize = CTR_RFC3686_IV_SIZE,
8278 + .maxauthsize = SHA512_DIGEST_SIZE,
8279 + },
8280 + .caam = {
8281 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8282 + OP_ALG_AAI_CTR_MOD128,
8283 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8284 + OP_ALG_AAI_HMAC_PRECOMP,
8285 + .rfc3686 = true,
8286 + .geniv = true,
8287 + },
8288 + },
8289 + {
8290 + .aead = {
8291 + .base = {
8292 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
8293 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
8294 + .cra_blocksize = AES_BLOCK_SIZE,
8295 + },
8296 + .setkey = tls_setkey,
8297 + .setauthsize = tls_setauthsize,
8298 + .encrypt = tls_encrypt,
8299 + .decrypt = tls_decrypt,
8300 + .ivsize = AES_BLOCK_SIZE,
8301 + .maxauthsize = SHA1_DIGEST_SIZE,
8302 + },
8303 + .caam = {
8304 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
8305 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8306 + OP_ALG_AAI_HMAC_PRECOMP,
8307 + },
8308 + },
8309 +};
8310 +
8311 +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
8312 +{
8313 + struct skcipher_alg *alg = &t_alg->skcipher;
8314 +
8315 + alg->base.cra_module = THIS_MODULE;
8316 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
8317 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8318 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8319 +
8320 + alg->init = caam_cra_init_skcipher;
8321 + alg->exit = caam_cra_exit;
8322 +}
8323 +
8324 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
8325 +{
8326 + struct aead_alg *alg = &t_alg->aead;
8327 +
8328 + alg->base.cra_module = THIS_MODULE;
8329 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
8330 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8331 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8332 +
8333 + alg->init = caam_cra_init_aead;
8334 + alg->exit = caam_cra_exit_aead;
8335 +}
8336 +
8337 +/* max hash key is max split key size */
8338 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
8339 +
8340 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
8341 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
8342 +
8343 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
8344 + CAAM_MAX_HASH_KEY_SIZE)
8345 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
8346 +
8347 +/* caam context sizes for hashes: running digest + 8 */
8348 +#define HASH_MSG_LEN 8
8349 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
8350 +
8351 +enum hash_optype {
8352 + UPDATE = 0,
8353 + UPDATE_FIRST,
8354 + FINALIZE,
8355 + DIGEST,
8356 + HASH_NUM_OP
8357 +};
8358 +
8359 +/**
8360 + * caam_hash_ctx - ahash per-session context
8361 + * @flc: Flow Contexts array
8362 + * @flc_dma: I/O virtual addresses of the Flow Contexts
8363 + * @key: virtual address of the authentication key
8364 + * @dev: dpseci device
8365 + * @ctx_len: size of Context Register
8366 + * @adata: hashing algorithm details
8367 + */
8368 +struct caam_hash_ctx {
8369 + struct caam_flc flc[HASH_NUM_OP];
8370 + dma_addr_t flc_dma[HASH_NUM_OP];
8371 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
8372 + struct device *dev;
8373 + int ctx_len;
8374 + struct alginfo adata;
8375 +};
8376 +
8377 +/* ahash state */
8378 +struct caam_hash_state {
8379 + struct caam_request caam_req;
8380 + dma_addr_t buf_dma;
8381 + dma_addr_t ctx_dma;
8382 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8383 + int buflen_0;
8384 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8385 + int buflen_1;
8386 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
8387 + int (*update)(struct ahash_request *req);
8388 + int (*final)(struct ahash_request *req);
8389 + int (*finup)(struct ahash_request *req);
8390 + int current_buf;
8391 +};
8392 +
8393 +struct caam_export_state {
8394 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
8395 + u8 caam_ctx[MAX_CTX_LEN];
8396 + int buflen;
8397 + int (*update)(struct ahash_request *req);
8398 + int (*final)(struct ahash_request *req);
8399 + int (*finup)(struct ahash_request *req);
8400 +};
8401 +
8402 +static inline void switch_buf(struct caam_hash_state *state)
8403 +{
8404 + state->current_buf ^= 1;
8405 +}
8406 +
8407 +static inline u8 *current_buf(struct caam_hash_state *state)
8408 +{
8409 + return state->current_buf ? state->buf_1 : state->buf_0;
8410 +}
8411 +
8412 +static inline u8 *alt_buf(struct caam_hash_state *state)
8413 +{
8414 + return state->current_buf ? state->buf_0 : state->buf_1;
8415 +}
8416 +
8417 +static inline int *current_buflen(struct caam_hash_state *state)
8418 +{
8419 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
8420 +}
8421 +
8422 +static inline int *alt_buflen(struct caam_hash_state *state)
8423 +{
8424 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
8425 +}
8426 +
8427 +/* Map current buffer in state (if length > 0) and put it in link table */
8428 +static inline int buf_map_to_qm_sg(struct device *dev,
8429 + struct dpaa2_sg_entry *qm_sg,
8430 + struct caam_hash_state *state)
8431 +{
8432 + int buflen = *current_buflen(state);
8433 +
8434 + if (!buflen)
8435 + return 0;
8436 +
8437 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
8438 + DMA_TO_DEVICE);
8439 + if (dma_mapping_error(dev, state->buf_dma)) {
8440 + dev_err(dev, "unable to map buf\n");
8441 + state->buf_dma = 0;
8442 + return -ENOMEM;
8443 + }
8444 +
8445 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
8446 +
8447 + return 0;
8448 +}
8449 +
8450 +/* Map state->caam_ctx, and add it to link table */
8451 +static inline int ctx_map_to_qm_sg(struct device *dev,
8452 + struct caam_hash_state *state, int ctx_len,
8453 + struct dpaa2_sg_entry *qm_sg, u32 flag)
8454 +{
8455 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
8456 + if (dma_mapping_error(dev, state->ctx_dma)) {
8457 + dev_err(dev, "unable to map ctx\n");
8458 + state->ctx_dma = 0;
8459 + return -ENOMEM;
8460 + }
8461 +
8462 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
8463 +
8464 + return 0;
8465 +}
8466 +
8467 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
8468 +{
8469 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8470 + int digestsize = crypto_ahash_digestsize(ahash);
8471 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
8472 + struct caam_flc *flc;
8473 + u32 *desc;
8474 +
8475 + ctx->adata.key_virt = ctx->key;
8476 + ctx->adata.key_inline = true;
8477 +
8478 + /* ahash_update shared descriptor */
8479 + flc = &ctx->flc[UPDATE];
8480 + desc = flc->sh_desc;
8481 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
8482 + ctx->ctx_len, true, priv->sec_attr.era);
8483 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8484 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
8485 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8486 +#ifdef DEBUG
8487 + print_hex_dump(KERN_ERR,
8488 + "ahash update shdesc@" __stringify(__LINE__)": ",
8489 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8490 +#endif
8491 +
8492 + /* ahash_update_first shared descriptor */
8493 + flc = &ctx->flc[UPDATE_FIRST];
8494 + desc = flc->sh_desc;
8495 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
8496 + ctx->ctx_len, false, priv->sec_attr.era);
8497 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8498 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
8499 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8500 +#ifdef DEBUG
8501 + print_hex_dump(KERN_ERR,
8502 + "ahash update first shdesc@" __stringify(__LINE__)": ",
8503 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8504 +#endif
8505 +
8506 + /* ahash_final shared descriptor */
8507 + flc = &ctx->flc[FINALIZE];
8508 + desc = flc->sh_desc;
8509 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
8510 + ctx->ctx_len, true, priv->sec_attr.era);
8511 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8512 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
8513 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8514 +#ifdef DEBUG
8515 + print_hex_dump(KERN_ERR,
8516 + "ahash final shdesc@" __stringify(__LINE__)": ",
8517 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8518 +#endif
8519 +
8520 + /* ahash_digest shared descriptor */
8521 + flc = &ctx->flc[DIGEST];
8522 + desc = flc->sh_desc;
8523 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
8524 + ctx->ctx_len, false, priv->sec_attr.era);
8525 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8526 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
8527 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8528 +#ifdef DEBUG
8529 + print_hex_dump(KERN_ERR,
8530 + "ahash digest shdesc@" __stringify(__LINE__)": ",
8531 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8532 +#endif
8533 +
8534 + return 0;
8535 +}
8536 +
8537 +/* Digest the key if it is longer than the hash block size */
8538 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
8539 + u32 *keylen, u8 *key_out, u32 digestsize)
8540 +{
8541 + struct caam_request *req_ctx;
8542 + u32 *desc;
8543 + struct split_key_sh_result result;
8544 + dma_addr_t src_dma, dst_dma;
8545 + struct caam_flc *flc;
8546 + dma_addr_t flc_dma;
8547 + int ret = -ENOMEM;
8548 + struct dpaa2_fl_entry *in_fle, *out_fle;
8549 +
8550 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
8551 + if (!req_ctx)
8552 + return -ENOMEM;
8553 +
8554 + in_fle = &req_ctx->fd_flt[1];
8555 + out_fle = &req_ctx->fd_flt[0];
8556 +
8557 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
8558 + if (!flc)
8559 + goto err_flc;
8560 +
8561 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
8562 + DMA_TO_DEVICE);
8563 + if (dma_mapping_error(ctx->dev, src_dma)) {
8564 + dev_err(ctx->dev, "unable to map key input memory\n");
8565 + goto err_src_dma;
8566 + }
8567 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
8568 + DMA_FROM_DEVICE);
8569 + if (dma_mapping_error(ctx->dev, dst_dma)) {
8570 + dev_err(ctx->dev, "unable to map key output memory\n");
8571 + goto err_dst_dma;
8572 + }
8573 +
8574 + desc = flc->sh_desc;
8575 +
8576 + init_sh_desc(desc, 0);
8577 +
8578 + /* descriptor to perform unkeyed hash on key_in */
8579 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
8580 + OP_ALG_AS_INITFINAL);
8581 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
8582 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
8583 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
8584 + LDST_SRCDST_BYTE_CONTEXT);
8585 +
8586 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8587 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
8588 + desc_bytes(desc), DMA_TO_DEVICE);
8589 + if (dma_mapping_error(ctx->dev, flc_dma)) {
8590 + dev_err(ctx->dev, "unable to map shared descriptor\n");
8591 + goto err_flc_dma;
8592 + }
8593 +
8594 + dpaa2_fl_set_final(in_fle, true);
8595 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8596 + dpaa2_fl_set_addr(in_fle, src_dma);
8597 + dpaa2_fl_set_len(in_fle, *keylen);
8598 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8599 + dpaa2_fl_set_addr(out_fle, dst_dma);
8600 + dpaa2_fl_set_len(out_fle, digestsize);
8601 +
8602 +#ifdef DEBUG
8603 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
8604 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
8605 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
8606 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8607 +#endif
8608 +
8609 + result.err = 0;
8610 + init_completion(&result.completion);
8611 + result.dev = ctx->dev;
8612 +
8613 + req_ctx->flc = flc;
8614 + req_ctx->flc_dma = flc_dma;
8615 + req_ctx->cbk = split_key_sh_done;
8616 + req_ctx->ctx = &result;
8617 +
8618 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8619 + if (ret == -EINPROGRESS) {
8620 + /* in progress */
8621 + wait_for_completion(&result.completion);
8622 + ret = result.err;
8623 +#ifdef DEBUG
8624 + print_hex_dump(KERN_ERR,
8625 + "digested key@" __stringify(__LINE__)": ",
8626 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
8627 + 1);
8628 +#endif
8629 + }
8630 +
8631 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
8632 + DMA_TO_DEVICE);
8633 +err_flc_dma:
8634 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
8635 +err_dst_dma:
8636 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
8637 +err_src_dma:
8638 + kfree(flc);
8639 +err_flc:
8640 + kfree(req_ctx);
8641 +
8642 + *keylen = digestsize;
8643 +
8644 + return ret;
8645 +}
8646 +
8647 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
8648 + unsigned int keylen)
8649 +{
8650 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8651 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
8652 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
8653 + int ret;
8654 + u8 *hashed_key = NULL;
8655 +
8656 +#ifdef DEBUG
8657 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
8658 +#endif
8659 +
8660 + if (keylen > blocksize) {
8661 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
8662 + GFP_KERNEL | GFP_DMA);
8663 + if (!hashed_key)
8664 + return -ENOMEM;
8665 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
8666 + digestsize);
8667 + if (ret)
8668 + goto bad_free_key;
8669 + key = hashed_key;
8670 + }
8671 +
8672 + ctx->adata.keylen = keylen;
8673 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8674 + OP_ALG_ALGSEL_MASK);
8675 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
8676 + goto bad_free_key;
8677 +
8678 + memcpy(ctx->key, key, keylen);
8679 +
8680 + kfree(hashed_key);
8681 + return ahash_set_sh_desc(ahash);
8682 +bad_free_key:
8683 + kfree(hashed_key);
8684 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
8685 + return -EINVAL;
8686 +}
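
A minimal consumer-side sketch (not part of the patch) of how this setkey path is reached through the generic ahash API; the function name example_hmac_setkey and the choice of hmac(sha256) are illustrative only. A key longer than the block size is first reduced to key = H(key) by hash_digest_key() above, so e.g. a 100-byte key for hmac(sha1) is replaced by its 20-byte SHA-1 digest before being copied into ctx->key.

#include <crypto/hash.h>

static int example_hmac_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keylen may exceed the 64-byte SHA-256 block; the driver digests it */
	err = crypto_ahash_setkey(tfm, key, keylen);

	crypto_free_ahash(tfm);
	return err;
}
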
8687 +
8688 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
8689 + struct ahash_request *req, int dst_len)
8690 +{
8691 + struct caam_hash_state *state = ahash_request_ctx(req);
8692 +
8693 + if (edesc->src_nents)
8694 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
8695 + if (edesc->dst_dma)
8696 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
8697 +
8698 + if (edesc->qm_sg_bytes)
8699 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
8700 + DMA_TO_DEVICE);
8701 +
8702 + if (state->buf_dma) {
8703 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
8704 + DMA_TO_DEVICE);
8705 + state->buf_dma = 0;
8706 + }
8707 +}
8708 +
8709 +static inline void ahash_unmap_ctx(struct device *dev,
8710 + struct ahash_edesc *edesc,
8711 + struct ahash_request *req, int dst_len,
8712 + u32 flag)
8713 +{
8714 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8715 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8716 + struct caam_hash_state *state = ahash_request_ctx(req);
8717 +
8718 + if (state->ctx_dma) {
8719 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
8720 + state->ctx_dma = 0;
8721 + }
8722 + ahash_unmap(dev, edesc, req, dst_len);
8723 +}
8724 +
8725 +static void ahash_done(void *cbk_ctx, u32 status)
8726 +{
8727 + struct crypto_async_request *areq = cbk_ctx;
8728 + struct ahash_request *req = ahash_request_cast(areq);
8729 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8730 + struct caam_hash_state *state = ahash_request_ctx(req);
8731 + struct ahash_edesc *edesc = state->caam_req.edesc;
8732 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8733 + int digestsize = crypto_ahash_digestsize(ahash);
8734 + int ecode = 0;
8735 +
8736 +#ifdef DEBUG
8737 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8738 +#endif
8739 +
8740 + if (unlikely(status)) {
8741 + caam_qi2_strstatus(ctx->dev, status);
8742 + ecode = -EIO;
8743 + }
8744 +
8745 + ahash_unmap(ctx->dev, edesc, req, digestsize);
8746 + qi_cache_free(edesc);
8747 +
8748 +#ifdef DEBUG
8749 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8750 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8751 + ctx->ctx_len, 1);
8752 + if (req->result)
8753 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8754 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8755 + digestsize, 1);
8756 +#endif
8757 +
8758 + req->base.complete(&req->base, ecode);
8759 +}
8760 +
8761 +static void ahash_done_bi(void *cbk_ctx, u32 status)
8762 +{
8763 + struct crypto_async_request *areq = cbk_ctx;
8764 + struct ahash_request *req = ahash_request_cast(areq);
8765 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8766 + struct caam_hash_state *state = ahash_request_ctx(req);
8767 + struct ahash_edesc *edesc = state->caam_req.edesc;
8768 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8769 + int ecode = 0;
8770 +#ifdef DEBUG
8771 + int digestsize = crypto_ahash_digestsize(ahash);
8772 +
8773 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8774 +#endif
8775 +
8776 + if (unlikely(status)) {
8777 + caam_qi2_strstatus(ctx->dev, status);
8778 + ecode = -EIO;
8779 + }
8780 +
8781 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8782 + switch_buf(state);
8783 + qi_cache_free(edesc);
8784 +
8785 +#ifdef DEBUG
8786 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8787 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8788 + ctx->ctx_len, 1);
8789 + if (req->result)
8790 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8791 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8792 + digestsize, 1);
8793 +#endif
8794 +
8795 + req->base.complete(&req->base, ecode);
8796 +}
8797 +
8798 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
8799 +{
8800 + struct crypto_async_request *areq = cbk_ctx;
8801 + struct ahash_request *req = ahash_request_cast(areq);
8802 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8803 + struct caam_hash_state *state = ahash_request_ctx(req);
8804 + struct ahash_edesc *edesc = state->caam_req.edesc;
8805 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8806 + int digestsize = crypto_ahash_digestsize(ahash);
8807 + int ecode = 0;
8808 +
8809 +#ifdef DEBUG
8810 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8811 +#endif
8812 +
8813 + if (unlikely(status)) {
8814 + caam_qi2_strstatus(ctx->dev, status);
8815 + ecode = -EIO;
8816 + }
8817 +
8818 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
8819 + qi_cache_free(edesc);
8820 +
8821 +#ifdef DEBUG
8822 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8823 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8824 + ctx->ctx_len, 1);
8825 + if (req->result)
8826 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8827 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8828 + digestsize, 1);
8829 +#endif
8830 +
8831 + req->base.complete(&req->base, ecode);
8832 +}
8833 +
8834 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
8835 +{
8836 + struct crypto_async_request *areq = cbk_ctx;
8837 + struct ahash_request *req = ahash_request_cast(areq);
8838 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8839 + struct caam_hash_state *state = ahash_request_ctx(req);
8840 + struct ahash_edesc *edesc = state->caam_req.edesc;
8841 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8842 + int ecode = 0;
8843 +#ifdef DEBUG
8844 + int digestsize = crypto_ahash_digestsize(ahash);
8845 +
8846 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8847 +#endif
8848 +
8849 + if (unlikely(status)) {
8850 + caam_qi2_strstatus(ctx->dev, status);
8851 + ecode = -EIO;
8852 + }
8853 +
8854 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
8855 + switch_buf(state);
8856 + qi_cache_free(edesc);
8857 +
8858 +#ifdef DEBUG
8859 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8860 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8861 + ctx->ctx_len, 1);
8862 + if (req->result)
8863 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8864 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8865 + digestsize, 1);
8866 +#endif
8867 +
8868 + req->base.complete(&req->base, ecode);
8869 +}
8870 +
8871 +static int ahash_update_ctx(struct ahash_request *req)
8872 +{
8873 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8874 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8875 + struct caam_hash_state *state = ahash_request_ctx(req);
8876 + struct caam_request *req_ctx = &state->caam_req;
8877 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8878 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8879 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8880 + GFP_KERNEL : GFP_ATOMIC;
8881 + u8 *buf = current_buf(state);
8882 + int *buflen = current_buflen(state);
8883 + u8 *next_buf = alt_buf(state);
8884 + int *next_buflen = alt_buflen(state), last_buflen;
8885 + int in_len = *buflen + req->nbytes, to_hash;
8886 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
8887 + struct ahash_edesc *edesc;
8888 + int ret = 0;
8889 +
8890 + last_buflen = *next_buflen;
8891 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
8892 + to_hash = in_len - *next_buflen;
8893 +
8894 + if (to_hash) {
8895 + struct dpaa2_sg_entry *sg_table;
8896 +
8897 + src_nents = sg_nents_for_len(req->src,
8898 + req->nbytes - (*next_buflen));
8899 + if (src_nents < 0) {
8900 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8901 + return src_nents;
8902 + }
8903 +
8904 + if (src_nents) {
8905 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8906 + DMA_TO_DEVICE);
8907 + if (!mapped_nents) {
8908 + dev_err(ctx->dev, "unable to DMA map source\n");
8909 + return -ENOMEM;
8910 + }
8911 + } else {
8912 + mapped_nents = 0;
8913 + }
8914 +
8915 + /* allocate space for base edesc and link tables */
8916 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8917 + if (!edesc) {
8918 + dma_unmap_sg(ctx->dev, req->src, src_nents,
8919 + DMA_TO_DEVICE);
8920 + return -ENOMEM;
8921 + }
8922 +
8923 + edesc->src_nents = src_nents;
8924 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
8925 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
8926 + sizeof(*sg_table);
8927 + sg_table = &edesc->sgt[0];
8928 +
8929 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
8930 + DMA_BIDIRECTIONAL);
8931 + if (ret)
8932 + goto unmap_ctx;
8933 +
8934 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
8935 + if (ret)
8936 + goto unmap_ctx;
8937 +
8938 + if (mapped_nents) {
8939 + sg_to_qm_sg_last(req->src, mapped_nents,
8940 + sg_table + qm_sg_src_index, 0);
8941 + if (*next_buflen)
8942 + scatterwalk_map_and_copy(next_buf, req->src,
8943 + to_hash - *buflen,
8944 + *next_buflen, 0);
8945 + } else {
8946 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
8947 + true);
8948 + }
8949 +
8950 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8951 + qm_sg_bytes, DMA_TO_DEVICE);
8952 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8953 + dev_err(ctx->dev, "unable to map S/G table\n");
8954 + ret = -ENOMEM;
8955 + goto unmap_ctx;
8956 + }
8957 + edesc->qm_sg_bytes = qm_sg_bytes;
8958 +
8959 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8960 + dpaa2_fl_set_final(in_fle, true);
8961 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8962 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8963 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
8964 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8965 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
8966 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
8967 +
8968 + req_ctx->flc = &ctx->flc[UPDATE];
8969 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
8970 + req_ctx->cbk = ahash_done_bi;
8971 + req_ctx->ctx = &req->base;
8972 + req_ctx->edesc = edesc;
8973 +
8974 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8975 + if (ret != -EINPROGRESS &&
8976 + !(ret == -EBUSY &&
8977 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8978 + goto unmap_ctx;
8979 + } else if (*next_buflen) {
8980 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
8981 + req->nbytes, 0);
8982 + *buflen = *next_buflen;
8983 + *next_buflen = last_buflen;
8984 + }
8985 +#ifdef DEBUG
8986 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
8987 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
8988 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
8989 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
8990 + *next_buflen, 1);
8991 +#endif
8992 +
8993 + return ret;
8994 +unmap_ctx:
8995 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8996 + qi_cache_free(edesc);
8997 + return ret;
8998 +}
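
A worked example of the buffering arithmetic in ahash_update_ctx() above, assuming a 64-byte block size such as SHA-256's: with 36 bytes already buffered (*buflen = 36) and a 64-byte update (req->nbytes = 64), in_len = 100, *next_buflen = 100 & 63 = 36 and to_hash = 64, so the engine is fed the 36 buffered bytes plus the first 28 bytes of the request while the remaining 36 request bytes are copied into the alternate buffer for the next call; with only a 20-byte update instead, in_len = 56 < 64 gives to_hash = 0, nothing is sent to hardware and the 20 bytes are simply appended to the current buffer.
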
8999 +
9000 +static int ahash_final_ctx(struct ahash_request *req)
9001 +{
9002 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9003 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9004 + struct caam_hash_state *state = ahash_request_ctx(req);
9005 + struct caam_request *req_ctx = &state->caam_req;
9006 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9007 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9008 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9009 + GFP_KERNEL : GFP_ATOMIC;
9010 + int buflen = *current_buflen(state);
9011 + int qm_sg_bytes, qm_sg_src_index;
9012 + int digestsize = crypto_ahash_digestsize(ahash);
9013 + struct ahash_edesc *edesc;
9014 + struct dpaa2_sg_entry *sg_table;
9015 + int ret;
9016 +
9017 + /* allocate space for base edesc and link tables */
9018 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9019 + if (!edesc)
9020 + return -ENOMEM;
9021 +
9022 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
9023 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
9024 + sg_table = &edesc->sgt[0];
9025 +
9026 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9027 + DMA_TO_DEVICE);
9028 + if (ret)
9029 + goto unmap_ctx;
9030 +
9031 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9032 + if (ret)
9033 + goto unmap_ctx;
9034 +
9035 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
9036 +
9037 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9038 + DMA_TO_DEVICE);
9039 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9040 + dev_err(ctx->dev, "unable to map S/G table\n");
9041 + ret = -ENOMEM;
9042 + goto unmap_ctx;
9043 + }
9044 + edesc->qm_sg_bytes = qm_sg_bytes;
9045 +
9046 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9047 + DMA_FROM_DEVICE);
9048 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9049 + dev_err(ctx->dev, "unable to map dst\n");
9050 + edesc->dst_dma = 0;
9051 + ret = -ENOMEM;
9052 + goto unmap_ctx;
9053 + }
9054 +
9055 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9056 + dpaa2_fl_set_final(in_fle, true);
9057 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9058 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9059 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
9060 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9061 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9062 + dpaa2_fl_set_len(out_fle, digestsize);
9063 +
9064 + req_ctx->flc = &ctx->flc[FINALIZE];
9065 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9066 + req_ctx->cbk = ahash_done_ctx_src;
9067 + req_ctx->ctx = &req->base;
9068 + req_ctx->edesc = edesc;
9069 +
9070 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9071 + if (ret == -EINPROGRESS ||
9072 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9073 + return ret;
9074 +
9075 +unmap_ctx:
9076 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
9077 + qi_cache_free(edesc);
9078 + return ret;
9079 +}
9080 +
9081 +static int ahash_finup_ctx(struct ahash_request *req)
9082 +{
9083 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9084 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9085 + struct caam_hash_state *state = ahash_request_ctx(req);
9086 + struct caam_request *req_ctx = &state->caam_req;
9087 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9088 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9089 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9090 + GFP_KERNEL : GFP_ATOMIC;
9091 + int buflen = *current_buflen(state);
9092 + int qm_sg_bytes, qm_sg_src_index;
9093 + int src_nents, mapped_nents;
9094 + int digestsize = crypto_ahash_digestsize(ahash);
9095 + struct ahash_edesc *edesc;
9096 + struct dpaa2_sg_entry *sg_table;
9097 + int ret;
9098 +
9099 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9100 + if (src_nents < 0) {
9101 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9102 + return src_nents;
9103 + }
9104 +
9105 + if (src_nents) {
9106 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9107 + DMA_TO_DEVICE);
9108 + if (!mapped_nents) {
9109 + dev_err(ctx->dev, "unable to DMA map source\n");
9110 + return -ENOMEM;
9111 + }
9112 + } else {
9113 + mapped_nents = 0;
9114 + }
9115 +
9116 + /* allocate space for base edesc and link tables */
9117 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9118 + if (!edesc) {
9119 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9120 + return -ENOMEM;
9121 + }
9122 +
9123 + edesc->src_nents = src_nents;
9124 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
9125 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
9126 + sg_table = &edesc->sgt[0];
9127 +
9128 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9129 + DMA_TO_DEVICE);
9130 + if (ret)
9131 + goto unmap_ctx;
9132 +
9133 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9134 + if (ret)
9135 + goto unmap_ctx;
9136 +
9137 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
9138 +
9139 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9140 + DMA_TO_DEVICE);
9141 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9142 + dev_err(ctx->dev, "unable to map S/G table\n");
9143 + ret = -ENOMEM;
9144 + goto unmap_ctx;
9145 + }
9146 + edesc->qm_sg_bytes = qm_sg_bytes;
9147 +
9148 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9149 + DMA_FROM_DEVICE);
9150 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9151 + dev_err(ctx->dev, "unable to map dst\n");
9152 + edesc->dst_dma = 0;
9153 + ret = -ENOMEM;
9154 + goto unmap_ctx;
9155 + }
9156 +
9157 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9158 + dpaa2_fl_set_final(in_fle, true);
9159 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9160 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9161 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
9162 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9163 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9164 + dpaa2_fl_set_len(out_fle, digestsize);
9165 +
9166 + req_ctx->flc = &ctx->flc[FINALIZE];
9167 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9168 + req_ctx->cbk = ahash_done_ctx_src;
9169 + req_ctx->ctx = &req->base;
9170 + req_ctx->edesc = edesc;
9171 +
9172 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9173 + if (ret == -EINPROGRESS ||
9174 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9175 + return ret;
9176 +
9177 +unmap_ctx:
9178 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
9179 + qi_cache_free(edesc);
9180 + return ret;
9181 +}
9182 +
9183 +static int ahash_digest(struct ahash_request *req)
9184 +{
9185 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9186 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9187 + struct caam_hash_state *state = ahash_request_ctx(req);
9188 + struct caam_request *req_ctx = &state->caam_req;
9189 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9190 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9191 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9192 + GFP_KERNEL : GFP_ATOMIC;
9193 + int digestsize = crypto_ahash_digestsize(ahash);
9194 + int src_nents, mapped_nents;
9195 + struct ahash_edesc *edesc;
9196 + int ret = -ENOMEM;
9197 +
9198 + state->buf_dma = 0;
9199 +
9200 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9201 + if (src_nents < 0) {
9202 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9203 + return src_nents;
9204 + }
9205 +
9206 + if (src_nents) {
9207 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9208 + DMA_TO_DEVICE);
9209 + if (!mapped_nents) {
9210 + dev_err(ctx->dev, "unable to map source for DMA\n");
9211 + return ret;
9212 + }
9213 + } else {
9214 + mapped_nents = 0;
9215 + }
9216 +
9217 + /* allocate space for base edesc and link tables */
9218 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9219 + if (!edesc) {
9220 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9221 + return ret;
9222 + }
9223 +
9224 + edesc->src_nents = src_nents;
9225 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9226 +
9227 + if (mapped_nents > 1) {
9228 + int qm_sg_bytes;
9229 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
9230 +
9231 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9232 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9233 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9234 + qm_sg_bytes, DMA_TO_DEVICE);
9235 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9236 + dev_err(ctx->dev, "unable to map S/G table\n");
9237 + goto unmap;
9238 + }
9239 + edesc->qm_sg_bytes = qm_sg_bytes;
9240 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9241 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9242 + } else {
9243 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9244 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9245 + }
9246 +
9247 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9248 + DMA_FROM_DEVICE);
9249 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9250 + dev_err(ctx->dev, "unable to map dst\n");
9251 + edesc->dst_dma = 0;
9252 + goto unmap;
9253 + }
9254 +
9255 + dpaa2_fl_set_final(in_fle, true);
9256 + dpaa2_fl_set_len(in_fle, req->nbytes);
9257 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9258 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9259 + dpaa2_fl_set_len(out_fle, digestsize);
9260 +
9261 + req_ctx->flc = &ctx->flc[DIGEST];
9262 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9263 + req_ctx->cbk = ahash_done;
9264 + req_ctx->ctx = &req->base;
9265 + req_ctx->edesc = edesc;
9266 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9267 + if (ret == -EINPROGRESS ||
9268 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9269 + return ret;
9270 +
9271 +unmap:
9272 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9273 + qi_cache_free(edesc);
9274 + return ret;
9275 +}
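
A hypothetical one-shot consumer sketch (not part of the patch) of the digest path above. crypto_ahash_digest() on this asynchronous implementation returns -EINPROGRESS (or -EBUSY when backlogged), which crypto_wait_req() turns into a blocking wait; the source buffer must be DMA-able linear kernel memory, since req->src is mapped for DMA by ahash_digest().

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_sha256_digest(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* -EINPROGRESS/-EBUSY from the driver become a blocking wait here */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
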
9276 +
9277 +static int ahash_final_no_ctx(struct ahash_request *req)
9278 +{
9279 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9280 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9281 + struct caam_hash_state *state = ahash_request_ctx(req);
9282 + struct caam_request *req_ctx = &state->caam_req;
9283 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9284 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9285 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9286 + GFP_KERNEL : GFP_ATOMIC;
9287 + u8 *buf = current_buf(state);
9288 + int buflen = *current_buflen(state);
9289 + int digestsize = crypto_ahash_digestsize(ahash);
9290 + struct ahash_edesc *edesc;
9291 + int ret = -ENOMEM;
9292 +
9293 + /* allocate space for base edesc and link tables */
9294 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9295 + if (!edesc)
9296 + return ret;
9297 +
9298 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
9299 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
9300 + dev_err(ctx->dev, "unable to map src\n");
9301 + goto unmap;
9302 + }
9303 +
9304 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9305 + DMA_FROM_DEVICE);
9306 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9307 + dev_err(ctx->dev, "unable to map dst\n");
9308 + edesc->dst_dma = 0;
9309 + goto unmap;
9310 + }
9311 +
9312 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9313 + dpaa2_fl_set_final(in_fle, true);
9314 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9315 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
9316 + dpaa2_fl_set_len(in_fle, buflen);
9317 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9318 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9319 + dpaa2_fl_set_len(out_fle, digestsize);
9320 +
9321 + req_ctx->flc = &ctx->flc[DIGEST];
9322 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9323 + req_ctx->cbk = ahash_done;
9324 + req_ctx->ctx = &req->base;
9325 + req_ctx->edesc = edesc;
9326 +
9327 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9328 + if (ret == -EINPROGRESS ||
9329 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9330 + return ret;
9331 +
9332 +unmap:
9333 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9334 + qi_cache_free(edesc);
9335 + return ret;
9336 +}
9337 +
9338 +static int ahash_update_no_ctx(struct ahash_request *req)
9339 +{
9340 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9341 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9342 + struct caam_hash_state *state = ahash_request_ctx(req);
9343 + struct caam_request *req_ctx = &state->caam_req;
9344 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9345 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9346 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9347 + GFP_KERNEL : GFP_ATOMIC;
9348 + u8 *buf = current_buf(state);
9349 + int *buflen = current_buflen(state);
9350 + u8 *next_buf = alt_buf(state);
9351 + int *next_buflen = alt_buflen(state);
9352 + int in_len = *buflen + req->nbytes, to_hash;
9353 + int qm_sg_bytes, src_nents, mapped_nents;
9354 + struct ahash_edesc *edesc;
9355 + int ret = 0;
9356 +
9357 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
9358 + to_hash = in_len - *next_buflen;
9359 +
9360 + if (to_hash) {
9361 + struct dpaa2_sg_entry *sg_table;
9362 +
9363 + src_nents = sg_nents_for_len(req->src,
9364 + req->nbytes - *next_buflen);
9365 + if (src_nents < 0) {
9366 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9367 + return src_nents;
9368 + }
9369 +
9370 + if (src_nents) {
9371 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9372 + DMA_TO_DEVICE);
9373 + if (!mapped_nents) {
9374 + dev_err(ctx->dev, "unable to DMA map source\n");
9375 + return -ENOMEM;
9376 + }
9377 + } else {
9378 + mapped_nents = 0;
9379 + }
9380 +
9381 + /* allocate space for base edesc and link tables */
9382 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9383 + if (!edesc) {
9384 + dma_unmap_sg(ctx->dev, req->src, src_nents,
9385 + DMA_TO_DEVICE);
9386 + return -ENOMEM;
9387 + }
9388 +
9389 + edesc->src_nents = src_nents;
9390 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
9391 + sg_table = &edesc->sgt[0];
9392 +
9393 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9394 + if (ret)
9395 + goto unmap_ctx;
9396 +
9397 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9398 +
9399 + if (*next_buflen)
9400 + scatterwalk_map_and_copy(next_buf, req->src,
9401 + to_hash - *buflen,
9402 + *next_buflen, 0);
9403 +
9404 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9405 + qm_sg_bytes, DMA_TO_DEVICE);
9406 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9407 + dev_err(ctx->dev, "unable to map S/G table\n");
9408 + ret = -ENOMEM;
9409 + goto unmap_ctx;
9410 + }
9411 + edesc->qm_sg_bytes = qm_sg_bytes;
9412 +
9413 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9414 + ctx->ctx_len, DMA_FROM_DEVICE);
9415 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9416 + dev_err(ctx->dev, "unable to map ctx\n");
9417 + state->ctx_dma = 0;
9418 + ret = -ENOMEM;
9419 + goto unmap_ctx;
9420 + }
9421 +
9422 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9423 + dpaa2_fl_set_final(in_fle, true);
9424 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9425 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9426 + dpaa2_fl_set_len(in_fle, to_hash);
9427 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9428 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9429 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9430 +
9431 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9432 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9433 + req_ctx->cbk = ahash_done_ctx_dst;
9434 + req_ctx->ctx = &req->base;
9435 + req_ctx->edesc = edesc;
9436 +
9437 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9438 + if (ret != -EINPROGRESS &&
9439 + !(ret == -EBUSY &&
9440 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9441 + goto unmap_ctx;
9442 +
9443 + state->update = ahash_update_ctx;
9444 + state->finup = ahash_finup_ctx;
9445 + state->final = ahash_final_ctx;
9446 + } else if (*next_buflen) {
9447 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
9448 + req->nbytes, 0);
9449 + *buflen = *next_buflen;
9450 + *next_buflen = 0;
9451 + }
9452 +#ifdef DEBUG
9453 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
9454 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
9455 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9456 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
9457 + *next_buflen, 1);
9458 +#endif
9459 +
9460 + return ret;
9461 +unmap_ctx:
9462 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
9463 + qi_cache_free(edesc);
9464 + return ret;
9465 +}
9466 +
9467 +static int ahash_finup_no_ctx(struct ahash_request *req)
9468 +{
9469 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9470 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9471 + struct caam_hash_state *state = ahash_request_ctx(req);
9472 + struct caam_request *req_ctx = &state->caam_req;
9473 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9474 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9475 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9476 + GFP_KERNEL : GFP_ATOMIC;
9477 + int buflen = *current_buflen(state);
9478 + int qm_sg_bytes, src_nents, mapped_nents;
9479 + int digestsize = crypto_ahash_digestsize(ahash);
9480 + struct ahash_edesc *edesc;
9481 + struct dpaa2_sg_entry *sg_table;
9482 + int ret;
9483 +
9484 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9485 + if (src_nents < 0) {
9486 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9487 + return src_nents;
9488 + }
9489 +
9490 + if (src_nents) {
9491 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9492 + DMA_TO_DEVICE);
9493 + if (!mapped_nents) {
9494 + dev_err(ctx->dev, "unable to DMA map source\n");
9495 + return -ENOMEM;
9496 + }
9497 + } else {
9498 + mapped_nents = 0;
9499 + }
9500 +
9501 + /* allocate space for base edesc and link tables */
9502 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9503 + if (!edesc) {
9504 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9505 + return -ENOMEM;
9506 + }
9507 +
9508 + edesc->src_nents = src_nents;
9509 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
9510 + sg_table = &edesc->sgt[0];
9511 +
9512 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9513 + if (ret)
9514 + goto unmap;
9515 +
9516 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9517 +
9518 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9519 + DMA_TO_DEVICE);
9520 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9521 + dev_err(ctx->dev, "unable to map S/G table\n");
9522 + ret = -ENOMEM;
9523 + goto unmap;
9524 + }
9525 + edesc->qm_sg_bytes = qm_sg_bytes;
9526 +
9527 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9528 + DMA_FROM_DEVICE);
9529 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9530 + dev_err(ctx->dev, "unable to map dst\n");
9531 + edesc->dst_dma = 0;
9532 + ret = -ENOMEM;
9533 + goto unmap;
9534 + }
9535 +
9536 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9537 + dpaa2_fl_set_final(in_fle, true);
9538 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9539 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9540 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
9541 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9542 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9543 + dpaa2_fl_set_len(out_fle, digestsize);
9544 +
9545 + req_ctx->flc = &ctx->flc[DIGEST];
9546 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9547 + req_ctx->cbk = ahash_done;
9548 + req_ctx->ctx = &req->base;
9549 + req_ctx->edesc = edesc;
9550 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9551 + if (ret != -EINPROGRESS &&
9552 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9553 + goto unmap;
9554 +
9555 + return ret;
9556 +unmap:
9557 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9558 + qi_cache_free(edesc);
9559 + return -ENOMEM;
9560 +}
9561 +
9562 +static int ahash_update_first(struct ahash_request *req)
9563 +{
9564 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9565 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9566 + struct caam_hash_state *state = ahash_request_ctx(req);
9567 + struct caam_request *req_ctx = &state->caam_req;
9568 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9569 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9570 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9571 + GFP_KERNEL : GFP_ATOMIC;
9572 + u8 *next_buf = alt_buf(state);
9573 + int *next_buflen = alt_buflen(state);
9574 + int to_hash;
9575 + int src_nents, mapped_nents;
9576 + struct ahash_edesc *edesc;
9577 + int ret = 0;
9578 +
9579 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
9580 + 1);
9581 + to_hash = req->nbytes - *next_buflen;
9582 +
9583 + if (to_hash) {
9584 + struct dpaa2_sg_entry *sg_table;
9585 +
9586 + src_nents = sg_nents_for_len(req->src,
9587 + req->nbytes - (*next_buflen));
9588 + if (src_nents < 0) {
9589 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9590 + return src_nents;
9591 + }
9592 +
9593 + if (src_nents) {
9594 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9595 + DMA_TO_DEVICE);
9596 + if (!mapped_nents) {
9597 + dev_err(ctx->dev, "unable to map source for DMA\n");
9598 + return -ENOMEM;
9599 + }
9600 + } else {
9601 + mapped_nents = 0;
9602 + }
9603 +
9604 + /* allocate space for base edesc and link tables */
9605 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9606 + if (!edesc) {
9607 + dma_unmap_sg(ctx->dev, req->src, src_nents,
9608 + DMA_TO_DEVICE);
9609 + return -ENOMEM;
9610 + }
9611 +
9612 + edesc->src_nents = src_nents;
9613 + sg_table = &edesc->sgt[0];
9614 +
9615 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9616 + dpaa2_fl_set_final(in_fle, true);
9617 + dpaa2_fl_set_len(in_fle, to_hash);
9618 +
9619 + if (mapped_nents > 1) {
9620 + int qm_sg_bytes;
9621 +
9622 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9623 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9624 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9625 + qm_sg_bytes,
9626 + DMA_TO_DEVICE);
9627 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9628 + dev_err(ctx->dev, "unable to map S/G table\n");
9629 + ret = -ENOMEM;
9630 + goto unmap_ctx;
9631 + }
9632 + edesc->qm_sg_bytes = qm_sg_bytes;
9633 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9634 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9635 + } else {
9636 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9637 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9638 + }
9639 +
9640 + if (*next_buflen)
9641 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
9642 + *next_buflen, 0);
9643 +
9644 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9645 + ctx->ctx_len, DMA_FROM_DEVICE);
9646 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9647 + dev_err(ctx->dev, "unable to map ctx\n");
9648 + state->ctx_dma = 0;
9649 + ret = -ENOMEM;
9650 + goto unmap_ctx;
9651 + }
9652 +
9653 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9654 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9655 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9656 +
9657 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9658 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9659 + req_ctx->cbk = ahash_done_ctx_dst;
9660 + req_ctx->ctx = &req->base;
9661 + req_ctx->edesc = edesc;
9662 +
9663 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9664 + if (ret != -EINPROGRESS &&
9665 + !(ret == -EBUSY && req->base.flags &
9666 + CRYPTO_TFM_REQ_MAY_BACKLOG))
9667 + goto unmap_ctx;
9668 +
9669 + state->update = ahash_update_ctx;
9670 + state->finup = ahash_finup_ctx;
9671 + state->final = ahash_final_ctx;
9672 + } else if (*next_buflen) {
9673 + state->update = ahash_update_no_ctx;
9674 + state->finup = ahash_finup_no_ctx;
9675 + state->final = ahash_final_no_ctx;
9676 + scatterwalk_map_and_copy(next_buf, req->src, 0,
9677 + req->nbytes, 0);
9678 + switch_buf(state);
9679 + }
9680 +#ifdef DEBUG
9681 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9682 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
9683 +#endif
9684 +
9685 + return ret;
9686 +unmap_ctx:
9687 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
9688 + qi_cache_free(edesc);
9689 + return ret;
9690 +}
9691 +
9692 +static int ahash_finup_first(struct ahash_request *req)
9693 +{
9694 + return ahash_digest(req);
9695 +}
9696 +
9697 +static int ahash_init(struct ahash_request *req)
9698 +{
9699 + struct caam_hash_state *state = ahash_request_ctx(req);
9700 +
9701 + state->update = ahash_update_first;
9702 + state->finup = ahash_finup_first;
9703 + state->final = ahash_final_no_ctx;
9704 +
9705 + state->ctx_dma = 0;
9706 + state->current_buf = 0;
9707 + state->buf_dma = 0;
9708 + state->buflen_0 = 0;
9709 + state->buflen_1 = 0;
9710 +
9711 + return 0;
9712 +}
9713 +
9714 +static int ahash_update(struct ahash_request *req)
9715 +{
9716 + struct caam_hash_state *state = ahash_request_ctx(req);
9717 +
9718 + return state->update(req);
9719 +}
9720 +
9721 +static int ahash_finup(struct ahash_request *req)
9722 +{
9723 + struct caam_hash_state *state = ahash_request_ctx(req);
9724 +
9725 + return state->finup(req);
9726 +}
9727 +
9728 +static int ahash_final(struct ahash_request *req)
9729 +{
9730 + struct caam_hash_state *state = ahash_request_ctx(req);
9731 +
9732 + return state->final(req);
9733 +}
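
The four wrappers above implement a small per-request state machine: ahash_init() starts every sequence on ahash_update_first() / ahash_finup_first() / ahash_final_no_ctx(); once ahash_update_first() has pushed at least one full block through the engine it repoints update/finup/final at the _ctx variants, which chain further data through the running digest kept in state->caam_ctx, while sub-block first updates fall back to the _no_ctx handlers that keep buffering until a block accumulates or the request is finalised.
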
9734 +
9735 +static int ahash_export(struct ahash_request *req, void *out)
9736 +{
9737 + struct caam_hash_state *state = ahash_request_ctx(req);
9738 + struct caam_export_state *export = out;
9739 + int len;
9740 + u8 *buf;
9741 +
9742 + if (state->current_buf) {
9743 + buf = state->buf_1;
9744 + len = state->buflen_1;
9745 + } else {
9746 + buf = state->buf_0;
9747 + len = state->buflen_0;
9748 + }
9749 +
9750 + memcpy(export->buf, buf, len);
9751 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
9752 + export->buflen = len;
9753 + export->update = state->update;
9754 + export->final = state->final;
9755 + export->finup = state->finup;
9756 +
9757 + return 0;
9758 +}
9759 +
9760 +static int ahash_import(struct ahash_request *req, const void *in)
9761 +{
9762 + struct caam_hash_state *state = ahash_request_ctx(req);
9763 + const struct caam_export_state *export = in;
9764 +
9765 + memset(state, 0, sizeof(*state));
9766 + memcpy(state->buf_0, export->buf, export->buflen);
9767 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
9768 + state->buflen_0 = export->buflen;
9769 + state->update = export->update;
9770 + state->final = export->final;
9771 + state->finup = export->finup;
9772 +
9773 + return 0;
9774 +}
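
The export/import pair above serialises only the software state of a partial hash (the running-digest snapshot, the pending partial-block buffer and the three handler pointers), sized by .statesize below. A hypothetical sketch (not part of the patch) of parking a request and resuming it later through the generic helpers; the function names are illustrative:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Park a partially-hashed request; returns the opaque state to resume from. */
static void *example_park(struct crypto_ahash *tfm, struct ahash_request *req)
{
	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

	if (state && crypto_ahash_export(req, state)) {
		kfree(state);
		state = NULL;
	}
	return state;
}

/* Resume a previously parked hash on (possibly another) request. */
static int example_resume(struct ahash_request *req, void *state)
{
	int err = crypto_ahash_import(req, state);

	kfree(state);
	return err;
}
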
9775 +
9776 +struct caam_hash_template {
9777 + char name[CRYPTO_MAX_ALG_NAME];
9778 + char driver_name[CRYPTO_MAX_ALG_NAME];
9779 + char hmac_name[CRYPTO_MAX_ALG_NAME];
9780 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
9781 + unsigned int blocksize;
9782 + struct ahash_alg template_ahash;
9783 + u32 alg_type;
9784 +};
9785 +
9786 +/* ahash descriptors */
9787 +static struct caam_hash_template driver_hash[] = {
9788 + {
9789 + .name = "sha1",
9790 + .driver_name = "sha1-caam-qi2",
9791 + .hmac_name = "hmac(sha1)",
9792 + .hmac_driver_name = "hmac-sha1-caam-qi2",
9793 + .blocksize = SHA1_BLOCK_SIZE,
9794 + .template_ahash = {
9795 + .init = ahash_init,
9796 + .update = ahash_update,
9797 + .final = ahash_final,
9798 + .finup = ahash_finup,
9799 + .digest = ahash_digest,
9800 + .export = ahash_export,
9801 + .import = ahash_import,
9802 + .setkey = ahash_setkey,
9803 + .halg = {
9804 + .digestsize = SHA1_DIGEST_SIZE,
9805 + .statesize = sizeof(struct caam_export_state),
9806 + },
9807 + },
9808 + .alg_type = OP_ALG_ALGSEL_SHA1,
9809 + }, {
9810 + .name = "sha224",
9811 + .driver_name = "sha224-caam-qi2",
9812 + .hmac_name = "hmac(sha224)",
9813 + .hmac_driver_name = "hmac-sha224-caam-qi2",
9814 + .blocksize = SHA224_BLOCK_SIZE,
9815 + .template_ahash = {
9816 + .init = ahash_init,
9817 + .update = ahash_update,
9818 + .final = ahash_final,
9819 + .finup = ahash_finup,
9820 + .digest = ahash_digest,
9821 + .export = ahash_export,
9822 + .import = ahash_import,
9823 + .setkey = ahash_setkey,
9824 + .halg = {
9825 + .digestsize = SHA224_DIGEST_SIZE,
9826 + .statesize = sizeof(struct caam_export_state),
9827 + },
9828 + },
9829 + .alg_type = OP_ALG_ALGSEL_SHA224,
9830 + }, {
9831 + .name = "sha256",
9832 + .driver_name = "sha256-caam-qi2",
9833 + .hmac_name = "hmac(sha256)",
9834 + .hmac_driver_name = "hmac-sha256-caam-qi2",
9835 + .blocksize = SHA256_BLOCK_SIZE,
9836 + .template_ahash = {
9837 + .init = ahash_init,
9838 + .update = ahash_update,
9839 + .final = ahash_final,
9840 + .finup = ahash_finup,
9841 + .digest = ahash_digest,
9842 + .export = ahash_export,
9843 + .import = ahash_import,
9844 + .setkey = ahash_setkey,
9845 + .halg = {
9846 + .digestsize = SHA256_DIGEST_SIZE,
9847 + .statesize = sizeof(struct caam_export_state),
9848 + },
9849 + },
9850 + .alg_type = OP_ALG_ALGSEL_SHA256,
9851 + }, {
9852 + .name = "sha384",
9853 + .driver_name = "sha384-caam-qi2",
9854 + .hmac_name = "hmac(sha384)",
9855 + .hmac_driver_name = "hmac-sha384-caam-qi2",
9856 + .blocksize = SHA384_BLOCK_SIZE,
9857 + .template_ahash = {
9858 + .init = ahash_init,
9859 + .update = ahash_update,
9860 + .final = ahash_final,
9861 + .finup = ahash_finup,
9862 + .digest = ahash_digest,
9863 + .export = ahash_export,
9864 + .import = ahash_import,
9865 + .setkey = ahash_setkey,
9866 + .halg = {
9867 + .digestsize = SHA384_DIGEST_SIZE,
9868 + .statesize = sizeof(struct caam_export_state),
9869 + },
9870 + },
9871 + .alg_type = OP_ALG_ALGSEL_SHA384,
9872 + }, {
9873 + .name = "sha512",
9874 + .driver_name = "sha512-caam-qi2",
9875 + .hmac_name = "hmac(sha512)",
9876 + .hmac_driver_name = "hmac-sha512-caam-qi2",
9877 + .blocksize = SHA512_BLOCK_SIZE,
9878 + .template_ahash = {
9879 + .init = ahash_init,
9880 + .update = ahash_update,
9881 + .final = ahash_final,
9882 + .finup = ahash_finup,
9883 + .digest = ahash_digest,
9884 + .export = ahash_export,
9885 + .import = ahash_import,
9886 + .setkey = ahash_setkey,
9887 + .halg = {
9888 + .digestsize = SHA512_DIGEST_SIZE,
9889 + .statesize = sizeof(struct caam_export_state),
9890 + },
9891 + },
9892 + .alg_type = OP_ALG_ALGSEL_SHA512,
9893 + }, {
9894 + .name = "md5",
9895 + .driver_name = "md5-caam-qi2",
9896 + .hmac_name = "hmac(md5)",
9897 + .hmac_driver_name = "hmac-md5-caam-qi2",
9898 + .blocksize = MD5_BLOCK_WORDS * 4,
9899 + .template_ahash = {
9900 + .init = ahash_init,
9901 + .update = ahash_update,
9902 + .final = ahash_final,
9903 + .finup = ahash_finup,
9904 + .digest = ahash_digest,
9905 + .export = ahash_export,
9906 + .import = ahash_import,
9907 + .setkey = ahash_setkey,
9908 + .halg = {
9909 + .digestsize = MD5_DIGEST_SIZE,
9910 + .statesize = sizeof(struct caam_export_state),
9911 + },
9912 + },
9913 + .alg_type = OP_ALG_ALGSEL_MD5,
9914 + }
9915 +};
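
caam_hash_alloc() below turns each template entry into either the unkeyed digest variant (e.g. "sha256-caam-qi2") or the keyed HMAC variant ("hmac-sha256-caam-qi2"), depending on its keyed argument, clearing .setkey for the unkeyed one. A hypothetical sketch (not part of the patch) of binding to this implementation explicitly by cra_driver_name rather than letting priority-based selection resolve the generic name:

#include <crypto/hash.h>

/* Hypothetical: request this driver's HMAC-SHA256 explicitly by its
 * cra_driver_name instead of the generic "hmac(sha256)" name. */
static struct crypto_ahash *example_get_caam_qi2_hmac_sha256(void)
{
	return crypto_alloc_ahash("hmac-sha256-caam-qi2", 0, 0);
}
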
9916 +
9917 +struct caam_hash_alg {
9918 + struct list_head entry;
9919 + struct device *dev;
9920 + int alg_type;
9921 + struct ahash_alg ahash_alg;
9922 +};
9923 +
9924 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
9925 +{
9926 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
9927 + struct crypto_alg *base = tfm->__crt_alg;
9928 + struct hash_alg_common *halg =
9929 + container_of(base, struct hash_alg_common, base);
9930 + struct ahash_alg *alg =
9931 + container_of(halg, struct ahash_alg, halg);
9932 + struct caam_hash_alg *caam_hash =
9933 + container_of(alg, struct caam_hash_alg, ahash_alg);
9934 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9935 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
9936 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
9937 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
9938 + HASH_MSG_LEN + 32,
9939 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
9940 + HASH_MSG_LEN + 64,
9941 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
9942 + dma_addr_t dma_addr;
9943 + int i;
9944 +
9945 + ctx->dev = caam_hash->dev;
9946 +
9947 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
9948 + DMA_BIDIRECTIONAL,
9949 + DMA_ATTR_SKIP_CPU_SYNC);
9950 + if (dma_mapping_error(ctx->dev, dma_addr)) {
9951 + dev_err(ctx->dev, "unable to map shared descriptors\n");
9952 + return -ENOMEM;
9953 + }
9954 +
9955 + for (i = 0; i < HASH_NUM_OP; i++)
9956 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
9957 +
9958 + /* copy descriptor header template value */
9959 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
9960 +
9961 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
9962 + OP_ALG_ALGSEL_SUBMASK) >>
9963 + OP_ALG_ALGSEL_SHIFT];
9964 +
9965 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
9966 + sizeof(struct caam_hash_state));
9967 +
9968 + return ahash_set_sh_desc(ahash);
9969 +}
9970 +
9971 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
9972 +{
9973 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9974 +
9975 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
9976 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
9977 +}
9978 +
9979 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
9980 + struct caam_hash_template *template, bool keyed)
9981 +{
9982 + struct caam_hash_alg *t_alg;
9983 + struct ahash_alg *halg;
9984 + struct crypto_alg *alg;
9985 +
9986 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
9987 + if (!t_alg)
9988 + return ERR_PTR(-ENOMEM);
9989 +
9990 + t_alg->ahash_alg = template->template_ahash;
9991 + halg = &t_alg->ahash_alg;
9992 + alg = &halg->halg.base;
9993 +
9994 + if (keyed) {
9995 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
9996 + template->hmac_name);
9997 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
9998 + template->hmac_driver_name);
9999 + } else {
10000 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
10001 + template->name);
10002 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
10003 + template->driver_name);
10004 + t_alg->ahash_alg.setkey = NULL;
10005 + }
10006 + alg->cra_module = THIS_MODULE;
10007 + alg->cra_init = caam_hash_cra_init;
10008 + alg->cra_exit = caam_hash_cra_exit;
10009 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
10010 + alg->cra_priority = CAAM_CRA_PRIORITY;
10011 + alg->cra_blocksize = template->blocksize;
10012 + alg->cra_alignmask = 0;
10013 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
10014 + alg->cra_type = &crypto_ahash_type;
10015 +
10016 + t_alg->alg_type = template->alg_type;
10017 + t_alg->dev = dev;
10018 +
10019 + return t_alg;
10020 +}
10021 +
10022 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
10023 +{
10024 + struct dpaa2_caam_priv_per_cpu *ppriv;
10025 +
10026 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
10027 + napi_schedule_irqoff(&ppriv->napi);
10028 +}
10029 +
10030 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
10031 +{
10032 + struct device *dev = priv->dev;
10033 + struct dpaa2_io_notification_ctx *nctx;
10034 + struct dpaa2_caam_priv_per_cpu *ppriv;
10035 + int err, i = 0, cpu;
10036 +
10037 + for_each_online_cpu(cpu) {
10038 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10039 + ppriv->priv = priv;
10040 + nctx = &ppriv->nctx;
10041 + nctx->is_cdan = 0;
10042 + nctx->id = ppriv->rsp_fqid;
10043 + nctx->desired_cpu = cpu;
10044 + nctx->cb = dpaa2_caam_fqdan_cb;
10045 +
10046 + /* Register notification callbacks */
10047 + ppriv->dpio = dpaa2_io_service_select(cpu);
10048 + err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
10049 + if (unlikely(err)) {
10050 + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
10051 + nctx->cb = NULL;
10052 + /*
10053 + * If no affine DPIO for this core, there's probably
10054 + * none available for next cores either. Signal we want
10055 + * to retry later, in case the DPIO devices weren't
10056 + * probed yet.
10057 + */
10058 + err = -EPROBE_DEFER;
10059 + goto err;
10060 + }
10061 +
10062 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
10063 + dev);
10064 + if (unlikely(!ppriv->store)) {
10065 + dev_err(dev, "dpaa2_io_store_create() failed\n");
10066 + err = -ENOMEM;
10067 + goto err;
10068 + }
10069 +
10070 + if (++i == priv->num_pairs)
10071 + break;
10072 + }
10073 +
10074 + return 0;
10075 +
10076 +err:
10077 + for_each_online_cpu(cpu) {
10078 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10079 + if (!ppriv->nctx.cb)
10080 + break;
10081 + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10082 + }
10083 +
10084 + for_each_online_cpu(cpu) {
10085 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10086 + if (!ppriv->store)
10087 + break;
10088 + dpaa2_io_store_destroy(ppriv->store);
10089 + }
10090 +
10091 + return err;
10092 +}
10093 +
10094 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
10095 +{
10096 + struct dpaa2_caam_priv_per_cpu *ppriv;
10097 + struct device *dev = priv->dev;
10098 + int i = 0, cpu;
10099 +
10100 + for_each_online_cpu(cpu) {
10101 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10102 + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10103 + dpaa2_io_store_destroy(ppriv->store);
10104 +
10105 + if (++i == priv->num_pairs)
10106 + return;
10107 + }
10108 +}
10109 +
10110 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
10111 +{
10112 + struct dpseci_rx_queue_cfg rx_queue_cfg;
10113 + struct device *dev = priv->dev;
10114 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10115 + struct dpaa2_caam_priv_per_cpu *ppriv;
10116 + int err = 0, i = 0, cpu;
10117 +
10118 + /* Configure Rx queues */
10119 + for_each_online_cpu(cpu) {
10120 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10121 +
10122 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
10123 + DPSECI_QUEUE_OPT_USER_CTX;
10124 + rx_queue_cfg.order_preservation_en = 0;
10125 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
10126 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
10127 + /*
10128 + * Rx priority (WQ) doesn't really matter, since we use
10129 + * pull mode, i.e. volatile dequeues from specific FQs
10130 + */
10131 + rx_queue_cfg.dest_cfg.priority = 0;
10132 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
10133 +
10134 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10135 + &rx_queue_cfg);
10136 + if (err) {
10137 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
10138 + err);
10139 + return err;
10140 + }
10141 +
10142 + if (++i == priv->num_pairs)
10143 + break;
10144 + }
10145 +
10146 + return err;
10147 +}
10148 +
10149 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
10150 +{
10151 + struct device *dev = priv->dev;
10152 +
10153 + if (!priv->cscn_mem)
10154 + return;
10155 +
10156 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10157 + kfree(priv->cscn_mem);
10158 +}
10159 +
10160 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
10161 +{
10162 + struct device *dev = priv->dev;
10163 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10164 +
10165 + dpaa2_dpseci_congestion_free(priv);
10166 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10167 +}
10168 +
10169 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
10170 + const struct dpaa2_fd *fd)
10171 +{
10172 + struct caam_request *req;
10173 + u32 fd_err;
10174 +
10175 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
10176 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
10177 + return;
10178 + }
10179 +
10180 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
10181 + if (unlikely(fd_err))
10182 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
10183 +
10184 + /*
10185 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
10186 + * in FD[ERR] or FD[FRC].
10187 + */
10188 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
10189 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
10190 + DMA_BIDIRECTIONAL);
10191 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
10192 +}
10193 +
10194 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
10195 +{
10196 + int err;
10197 +
10198 + /* Retry while portal is busy */
10199 + do {
10200 + err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
10201 + ppriv->store);
10202 + } while (err == -EBUSY);
10203 +
10204 + if (unlikely(err))
10205 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
10206 +
10207 + return err;
10208 +}
10209 +
10210 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
10211 +{
10212 + struct dpaa2_dq *dq;
10213 + int cleaned = 0, is_last;
10214 +
10215 + do {
10216 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
10217 + if (unlikely(!dq)) {
10218 + if (unlikely(!is_last)) {
10219 + dev_dbg(ppriv->priv->dev,
10220 + "FQ %d returned no valid frames\n",
10221 + ppriv->rsp_fqid);
10222 + /*
10223 + * MUST retry until we get some sort of
10224 + * valid response token (be it "empty dequeue"
10225 + * or a valid frame).
10226 + */
10227 + continue;
10228 + }
10229 + break;
10230 + }
10231 +
10232 + /* Process FD */
10233 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
10234 + cleaned++;
10235 + } while (!is_last);
10236 +
10237 + return cleaned;
10238 +}
10239 +
10240 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
10241 +{
10242 + struct dpaa2_caam_priv_per_cpu *ppriv;
10243 + struct dpaa2_caam_priv *priv;
10244 + int err, cleaned = 0, store_cleaned;
10245 +
10246 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
10247 + priv = ppriv->priv;
10248 +
10249 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
10250 + return 0;
10251 +
10252 + do {
10253 + store_cleaned = dpaa2_caam_store_consume(ppriv);
10254 + cleaned += store_cleaned;
10255 +
10256 + if (store_cleaned == 0 ||
10257 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
10258 + break;
10259 +
10260 + /* Try to dequeue some more */
10261 + err = dpaa2_caam_pull_fq(ppriv);
10262 + if (unlikely(err))
10263 + break;
10264 + } while (1);
10265 +
10266 + if (cleaned < budget) {
10267 + napi_complete_done(napi, cleaned);
10268 + err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
10269 + if (unlikely(err))
10270 + dev_err(priv->dev, "Notification rearm failed: %d\n",
10271 + err);
10272 + }
10273 +
10274 + return cleaned;
10275 +}
10276 +
10277 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
10278 + u16 token)
10279 +{
10280 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
10281 + struct device *dev = priv->dev;
10282 + int err;
10283 +
10284 + /*
10285 + * Congestion group feature supported starting with DPSECI API v5.1
10286 + * and only when object has been created with this capability.
10287 + */
10288 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
10289 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
10290 + return 0;
10291 +
10292 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
10293 + GFP_KERNEL | GFP_DMA);
10294 + if (!priv->cscn_mem)
10295 + return -ENOMEM;
10296 +
10297 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
10298 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
10299 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10300 + if (dma_mapping_error(dev, priv->cscn_dma)) {
10301 + dev_err(dev, "Error mapping CSCN memory area\n");
10302 + err = -ENOMEM;
10303 + goto err_dma_map;
10304 + }
10305 +
10306 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
10307 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
10308 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
10309 + cong_notif_cfg.message_ctx = (u64)priv;
10310 + cong_notif_cfg.message_iova = priv->cscn_dma;
10311 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
10312 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
10313 + DPSECI_CGN_MODE_COHERENT_WRITE;
10314 +
10315 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
10316 + &cong_notif_cfg);
10317 + if (err) {
10318 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
10319 + goto err_set_cong;
10320 + }
10321 +
10322 + return 0;
10323 +
10324 +err_set_cong:
10325 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10326 +err_dma_map:
10327 + kfree(priv->cscn_mem);
10328 +
10329 + return err;
10330 +}
10331 +
10332 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
10333 +{
10334 + struct device *dev = &ls_dev->dev;
10335 + struct dpaa2_caam_priv *priv;
10336 + struct dpaa2_caam_priv_per_cpu *ppriv;
10337 + int err, cpu;
10338 + u8 i;
10339 +
10340 + priv = dev_get_drvdata(dev);
10341 +
10342 + priv->dev = dev;
10343 + priv->dpsec_id = ls_dev->obj_desc.id;
10344 +
10345 +	/* Get a handle for the DPSECI this interface is associated with */
10346 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
10347 + if (err) {
10348 + dev_err(dev, "dpsec_open() failed: %d\n", err);
10349 + goto err_open;
10350 + }
10351 +
10352 + dev_info(dev, "Opened dpseci object successfully\n");
10353 +
10354 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
10355 + &priv->minor_ver);
10356 + if (err) {
10357 + dev_err(dev, "dpseci_get_api_version() failed\n");
10358 + goto err_get_vers;
10359 + }
10360 +
10361 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
10362 + &priv->dpseci_attr);
10363 + if (err) {
10364 + dev_err(dev, "dpseci_get_attributes() failed\n");
10365 + goto err_get_vers;
10366 + }
10367 +
10368 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
10369 + &priv->sec_attr);
10370 + if (err) {
10371 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
10372 + goto err_get_vers;
10373 + }
10374 +
10375 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
10376 + if (err) {
10377 + dev_err(dev, "setup_congestion() failed\n");
10378 + goto err_get_vers;
10379 + }
10380 +
10381 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
10382 + priv->dpseci_attr.num_tx_queues);
10383 + if (priv->num_pairs > num_online_cpus()) {
10384 + dev_warn(dev, "%d queues won't be used\n",
10385 + priv->num_pairs - num_online_cpus());
10386 + priv->num_pairs = num_online_cpus();
10387 + }
10388 +
10389 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
10390 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10391 + &priv->rx_queue_attr[i]);
10392 + if (err) {
10393 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
10394 + goto err_get_rx_queue;
10395 + }
10396 + }
10397 +
10398 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
10399 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10400 + &priv->tx_queue_attr[i]);
10401 + if (err) {
10402 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
10403 + goto err_get_rx_queue;
10404 + }
10405 + }
10406 +
10407 + i = 0;
10408 + for_each_online_cpu(cpu) {
10409 + u8 j;
10410 +
10411 + j = i % priv->num_pairs;
10412 +
10413 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10414 + ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
10415 +
10416 + /*
10417 + * Allow all cores to enqueue, while only some of them
10418 + * will take part in dequeuing.
10419 + */
10420 + if (++i > priv->num_pairs)
10421 + continue;
10422 +
10423 + ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
10424 + ppriv->prio = j;
10425 +
10426 + dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
10427 + priv->rx_queue_attr[j].fqid,
10428 + priv->tx_queue_attr[j].fqid);
10429 +
10430 + ppriv->net_dev.dev = *dev;
10431 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
10432 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
10433 + DPAA2_CAAM_NAPI_WEIGHT);
10434 + }
10435 +
10436 + return 0;
10437 +
10438 +err_get_rx_queue:
10439 + dpaa2_dpseci_congestion_free(priv);
10440 +err_get_vers:
10441 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10442 +err_open:
10443 + return err;
10444 +}
10445 +
10446 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
10447 +{
10448 + struct device *dev = priv->dev;
10449 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10450 + struct dpaa2_caam_priv_per_cpu *ppriv;
10451 + int err, i;
10452 +
10453 + for (i = 0; i < priv->num_pairs; i++) {
10454 + ppriv = per_cpu_ptr(priv->ppriv, i);
10455 + napi_enable(&ppriv->napi);
10456 + }
10457 +
10458 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
10459 + if (err) {
10460 + dev_err(dev, "dpseci_enable() failed\n");
10461 + return err;
10462 + }
10463 +
10464 + dev_info(dev, "DPSECI version %d.%d\n",
10465 + priv->major_ver,
10466 + priv->minor_ver);
10467 +
10468 + return 0;
10469 +}
10470 +
10471 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
10472 +{
10473 + struct device *dev = priv->dev;
10474 + struct dpaa2_caam_priv_per_cpu *ppriv;
10475 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10476 + int i, err = 0, enabled;
10477 +
10478 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
10479 + if (err) {
10480 + dev_err(dev, "dpseci_disable() failed\n");
10481 + return err;
10482 + }
10483 +
10484 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
10485 + if (err) {
10486 + dev_err(dev, "dpseci_is_enabled() failed\n");
10487 + return err;
10488 + }
10489 +
10490 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
10491 +
10492 + for (i = 0; i < priv->num_pairs; i++) {
10493 + ppriv = per_cpu_ptr(priv->ppriv, i);
10494 + napi_disable(&ppriv->napi);
10495 + netif_napi_del(&ppriv->napi);
10496 + }
10497 +
10498 + return 0;
10499 +}
10500 +
10501 +static struct list_head hash_list;
10502 +
10503 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
10504 +{
10505 + struct device *dev;
10506 + struct dpaa2_caam_priv *priv;
10507 + int i, err = 0;
10508 + bool registered = false;
10509 +
10510 + /*
10511 + * There is no way to get CAAM endianness - there is no direct register
10512 + * space access and MC f/w does not provide this attribute.
10513 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
10514 + * property.
10515 + */
10516 + caam_little_end = true;
10517 +
10518 + caam_imx = false;
10519 +
10520 + dev = &dpseci_dev->dev;
10521 +
10522 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
10523 + if (!priv)
10524 + return -ENOMEM;
10525 +
10526 + dev_set_drvdata(dev, priv);
10527 +
10528 + priv->domain = iommu_get_domain_for_dev(dev);
10529 +
10530 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
10531 + 0, SLAB_CACHE_DMA, NULL);
10532 + if (!qi_cache) {
10533 + dev_err(dev, "Can't allocate SEC cache\n");
10534 + err = -ENOMEM;
10535 + goto err_qicache;
10536 + }
10537 +
10538 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
10539 + if (err) {
10540 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
10541 + goto err_dma_mask;
10542 + }
10543 +
10544 + /* Obtain a MC portal */
10545 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
10546 + if (err) {
10547 + if (err == -ENXIO)
10548 + err = -EPROBE_DEFER;
10549 + else
10550 + dev_err(dev, "MC portal allocation failed\n");
10551 +
10552 + goto err_dma_mask;
10553 + }
10554 +
10555 + priv->ppriv = alloc_percpu(*priv->ppriv);
10556 + if (!priv->ppriv) {
10557 + dev_err(dev, "alloc_percpu() failed\n");
10558 + err = -ENOMEM;
10559 + goto err_alloc_ppriv;
10560 + }
10561 +
10562 + /* DPSECI initialization */
10563 + err = dpaa2_dpseci_setup(dpseci_dev);
10564 + if (err) {
10565 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
10566 + goto err_dpseci_setup;
10567 + }
10568 +
10569 + /* DPIO */
10570 + err = dpaa2_dpseci_dpio_setup(priv);
10571 + if (err) {
10572 + if (err != -EPROBE_DEFER)
10573 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
10574 + goto err_dpio_setup;
10575 + }
10576 +
10577 + /* DPSECI binding to DPIO */
10578 + err = dpaa2_dpseci_bind(priv);
10579 + if (err) {
10580 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
10581 + goto err_bind;
10582 + }
10583 +
10584 + /* DPSECI enable */
10585 + err = dpaa2_dpseci_enable(priv);
10586 + if (err) {
10587 + dev_err(dev, "dpaa2_dpseci_enable() failed");
10588 + goto err_bind;
10589 + }
10590 +
10591 + /* register crypto algorithms the device supports */
10592 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10593 + struct caam_skcipher_alg *t_alg = driver_algs + i;
10594 + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
10595 +
10596 + /* Skip DES algorithms if not supported by device */
10597 + if (!priv->sec_attr.des_acc_num &&
10598 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
10599 + (alg_sel == OP_ALG_ALGSEL_DES)))
10600 + continue;
10601 +
10602 + /* Skip AES algorithms if not supported by device */
10603 + if (!priv->sec_attr.aes_acc_num &&
10604 + (alg_sel == OP_ALG_ALGSEL_AES))
10605 + continue;
10606 +
10607 + /* Skip CHACHA20 algorithms if not supported by device */
10608 + if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10609 + !priv->sec_attr.ccha_acc_num)
10610 + continue;
10611 +
10612 + t_alg->caam.dev = dev;
10613 + caam_skcipher_alg_init(t_alg);
10614 +
10615 + err = crypto_register_skcipher(&t_alg->skcipher);
10616 + if (err) {
10617 + dev_warn(dev, "%s alg registration failed: %d\n",
10618 + t_alg->skcipher.base.cra_driver_name, err);
10619 + continue;
10620 + }
10621 +
10622 + t_alg->registered = true;
10623 + registered = true;
10624 + }
10625 +
10626 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10627 + struct caam_aead_alg *t_alg = driver_aeads + i;
10628 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
10629 + OP_ALG_ALGSEL_MASK;
10630 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
10631 + OP_ALG_ALGSEL_MASK;
10632 +
10633 + /* Skip DES algorithms if not supported by device */
10634 + if (!priv->sec_attr.des_acc_num &&
10635 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
10636 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
10637 + continue;
10638 +
10639 + /* Skip AES algorithms if not supported by device */
10640 + if (!priv->sec_attr.aes_acc_num &&
10641 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
10642 + continue;
10643 +
10644 + /* Skip CHACHA20 algorithms if not supported by device */
10645 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10646 + !priv->sec_attr.ccha_acc_num)
10647 + continue;
10648 +
10649 + /* Skip POLY1305 algorithms if not supported by device */
10650 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
10651 + !priv->sec_attr.ptha_acc_num)
10652 + continue;
10653 +
10654 + /*
10655 + * Skip algorithms requiring message digests
10656 + * if MD not supported by device.
10657 + */
10658 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
10659 + !priv->sec_attr.md_acc_num)
10660 + continue;
10661 +
10662 + t_alg->caam.dev = dev;
10663 + caam_aead_alg_init(t_alg);
10664 +
10665 + err = crypto_register_aead(&t_alg->aead);
10666 + if (err) {
10667 + dev_warn(dev, "%s alg registration failed: %d\n",
10668 + t_alg->aead.base.cra_driver_name, err);
10669 + continue;
10670 + }
10671 +
10672 + t_alg->registered = true;
10673 + registered = true;
10674 + }
10675 + if (registered)
10676 + dev_info(dev, "algorithms registered in /proc/crypto\n");
10677 +
10678 + /* register hash algorithms the device supports */
10679 + INIT_LIST_HEAD(&hash_list);
10680 +
10681 + /*
10682 + * Skip registration of any hashing algorithms if MD block
10683 + * is not present.
10684 + */
10685 + if (!priv->sec_attr.md_acc_num)
10686 + return 0;
10687 +
10688 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
10689 + struct caam_hash_alg *t_alg;
10690 + struct caam_hash_template *alg = driver_hash + i;
10691 +
10692 + /* register hmac version */
10693 + t_alg = caam_hash_alloc(dev, alg, true);
10694 + if (IS_ERR(t_alg)) {
10695 + err = PTR_ERR(t_alg);
10696 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
10697 + alg->driver_name, err);
10698 + continue;
10699 + }
10700 +
10701 + err = crypto_register_ahash(&t_alg->ahash_alg);
10702 + if (err) {
10703 + dev_warn(dev, "%s alg registration failed: %d\n",
10704 + t_alg->ahash_alg.halg.base.cra_driver_name,
10705 + err);
10706 + kfree(t_alg);
10707 + } else {
10708 + list_add_tail(&t_alg->entry, &hash_list);
10709 + }
10710 +
10711 + /* register unkeyed version */
10712 + t_alg = caam_hash_alloc(dev, alg, false);
10713 + if (IS_ERR(t_alg)) {
10714 + err = PTR_ERR(t_alg);
10715 + dev_warn(dev, "%s alg allocation failed: %d\n",
10716 + alg->driver_name, err);
10717 + continue;
10718 + }
10719 +
10720 + err = crypto_register_ahash(&t_alg->ahash_alg);
10721 + if (err) {
10722 + dev_warn(dev, "%s alg registration failed: %d\n",
10723 + t_alg->ahash_alg.halg.base.cra_driver_name,
10724 + err);
10725 + kfree(t_alg);
10726 + } else {
10727 + list_add_tail(&t_alg->entry, &hash_list);
10728 + }
10729 + }
10730 + if (!list_empty(&hash_list))
10731 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
10732 +
10733 + return err;
10734 +
10735 +err_bind:
10736 + dpaa2_dpseci_dpio_free(priv);
10737 +err_dpio_setup:
10738 + dpaa2_dpseci_free(priv);
10739 +err_dpseci_setup:
10740 + free_percpu(priv->ppriv);
10741 +err_alloc_ppriv:
10742 + fsl_mc_portal_free(priv->mc_io);
10743 +err_dma_mask:
10744 + kmem_cache_destroy(qi_cache);
10745 +err_qicache:
10746 + dev_set_drvdata(dev, NULL);
10747 +
10748 + return err;
10749 +}
10750 +
10751 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
10752 +{
10753 + struct device *dev;
10754 + struct dpaa2_caam_priv *priv;
10755 + int i;
10756 +
10757 + dev = &ls_dev->dev;
10758 + priv = dev_get_drvdata(dev);
10759 +
10760 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10761 + struct caam_aead_alg *t_alg = driver_aeads + i;
10762 +
10763 + if (t_alg->registered)
10764 + crypto_unregister_aead(&t_alg->aead);
10765 + }
10766 +
10767 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10768 + struct caam_skcipher_alg *t_alg = driver_algs + i;
10769 +
10770 + if (t_alg->registered)
10771 + crypto_unregister_skcipher(&t_alg->skcipher);
10772 + }
10773 +
10774 + if (hash_list.next) {
10775 + struct caam_hash_alg *t_hash_alg, *p;
10776 +
10777 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
10778 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
10779 + list_del(&t_hash_alg->entry);
10780 + kfree(t_hash_alg);
10781 + }
10782 + }
10783 +
10784 + dpaa2_dpseci_disable(priv);
10785 + dpaa2_dpseci_dpio_free(priv);
10786 + dpaa2_dpseci_free(priv);
10787 + free_percpu(priv->ppriv);
10788 + fsl_mc_portal_free(priv->mc_io);
10789 + dev_set_drvdata(dev, NULL);
10790 + kmem_cache_destroy(qi_cache);
10791 +
10792 + return 0;
10793 +}
10794 +
10795 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
10796 +{
10797 + struct dpaa2_fd fd;
10798 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
10799 + struct dpaa2_caam_priv_per_cpu *ppriv;
10800 + int err = 0, i;
10801 +
10802 + if (IS_ERR(req))
10803 + return PTR_ERR(req);
10804 +
10805 + if (priv->cscn_mem) {
10806 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
10807 + DPAA2_CSCN_SIZE,
10808 + DMA_FROM_DEVICE);
10809 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
10810 + dev_dbg_ratelimited(dev, "Dropping request\n");
10811 + return -EBUSY;
10812 + }
10813 + }
10814 +
10815 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
10816 +
10817 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
10818 + DMA_BIDIRECTIONAL);
10819 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
10820 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
10821 + goto err_out;
10822 + }
10823 +
10824 + memset(&fd, 0, sizeof(fd));
10825 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
10826 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
10827 + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
10828 + dpaa2_fd_set_flc(&fd, req->flc_dma);
10829 +
10830 + ppriv = this_cpu_ptr(priv->ppriv);
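+	/*
+	 * Retry the enqueue while the portal reports busy, up to twice the
+	 * number of Tx queues, relaxing the CPU between attempts.
+	 */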
10831 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
10832 + err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
10833 + &fd);
10834 + if (err != -EBUSY)
10835 + break;
10836 +
10837 + cpu_relax();
10838 + }
10839 +
10840 + if (unlikely(err)) {
10841 + dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
10842 + goto err_out;
10843 + }
10844 +
10845 + return -EINPROGRESS;
10846 +
10847 +err_out:
10848 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
10849 + DMA_BIDIRECTIONAL);
10850 + return -EIO;
10851 +}
10852 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
10853 +
10854 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
10855 + {
10856 + .vendor = FSL_MC_VENDOR_FREESCALE,
10857 + .obj_type = "dpseci",
10858 + },
10859 + { .vendor = 0x0 }
10860 +};
10861 +
10862 +static struct fsl_mc_driver dpaa2_caam_driver = {
10863 + .driver = {
10864 + .name = KBUILD_MODNAME,
10865 + .owner = THIS_MODULE,
10866 + },
10867 + .probe = dpaa2_caam_probe,
10868 + .remove = dpaa2_caam_remove,
10869 + .match_id_table = dpaa2_caam_match_id_table
10870 +};
10871 +
10872 +MODULE_LICENSE("Dual BSD/GPL");
10873 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
10874 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
10875 +
10876 +module_fsl_mc_driver(dpaa2_caam_driver);
10877 --- /dev/null
10878 +++ b/drivers/crypto/caam/caamalg_qi2.h
10879 @@ -0,0 +1,276 @@
10880 +/*
10881 + * Copyright 2015-2016 Freescale Semiconductor Inc.
10882 + * Copyright 2017 NXP
10883 + *
10884 + * Redistribution and use in source and binary forms, with or without
10885 + * modification, are permitted provided that the following conditions are met:
10886 + * * Redistributions of source code must retain the above copyright
10887 + * notice, this list of conditions and the following disclaimer.
10888 + * * Redistributions in binary form must reproduce the above copyright
10889 + * notice, this list of conditions and the following disclaimer in the
10890 + * documentation and/or other materials provided with the distribution.
10891 + * * Neither the names of the above-listed copyright holders nor the
10892 + * names of any contributors may be used to endorse or promote products
10893 + * derived from this software without specific prior written permission.
10894 + *
10895 + *
10896 + * ALTERNATIVELY, this software may be distributed under the terms of the
10897 + * GNU General Public License ("GPL") as published by the Free Software
10898 + * Foundation, either version 2 of that License or (at your option) any
10899 + * later version.
10900 + *
10901 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10902 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10903 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10904 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10905 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10906 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10907 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10908 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10909 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10910 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10911 + * POSSIBILITY OF SUCH DAMAGE.
10912 + */
10913 +
10914 +#ifndef _CAAMALG_QI2_H_
10915 +#define _CAAMALG_QI2_H_
10916 +
10917 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
10918 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
10919 +#include <linux/threads.h>
10920 +#include "dpseci.h"
10921 +#include "desc_constr.h"
10922 +
10923 +#define DPAA2_CAAM_STORE_SIZE 16
10924 +/* NAPI weight *must* be a multiple of the store size. */
10925 +#define DPAA2_CAAM_NAPI_WEIGHT 64
10926 +
10927 +/* The congestion entrance threshold was chosen so that on LS2088
10928 + * we support the maximum throughput for the available memory
10929 + */
10930 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
10931 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
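+/*
+ * The thresholds are expressed in bytes (DPSECI_CONGESTION_UNIT_BYTES is
+ * used when configuring the notification): congestion is entered at 128 MiB
+ * of in-flight data and exited once the backlog drops below ~115 MiB
+ * (90% of the entry threshold).
+ */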
10932 +
10933 +/**
10934 + * dpaa2_caam_priv - driver private data
10935 + * @dpsec_id: DPSECI object unique ID
10936 + * @major_ver: DPSECI major version
10937 + * @minor_ver: DPSECI minor version
10938 + * @dpseci_attr: DPSECI attributes
10939 + * @sec_attr: SEC engine attributes
10940 + * @rx_queue_attr: array of Rx queue attributes
10941 + * @tx_queue_attr: array of Tx queue attributes
10942 + * @cscn_mem: pointer to memory region containing the
10943 + * dpaa2_cscn struct; it's size is larger than
10944 + *		dpaa2_cscn struct; its size is larger than
10945 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
10946 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
10947 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
10948 + * @dev: device associated with the DPSECI object
10949 + * @mc_io: pointer to MC portal's I/O object
10950 + * @domain: IOMMU domain
10951 + * @ppriv: per CPU pointers to private data
10952 + */
10953 +struct dpaa2_caam_priv {
10954 + int dpsec_id;
10955 +
10956 + u16 major_ver;
10957 + u16 minor_ver;
10958 +
10959 + struct dpseci_attr dpseci_attr;
10960 + struct dpseci_sec_attr sec_attr;
10961 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10962 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10963 + int num_pairs;
10964 +
10965 + /* congestion */
10966 + void *cscn_mem;
10967 + void *cscn_mem_aligned;
10968 + dma_addr_t cscn_dma;
10969 +
10970 + struct device *dev;
10971 + struct fsl_mc_io *mc_io;
10972 + struct iommu_domain *domain;
10973 +
10974 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
10975 +};
10976 +
10977 +/**
10978 + * dpaa2_caam_priv_per_cpu - per CPU private data
10979 + * @napi: napi structure
10980 + * @net_dev: netdev used by napi
10981 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
10982 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
10983 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
10984 + * @nctx: notification context of response FQ
10985 + * @store: where dequeued frames are stored
10986 + * @priv: backpointer to dpaa2_caam_priv
10987 + * @dpio: portal used for data path operations
10988 + */
10989 +struct dpaa2_caam_priv_per_cpu {
10990 + struct napi_struct napi;
10991 + struct net_device net_dev;
10992 + int req_fqid;
10993 + int rsp_fqid;
10994 + int prio;
10995 + struct dpaa2_io_notification_ctx nctx;
10996 + struct dpaa2_io_store *store;
10997 + struct dpaa2_caam_priv *priv;
10998 + struct dpaa2_io *dpio;
10999 +};
11000 +
11001 +/*
11002 + * The CAAM QI hardware constructs a job descriptor which points
11003 + * to the shared descriptor (whose address is passed to CAAM via the
11004 + * FQ's context_a). When the job descriptor is executed by deco, the
11005 + * whole job descriptor together with the shared descriptor gets loaded
11006 + * into the deco buffer, which is 64 words long (each word 32 bits).
11007 + *
11008 + * The job descriptor constructed by QI hardware has layout:
11009 + *
11010 + * HEADER (1 word)
11011 + * Shdesc ptr (1 or 2 words)
11012 + * SEQ_OUT_PTR (1 word)
11013 + * Out ptr (1 or 2 words)
11014 + * Out length (1 word)
11015 + * SEQ_IN_PTR (1 word)
11016 + * In ptr (1 or 2 words)
11017 + * In length (1 word)
11018 + *
11019 + * The shdesc ptr is used to fetch shared descriptor contents
11020 + * into deco buffer.
11021 + *
11022 + * Apart from the shdesc contents, the total number of words that
11023 + * get loaded into the deco buffer is 8 or 11. The remaining words
11024 + * in the deco buffer can be used for storing the shared descriptor.
11025 + */
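+/*
+ * For instance, with the 64-word deco buffer and the larger 11-word job
+ * descriptor case described above, up to 53 words are left for the shared
+ * descriptor.
+ */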
11026 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
11027 +
11028 +/* Length of a single buffer in the QI driver memory cache */
11029 +#define CAAM_QI_MEMCACHE_SIZE 512
11030 +
11031 +/*
11032 + * aead_edesc - s/w-extended aead descriptor
11033 + * @src_nents: number of segments in input scatterlist
11034 + * @dst_nents: number of segments in output scatterlist
11035 + * @iv_dma: dma address of iv for checking continuity and link table
11036 + * @qm_sg_bytes: length of dma mapped h/w link table
11037 + * @qm_sg_dma: bus physical mapped address of h/w link table
11038 + * @assoclen: associated data length, in CAAM endianness
11039 + * @assoclen_dma: bus physical mapped address of req->assoclen
11040 + * @sgt: the h/w link table, followed by IV
11041 + */
11042 +struct aead_edesc {
11043 + int src_nents;
11044 + int dst_nents;
11045 + dma_addr_t iv_dma;
11046 + int qm_sg_bytes;
11047 + dma_addr_t qm_sg_dma;
11048 + unsigned int assoclen;
11049 + dma_addr_t assoclen_dma;
11050 + struct dpaa2_sg_entry sgt[0];
11051 +};
11052 +
11053 +/*
11054 + * tls_edesc - s/w-extended tls descriptor
11055 + * @src_nents: number of segments in input scatterlist
11056 + * @dst_nents: number of segments in output scatterlist
11057 + * @iv_dma: dma address of iv for checking continuity and link table
11058 + * @qm_sg_bytes: length of dma mapped h/w link table
11059 + * @qm_sg_dma: bus physical mapped address of h/w link table
11060 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
11061 + * @dst: pointer to output scatterlist, useful for unmapping
11062 + * @sgt: the h/w link table, followed by IV
11063 + */
11064 +struct tls_edesc {
11065 + int src_nents;
11066 + int dst_nents;
11067 + dma_addr_t iv_dma;
11068 + int qm_sg_bytes;
11069 + dma_addr_t qm_sg_dma;
11070 + struct scatterlist tmp[2];
11071 + struct scatterlist *dst;
11072 + struct dpaa2_sg_entry sgt[0];
11073 +};
11074 +
11075 +/*
11076 + * skcipher_edesc - s/w-extended skcipher descriptor
11077 + * @src_nents: number of segments in input scatterlist
11078 + * @dst_nents: number of segments in output scatterlist
11079 + * @iv_dma: dma address of iv for checking continuity and link table
11080 + * @qm_sg_bytes: length of dma mapped qm_sg space
11081 + * @qm_sg_dma: I/O virtual address of h/w link table
11082 + * @sgt: the h/w link table, followed by IV
11083 + */
11084 +struct skcipher_edesc {
11085 + int src_nents;
11086 + int dst_nents;
11087 + dma_addr_t iv_dma;
11088 + int qm_sg_bytes;
11089 + dma_addr_t qm_sg_dma;
11090 + struct dpaa2_sg_entry sgt[0];
11091 +};
11092 +
11093 +/*
11094 + * ahash_edesc - s/w-extended ahash descriptor
11095 + * @dst_dma: I/O virtual address of req->result
11096 + * @qm_sg_dma: I/O virtual address of h/w link table
11097 + * @src_nents: number of segments in input scatterlist
11098 + * @qm_sg_bytes: length of dma mapped qm_sg space
11099 + * @sgt: pointer to h/w link table
11100 + */
11101 +struct ahash_edesc {
11102 + dma_addr_t dst_dma;
11103 + dma_addr_t qm_sg_dma;
11104 + int src_nents;
11105 + int qm_sg_bytes;
11106 + struct dpaa2_sg_entry sgt[0];
11107 +};
11108 +
11109 +/**
11110 + * caam_flc - Flow Context (FLC)
11111 + * @flc: Flow Context options
11112 + * @sh_desc: Shared Descriptor
11113 + */
11114 +struct caam_flc {
11115 + u32 flc[16];
11116 + u32 sh_desc[MAX_SDLEN];
11117 +} ____cacheline_aligned;
11118 +
11119 +enum optype {
11120 + ENCRYPT = 0,
11121 + DECRYPT,
11122 + NUM_OP
11123 +};
11124 +
11125 +/**
11126 + * caam_request - the request structure the driver application should fill while
11127 + * submitting a job to the driver.
11128 + * @fd_flt: Frame list table defining input and output
11129 + * fd_flt[0] - FLE pointing to output buffer
11130 + * fd_flt[1] - FLE pointing to input buffer
11131 + * @fd_flt_dma: DMA address for the frame list table
11132 + * @flc: Flow Context
11133 + * @flc_dma: I/O virtual address of Flow Context
11134 + * @cbk: Callback function to invoke when job is completed
11135 + * @ctx: arbitrary context attached to the request by the application
11136 + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
11137 + */
11138 +struct caam_request {
11139 + struct dpaa2_fl_entry fd_flt[2];
11140 + dma_addr_t fd_flt_dma;
11141 + struct caam_flc *flc;
11142 + dma_addr_t flc_dma;
11143 + void (*cbk)(void *ctx, u32 err);
11144 + void *ctx;
11145 + void *edesc;
11146 +};
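+
+/*
+ * Sketch of typical usage (illustrative only; my_done_cb and my_req_ctx
+ * are placeholders, not part of the driver):
+ *
+ *	req->fd_flt[0] = <FLE describing the output buffer>;
+ *	req->fd_flt[1] = <FLE describing the input buffer>;
+ *	req->flc = flc;
+ *	req->flc_dma = flc_dma;
+ *	req->cbk = my_done_cb;		(invoked with the FD[FRC] status)
+ *	req->ctx = my_req_ctx;
+ *	err = dpaa2_caam_enqueue(dev, req);
+ *
+ * dpaa2_caam_enqueue() returns -EINPROGRESS when the frame was accepted,
+ * -EBUSY when the DPSECI object is congested and -EIO on enqueue failure.
+ */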
11147 +
11148 +/**
11149 + * dpaa2_caam_enqueue() - enqueue a crypto request
11150 + * @dev: device associated with the DPSECI object
11151 + * @req: pointer to caam_request
11152 + */
11153 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
11154 +
11155 +#endif /* _CAAMALG_QI2_H_ */
11156 --- a/drivers/crypto/caam/caamhash.c
11157 +++ b/drivers/crypto/caam/caamhash.c
11158 @@ -2,6 +2,7 @@
11159 * caam - Freescale FSL CAAM support for ahash functions of crypto API
11160 *
11161 * Copyright 2011 Freescale Semiconductor, Inc.
11162 + * Copyright 2018 NXP
11163 *
11164 * Based on caamalg.c crypto API driver.
11165 *
11166 @@ -62,6 +63,7 @@
11167 #include "error.h"
11168 #include "sg_sw_sec4.h"
11169 #include "key_gen.h"
11170 +#include "caamhash_desc.h"
11171
11172 #define CAAM_CRA_PRIORITY 3000
11173
11174 @@ -71,14 +73,6 @@
11175 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
11176 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
11177
11178 -/* length of descriptors text */
11179 -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
11180 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
11181 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11182 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11183 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11184 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11185 -
11186 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
11187 CAAM_MAX_HASH_KEY_SIZE)
11188 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
11189 @@ -107,6 +101,7 @@ struct caam_hash_ctx {
11190 dma_addr_t sh_desc_update_first_dma;
11191 dma_addr_t sh_desc_fin_dma;
11192 dma_addr_t sh_desc_digest_dma;
11193 + enum dma_data_direction dir;
11194 struct device *jrdev;
11195 u8 key[CAAM_MAX_HASH_KEY_SIZE];
11196 int ctx_len;
11197 @@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
11198 }
11199
11200 /* Map state->caam_ctx, and add it to link table */
11201 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
11202 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
11203 struct caam_hash_state *state, int ctx_len,
11204 struct sec4_sg_entry *sec4_sg, u32 flag)
11205 {
11206 @@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
11207 return 0;
11208 }
11209
11210 -/*
11211 - * For ahash update, final and finup (import_ctx = true)
11212 - * import context, read and write to seqout
11213 - * For ahash firsts and digest (import_ctx = false)
11214 - * read and write to seqout
11215 - */
11216 -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
11217 - struct caam_hash_ctx *ctx, bool import_ctx)
11218 -{
11219 - u32 op = ctx->adata.algtype;
11220 - u32 *skip_key_load;
11221 -
11222 - init_sh_desc(desc, HDR_SHARE_SERIAL);
11223 -
11224 - /* Append key if it has been set; ahash update excluded */
11225 - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
11226 - /* Skip key loading if already shared */
11227 - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11228 - JUMP_COND_SHRD);
11229 -
11230 - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
11231 - ctx->adata.keylen, CLASS_2 |
11232 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
11233 -
11234 - set_jump_tgt_here(desc, skip_key_load);
11235 -
11236 - op |= OP_ALG_AAI_HMAC_PRECOMP;
11237 - }
11238 -
11239 - /* If needed, import context from software */
11240 - if (import_ctx)
11241 - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
11242 - LDST_SRCDST_BYTE_CONTEXT);
11243 -
11244 - /* Class 2 operation */
11245 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
11246 -
11247 - /*
11248 - * Load from buf and/or src and write to req->result or state->context
11249 - * Calculate remaining bytes to read
11250 - */
11251 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11252 - /* Read remaining bytes */
11253 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11254 - FIFOLD_TYPE_MSG | KEY_VLF);
11255 - /* Store class2 context bytes */
11256 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11257 - LDST_SRCDST_BYTE_CONTEXT);
11258 -}
11259 -
11260 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
11261 {
11262 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11263 int digestsize = crypto_ahash_digestsize(ahash);
11264 struct device *jrdev = ctx->jrdev;
11265 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
11266 u32 *desc;
11267
11268 + ctx->adata.key_virt = ctx->key;
11269 +
11270 /* ahash_update shared descriptor */
11271 desc = ctx->sh_desc_update;
11272 - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
11273 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
11274 + ctx->ctx_len, true, ctrlpriv->era);
11275 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
11276 - desc_bytes(desc), DMA_TO_DEVICE);
11277 + desc_bytes(desc), ctx->dir);
11278 #ifdef DEBUG
11279 print_hex_dump(KERN_ERR,
11280 "ahash update shdesc@"__stringify(__LINE__)": ",
11281 @@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
11282
11283 /* ahash_update_first shared descriptor */
11284 desc = ctx->sh_desc_update_first;
11285 - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
11286 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
11287 + ctx->ctx_len, false, ctrlpriv->era);
11288 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
11289 - desc_bytes(desc), DMA_TO_DEVICE);
11290 + desc_bytes(desc), ctx->dir);
11291 #ifdef DEBUG
11292 print_hex_dump(KERN_ERR,
11293 "ahash update first shdesc@"__stringify(__LINE__)": ",
11294 @@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
11295
11296 /* ahash_final shared descriptor */
11297 desc = ctx->sh_desc_fin;
11298 - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
11299 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
11300 + ctx->ctx_len, true, ctrlpriv->era);
11301 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
11302 - desc_bytes(desc), DMA_TO_DEVICE);
11303 + desc_bytes(desc), ctx->dir);
11304 #ifdef DEBUG
11305 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
11306 DUMP_PREFIX_ADDRESS, 16, 4, desc,
11307 @@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
11308
11309 /* ahash_digest shared descriptor */
11310 desc = ctx->sh_desc_digest;
11311 - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
11312 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
11313 + ctx->ctx_len, false, ctrlpriv->era);
11314 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
11315 - desc_bytes(desc), DMA_TO_DEVICE);
11316 + desc_bytes(desc), ctx->dir);
11317 #ifdef DEBUG
11318 print_hex_dump(KERN_ERR,
11319 "ahash digest shdesc@"__stringify(__LINE__)": ",
11320 @@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
11321 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11322 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
11323 int digestsize = crypto_ahash_digestsize(ahash);
11324 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
11325 int ret;
11326 u8 *hashed_key = NULL;
11327
11328 @@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
11329 key = hashed_key;
11330 }
11331
11332 - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
11333 - CAAM_MAX_HASH_KEY_SIZE);
11334 - if (ret)
11335 - goto bad_free_key;
11336 + /*
11337 + * If DKP is supported, use it in the shared descriptor to generate
11338 + * the split key.
11339 + */
11340 + if (ctrlpriv->era >= 6) {
11341 + ctx->adata.key_inline = true;
11342 + ctx->adata.keylen = keylen;
11343 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
11344 + OP_ALG_ALGSEL_MASK);
11345
11346 -#ifdef DEBUG
11347 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
11348 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
11349 - ctx->adata.keylen_pad, 1);
11350 -#endif
11351 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
11352 + goto bad_free_key;
11353 +
11354 + memcpy(ctx->key, key, keylen);
11355 + } else {
11356 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
11357 + keylen, CAAM_MAX_HASH_KEY_SIZE);
11358 + if (ret)
11359 + goto bad_free_key;
11360 + }
11361
11362 kfree(hashed_key);
11363 return ahash_set_sh_desc(ahash);
11364 @@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
11365 edesc->src_nents = src_nents;
11366 edesc->sec4_sg_bytes = sec4_sg_bytes;
11367
11368 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11369 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11370 edesc->sec4_sg, DMA_BIDIRECTIONAL);
11371 if (ret)
11372 goto unmap_ctx;
11373 @@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
11374 desc = edesc->hw_desc;
11375
11376 edesc->sec4_sg_bytes = sec4_sg_bytes;
11377 - edesc->src_nents = 0;
11378
11379 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11380 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11381 edesc->sec4_sg, DMA_TO_DEVICE);
11382 if (ret)
11383 goto unmap_ctx;
11384 @@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
11385
11386 edesc->src_nents = src_nents;
11387
11388 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11389 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11390 edesc->sec4_sg, DMA_TO_DEVICE);
11391 if (ret)
11392 goto unmap_ctx;
11393 @@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
11394 dev_err(jrdev, "unable to map dst\n");
11395 goto unmap;
11396 }
11397 - edesc->src_nents = 0;
11398
11399 #ifdef DEBUG
11400 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
11401 @@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
11402
11403 edesc->src_nents = src_nents;
11404 edesc->sec4_sg_bytes = sec4_sg_bytes;
11405 - edesc->dst_dma = 0;
11406
11407 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
11408 if (ret)
11409 @@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
11410 }
11411
11412 edesc->src_nents = src_nents;
11413 - edesc->dst_dma = 0;
11414
11415 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
11416 to_hash);
11417 @@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
11418 HASH_MSG_LEN + 64,
11419 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
11420 dma_addr_t dma_addr;
11421 + struct caam_drv_private *priv;
11422
11423 /*
11424 * Get a Job ring from Job Ring driver to ensure in-order
11425 @@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
11426 return PTR_ERR(ctx->jrdev);
11427 }
11428
11429 + priv = dev_get_drvdata(ctx->jrdev->parent);
11430 + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
11431 +
11432 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
11433 offsetof(struct caam_hash_ctx,
11434 sh_desc_update_dma),
11435 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11436 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11437 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
11438 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
11439 caam_jr_free(ctx->jrdev);
11440 @@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
11441 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
11442 offsetof(struct caam_hash_ctx,
11443 sh_desc_update_dma),
11444 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11445 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11446 caam_jr_free(ctx->jrdev);
11447 }
11448
11449 -static void __exit caam_algapi_hash_exit(void)
11450 +void caam_algapi_hash_exit(void)
11451 {
11452 struct caam_hash_alg *t_alg, *n;
11453
11454 @@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
11455 return t_alg;
11456 }
11457
11458 -static int __init caam_algapi_hash_init(void)
11459 +int caam_algapi_hash_init(struct device *ctrldev)
11460 {
11461 - struct device_node *dev_node;
11462 - struct platform_device *pdev;
11463 - struct device *ctrldev;
11464 int i = 0, err = 0;
11465 - struct caam_drv_private *priv;
11466 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11467 unsigned int md_limit = SHA512_DIGEST_SIZE;
11468 - u32 cha_inst, cha_vid;
11469 -
11470 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11471 - if (!dev_node) {
11472 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11473 - if (!dev_node)
11474 - return -ENODEV;
11475 - }
11476 -
11477 - pdev = of_find_device_by_node(dev_node);
11478 - if (!pdev) {
11479 - of_node_put(dev_node);
11480 - return -ENODEV;
11481 - }
11482 -
11483 - ctrldev = &pdev->dev;
11484 - priv = dev_get_drvdata(ctrldev);
11485 - of_node_put(dev_node);
11486 -
11487 - /*
11488 - * If priv is NULL, it's probably because the caam driver wasn't
11489 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11490 - */
11491 - if (!priv)
11492 - return -ENODEV;
11493 + u32 md_inst, md_vid;
11494
11495 /*
11496 * Register crypto algorithms the device supports. First, identify
11497 * presence and attributes of MD block.
11498 */
11499 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
11500 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11501 + if (priv->era < 10) {
11502 + md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
11503 + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11504 + md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11505 + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11506 + } else {
11507 + u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
11508 +
11509 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
11510 + md_inst = mdha & CHA_VER_NUM_MASK;
11511 + }
11512
11513 /*
11514 * Skip registration of any hashing algorithms if MD block
11515 * is not present.
11516 */
11517 - if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
11518 + if (!md_inst)
11519 return -ENODEV;
11520
11521 /* Limit digest size based on LP256 */
11522 - if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
11523 + if (md_vid == CHA_VER_VID_MD_LP256)
11524 md_limit = SHA256_DIGEST_SIZE;
11525
11526 INIT_LIST_HEAD(&hash_list);
11527 @@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
11528
11529 return err;
11530 }
11531 -
11532 -module_init(caam_algapi_hash_init);
11533 -module_exit(caam_algapi_hash_exit);
11534 -
11535 -MODULE_LICENSE("GPL");
11536 -MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
11537 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
11538 --- /dev/null
11539 +++ b/drivers/crypto/caam/caamhash_desc.c
11540 @@ -0,0 +1,108 @@
11541 +/*
11542 + * Shared descriptors for ahash algorithms
11543 + *
11544 + * Copyright 2017 NXP
11545 + *
11546 + * Redistribution and use in source and binary forms, with or without
11547 + * modification, are permitted provided that the following conditions are met:
11548 + * * Redistributions of source code must retain the above copyright
11549 + * notice, this list of conditions and the following disclaimer.
11550 + * * Redistributions in binary form must reproduce the above copyright
11551 + * notice, this list of conditions and the following disclaimer in the
11552 + * documentation and/or other materials provided with the distribution.
11553 + * * Neither the names of the above-listed copyright holders nor the
11554 + * names of any contributors may be used to endorse or promote products
11555 + * derived from this software without specific prior written permission.
11556 + *
11557 + *
11558 + * ALTERNATIVELY, this software may be distributed under the terms of the
11559 + * GNU General Public License ("GPL") as published by the Free Software
11560 + * Foundation, either version 2 of that License or (at your option) any
11561 + * later version.
11562 + *
11563 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11564 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11565 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11566 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11567 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11568 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11569 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11570 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11571 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11572 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11573 + * POSSIBILITY OF SUCH DAMAGE.
11574 + */
11575 +
11576 +#include "compat.h"
11577 +#include "desc_constr.h"
11578 +#include "caamhash_desc.h"
11579 +
11580 +/**
11581 + * cnstr_shdsc_ahash - ahash shared descriptor
11582 + * @desc: pointer to buffer used for descriptor construction
11583 + * @adata: pointer to authentication transform definitions.
11584 + * A split key is required for SEC Era < 6; the size of the split key
11585 + * is specified in this case.
11586 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
11587 + * SHA256, SHA384, SHA512}.
11588 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
11589 + * @digestsize: algorithm's digest size
11590 + * @ctx_len: size of Context Register
11591 + * @import_ctx: true if previous Context Register needs to be restored
11592 + * must be true for ahash update and final
11593 + *             must be false for ahash first and digest
11594 + * @era: SEC Era
11595 + */
11596 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11597 + int digestsize, int ctx_len, bool import_ctx, int era)
11598 +{
11599 + u32 op = adata->algtype;
11600 +
11601 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11602 +
11603 + /* Append key if it has been set; ahash update excluded */
11604 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
11605 + u32 *skip_key_load;
11606 +
11607 + /* Skip key loading if already shared */
11608 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11609 + JUMP_COND_SHRD);
11610 +
11611 + if (era < 6)
11612 + append_key_as_imm(desc, adata->key_virt,
11613 + adata->keylen_pad,
11614 + adata->keylen, CLASS_2 |
11615 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11616 + else
11617 + append_proto_dkp(desc, adata);
11618 +
11619 + set_jump_tgt_here(desc, skip_key_load);
11620 +
11621 + op |= OP_ALG_AAI_HMAC_PRECOMP;
11622 + }
11623 +
11624 + /* If needed, import context from software */
11625 + if (import_ctx)
11626 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
11627 + LDST_SRCDST_BYTE_CONTEXT);
11628 +
11629 + /* Class 2 operation */
11630 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
11631 +
11632 + /*
11633 + * Load from buf and/or src and write to req->result or state->context
11634 + * Calculate remaining bytes to read
11635 + */
11636 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11637 + /* Read remaining bytes */
11638 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11639 + FIFOLD_TYPE_MSG | KEY_VLF);
11640 + /* Store class2 context bytes */
11641 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11642 + LDST_SRCDST_BYTE_CONTEXT);
11643 +}
11644 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
11645 +
11646 +MODULE_LICENSE("Dual BSD/GPL");
11647 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
11648 +MODULE_AUTHOR("NXP Semiconductors");
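For orientation, a minimal sketch of how a caller might drive the new helper to build an HMAC-SHA256 "update" shared descriptor. The descriptor buffer size and the context length are illustrative assumptions (the real caamhash driver derives both from its own state); cnstr_shdsc_ahash() itself ORs in OP_ALG_AAI_HMAC_PRECOMP when a key is present.

	/* Illustrative sketch only; adata is assumed to be prepared by the caller. */
	static void example_ahash_update_shdesc(struct alginfo *adata, int era)
	{
		u32 desc[64];	/* assumed large enough for this descriptor */

		adata->algtype = OP_ALG_ALGSEL_SHA256;
		/* update step: restore the running context, no final digest yet */
		cnstr_shdsc_ahash(desc, adata, OP_ALG_AS_UPDATE,
				  SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
				  true, era);
	}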
11649 --- /dev/null
11650 +++ b/drivers/crypto/caam/caamhash_desc.h
11651 @@ -0,0 +1,49 @@
11652 +/*
11653 + * Shared descriptors for ahash algorithms
11654 + *
11655 + * Copyright 2017 NXP
11656 + *
11657 + * Redistribution and use in source and binary forms, with or without
11658 + * modification, are permitted provided that the following conditions are met:
11659 + * * Redistributions of source code must retain the above copyright
11660 + * notice, this list of conditions and the following disclaimer.
11661 + * * Redistributions in binary form must reproduce the above copyright
11662 + * notice, this list of conditions and the following disclaimer in the
11663 + * documentation and/or other materials provided with the distribution.
11664 + * * Neither the names of the above-listed copyright holders nor the
11665 + * names of any contributors may be used to endorse or promote products
11666 + * derived from this software without specific prior written permission.
11667 + *
11668 + *
11669 + * ALTERNATIVELY, this software may be distributed under the terms of the
11670 + * GNU General Public License ("GPL") as published by the Free Software
11671 + * Foundation, either version 2 of that License or (at your option) any
11672 + * later version.
11673 + *
11674 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11675 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11676 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11677 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11678 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11679 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11680 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11681 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11682 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11683 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11684 + * POSSIBILITY OF SUCH DAMAGE.
11685 + */
11686 +
11687 +#ifndef _CAAMHASH_DESC_H_
11688 +#define _CAAMHASH_DESC_H_
11689 +
11690 +/* length of descriptors text */
11691 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
11692 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
11693 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11694 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11695 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11696 +
11697 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11698 + int digestsize, int ctx_len, bool import_ctx, int era);
11699 +
11700 +#endif /* _CAAMHASH_DESC_H_ */
11701 --- a/drivers/crypto/caam/caampkc.c
11702 +++ b/drivers/crypto/caam/caampkc.c
11703 @@ -2,6 +2,7 @@
11704 * caam - Freescale FSL CAAM support for Public Key Cryptography
11705 *
11706 * Copyright 2016 Freescale Semiconductor, Inc.
11707 + * Copyright 2018 NXP
11708 *
11709 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
11710 * all the desired key parameters, input and output pointers.
11711 @@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
11712 };
11713
11714 /* Public Key Cryptography module initialization handler */
11715 -static int __init caam_pkc_init(void)
11716 +int caam_pkc_init(struct device *ctrldev)
11717 {
11718 - struct device_node *dev_node;
11719 - struct platform_device *pdev;
11720 - struct device *ctrldev;
11721 - struct caam_drv_private *priv;
11722 - u32 cha_inst, pk_inst;
11723 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11724 + u32 pk_inst;
11725 int err;
11726
11727 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11728 - if (!dev_node) {
11729 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11730 - if (!dev_node)
11731 - return -ENODEV;
11732 - }
11733 -
11734 - pdev = of_find_device_by_node(dev_node);
11735 - if (!pdev) {
11736 - of_node_put(dev_node);
11737 - return -ENODEV;
11738 - }
11739 -
11740 - ctrldev = &pdev->dev;
11741 - priv = dev_get_drvdata(ctrldev);
11742 - of_node_put(dev_node);
11743 -
11744 - /*
11745 - * If priv is NULL, it's probably because the caam driver wasn't
11746 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11747 - */
11748 - if (!priv)
11749 - return -ENODEV;
11750 -
11751 /* Determine public key hardware accelerator presence. */
11752 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11753 - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11754 + if (priv->era < 10)
11755 + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11756 + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11757 + else
11758 + pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
11759
11760 /* Do not register algorithms if PKHA is not present. */
11761 if (!pk_inst)
11762 - return -ENODEV;
11763 + return 0;
11764
11765 err = crypto_register_akcipher(&caam_rsa);
11766 if (err)
11767 @@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
11768 return err;
11769 }
11770
11771 -static void __exit caam_pkc_exit(void)
11772 +void caam_pkc_exit(void)
11773 {
11774 crypto_unregister_akcipher(&caam_rsa);
11775 }
11776 -
11777 -module_init(caam_pkc_init);
11778 -module_exit(caam_pkc_exit);
11779 -
11780 -MODULE_LICENSE("Dual BSD/GPL");
11781 -MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
11782 -MODULE_AUTHOR("Freescale Semiconductor");
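With the module hooks dropped, caam_pkc_init()/caam_pkc_exit() are meant to be called from the CAAM controller code once its drvdata is valid; the caamrng hunk below gets the same treatment. A hypothetical call site, only to show the intended ordering:

	static int example_register_caam_offload(struct device *ctrldev)
	{
		int err;

		/* Both init helpers return 0 when the relevant CHA is absent. */
		err = caam_pkc_init(ctrldev);
		if (err)
			return err;

		err = caam_rng_init(ctrldev);
		if (err)
			caam_pkc_exit();

		return err;
	}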
11783 --- a/drivers/crypto/caam/caamrng.c
11784 +++ b/drivers/crypto/caam/caamrng.c
11785 @@ -2,6 +2,7 @@
11786 * caam - Freescale FSL CAAM support for hw_random
11787 *
11788 * Copyright 2011 Freescale Semiconductor, Inc.
11789 + * Copyright 2018 NXP
11790 *
11791 * Based on caamalg.c crypto API driver.
11792 *
11793 @@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
11794 .read = caam_read,
11795 };
11796
11797 -static void __exit caam_rng_exit(void)
11798 +void caam_rng_exit(void)
11799 {
11800 caam_jr_free(rng_ctx->jrdev);
11801 hwrng_unregister(&caam_rng);
11802 kfree(rng_ctx);
11803 }
11804
11805 -static int __init caam_rng_init(void)
11806 +int caam_rng_init(struct device *ctrldev)
11807 {
11808 struct device *dev;
11809 - struct device_node *dev_node;
11810 - struct platform_device *pdev;
11811 - struct device *ctrldev;
11812 - struct caam_drv_private *priv;
11813 + u32 rng_inst;
11814 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11815 int err;
11816
11817 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11818 - if (!dev_node) {
11819 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11820 - if (!dev_node)
11821 - return -ENODEV;
11822 - }
11823 -
11824 - pdev = of_find_device_by_node(dev_node);
11825 - if (!pdev) {
11826 - of_node_put(dev_node);
11827 - return -ENODEV;
11828 - }
11829 -
11830 - ctrldev = &pdev->dev;
11831 - priv = dev_get_drvdata(ctrldev);
11832 - of_node_put(dev_node);
11833 -
11834 - /*
11835 - * If priv is NULL, it's probably because the caam driver wasn't
11836 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11837 - */
11838 - if (!priv)
11839 - return -ENODEV;
11840 -
11841 /* Check for an instantiated RNG before registration */
11842 - if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
11843 - return -ENODEV;
11844 + if (priv->era < 10)
11845 + rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11846 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
11847 + else
11848 + rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
11849 +
11850 + if (!rng_inst)
11851 + return 0;
11852
11853 dev = caam_jr_alloc();
11854 if (IS_ERR(dev)) {
11855 @@ -364,10 +345,3 @@ free_caam_alloc:
11856 caam_jr_free(dev);
11857 return err;
11858 }
11859 -
11860 -module_init(caam_rng_init);
11861 -module_exit(caam_rng_exit);
11862 -
11863 -MODULE_LICENSE("GPL");
11864 -MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
11865 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
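The Era-gated register read above (perfmon "_ls" fields before Era 10, per-CHA version registers from Era 10 onwards) is the same pattern used in caampkc.c and ctrl.c; as a sketch, it could be expressed as one hypothetical helper:

	static u32 example_rng_instances(struct caam_drv_private *priv)
	{
		if (priv->era < 10)
			return (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
				CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;

		return rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
	}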
11866 --- a/drivers/crypto/caam/compat.h
11867 +++ b/drivers/crypto/caam/compat.h
11868 @@ -17,6 +17,7 @@
11869 #include <linux/of_platform.h>
11870 #include <linux/dma-mapping.h>
11871 #include <linux/io.h>
11872 +#include <linux/iommu.h>
11873 #include <linux/spinlock.h>
11874 #include <linux/rtnetlink.h>
11875 #include <linux/in.h>
11876 @@ -34,10 +35,13 @@
11877 #include <crypto/des.h>
11878 #include <crypto/sha.h>
11879 #include <crypto/md5.h>
11880 +#include <crypto/chacha20.h>
11881 +#include <crypto/poly1305.h>
11882 #include <crypto/internal/aead.h>
11883 #include <crypto/authenc.h>
11884 #include <crypto/akcipher.h>
11885 #include <crypto/scatterwalk.h>
11886 +#include <crypto/skcipher.h>
11887 #include <crypto/internal/skcipher.h>
11888 #include <crypto/internal/hash.h>
11889 #include <crypto/internal/rsa.h>
11890 --- a/drivers/crypto/caam/ctrl.c
11891 +++ b/drivers/crypto/caam/ctrl.c
11892 @@ -2,6 +2,7 @@
11893 * Controller-level driver, kernel property detection, initialization
11894 *
11895 * Copyright 2008-2012 Freescale Semiconductor, Inc.
11896 + * Copyright 2018 NXP
11897 */
11898
11899 #include <linux/device.h>
11900 @@ -16,17 +17,15 @@
11901 #include "desc_constr.h"
11902 #include "ctrl.h"
11903
11904 -bool caam_little_end;
11905 -EXPORT_SYMBOL(caam_little_end);
11906 bool caam_dpaa2;
11907 EXPORT_SYMBOL(caam_dpaa2);
11908 -bool caam_imx;
11909 -EXPORT_SYMBOL(caam_imx);
11910
11911 #ifdef CONFIG_CAAM_QI
11912 #include "qi.h"
11913 #endif
11914
11915 +static struct platform_device *caam_dma_dev;
11916 +
11917 /*
11918 * i.MX targets tend to have clock control subsystems that can
11919 * enable/disable clocking to our device.
11920 @@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
11921 struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
11922 struct caam_deco __iomem *deco = ctrlpriv->deco;
11923 unsigned int timeout = 100000;
11924 - u32 deco_dbg_reg, flags;
11925 + u32 deco_dbg_reg, deco_state, flags;
11926 int i;
11927
11928
11929 @@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
11930 timeout = 10000000;
11931 do {
11932 deco_dbg_reg = rd_reg32(&deco->desc_dbg);
11933 +
11934 + if (ctrlpriv->era < 10)
11935 + deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
11936 + DESC_DBG_DECO_STAT_SHIFT;
11937 + else
11938 + deco_state = (rd_reg32(&deco->dbg_exec) &
11939 + DESC_DER_DECO_STAT_MASK) >>
11940 + DESC_DER_DECO_STAT_SHIFT;
11941 +
11942 /*
11943 * If an error occurred in the descriptor, then
11944 * the DECO status field will be set to 0x0D
11945 */
11946 - if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
11947 - DESC_DBG_DECO_STAT_HOST_ERR)
11948 + if (deco_state == DECO_STAT_HOST_ERR)
11949 break;
11950 +
11951 cpu_relax();
11952 } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
11953
11954 @@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
11955 of_platform_depopulate(ctrldev);
11956
11957 #ifdef CONFIG_CAAM_QI
11958 - if (ctrlpriv->qidev)
11959 - caam_qi_shutdown(ctrlpriv->qidev);
11960 + if (ctrlpriv->qi_init)
11961 + caam_qi_shutdown(ctrldev);
11962 #endif
11963
11964 /*
11965 * De-initialize RNG state handles initialized by this driver.
11966 - * In case of DPAA 2.x, RNG is managed by MC firmware.
11967 + * In case of SoCs with Management Complex, RNG is managed by MC f/w.
11968 */
11969 - if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
11970 + if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
11971 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
11972
11973 /* Shut down debug views */
11974 @@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
11975 debugfs_remove_recursive(ctrlpriv->dfs_root);
11976 #endif
11977
11978 + if (caam_dma_dev)
11979 + platform_device_unregister(caam_dma_dev);
11980 +
11981 /* Unmap controller region */
11982 iounmap(ctrl);
11983
11984 @@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
11985 {.family = "Freescale i.MX"},
11986 {},
11987 };
11988 + static struct platform_device_info caam_dma_pdev_info = {
11989 + .name = "caam-dma",
11990 + .id = PLATFORM_DEVID_NONE
11991 + };
11992 struct device *dev;
11993 struct device_node *nprop, *np;
11994 struct caam_ctrl __iomem *ctrl;
11995 @@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
11996 struct caam_perfmon *perfmon;
11997 #endif
11998 u32 scfgr, comp_params;
11999 - u32 cha_vid_ls;
12000 + u8 rng_vid;
12001 int pg_size;
12002 int BLOCK_OFFSET = 0;
12003
12004 @@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
12005 dev_set_drvdata(dev, ctrlpriv);
12006 nprop = pdev->dev.of_node;
12007
12008 + /* Get configuration properties from device tree */
12009 + /* First, get register page */
12010 + ctrl = of_iomap(nprop, 0);
12011 + if (!ctrl) {
12012 + dev_err(dev, "caam: of_iomap() failed\n");
12013 + return -ENOMEM;
12014 + }
12015 +
12016 + caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12017 + (CSTA_PLEND | CSTA_ALT_PLEND));
12018 caam_imx = (bool)soc_device_match(imx_soc);
12019
12020 + comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12021 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12022 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12023 +
12024 +#ifdef CONFIG_CAAM_QI
12025 + /* If (DPAA 1.x) QI present, check whether dependencies are available */
12026 + if (ctrlpriv->qi_present && !caam_dpaa2) {
12027 + ret = qman_is_probed();
12028 + if (!ret) {
12029 + ret = -EPROBE_DEFER;
12030 + goto iounmap_ctrl;
12031 + } else if (ret < 0) {
12032 + dev_err(dev, "failing probe due to qman probe error\n");
12033 + ret = -ENODEV;
12034 + goto iounmap_ctrl;
12035 + }
12036 +
12037 + ret = qman_portals_probed();
12038 + if (!ret) {
12039 + ret = -EPROBE_DEFER;
12040 + goto iounmap_ctrl;
12041 + } else if (ret < 0) {
12042 + dev_err(dev, "failing probe due to qman portals probe error\n");
12043 + ret = -ENODEV;
12044 + goto iounmap_ctrl;
12045 + }
12046 + }
12047 +#endif
12048 +
12049 /* Enable clocking */
12050 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
12051 if (IS_ERR(clk)) {
12052 ret = PTR_ERR(clk);
12053 dev_err(&pdev->dev,
12054 "can't identify CAAM ipg clk: %d\n", ret);
12055 - return ret;
12056 + goto iounmap_ctrl;
12057 }
12058 ctrlpriv->caam_ipg = clk;
12059
12060 @@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
12061 ret = PTR_ERR(clk);
12062 dev_err(&pdev->dev,
12063 "can't identify CAAM mem clk: %d\n", ret);
12064 - return ret;
12065 + goto iounmap_ctrl;
12066 }
12067 ctrlpriv->caam_mem = clk;
12068
12069 @@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
12070 ret = PTR_ERR(clk);
12071 dev_err(&pdev->dev,
12072 "can't identify CAAM aclk clk: %d\n", ret);
12073 - return ret;
12074 + goto iounmap_ctrl;
12075 }
12076 ctrlpriv->caam_aclk = clk;
12077
12078 @@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
12079 ret = PTR_ERR(clk);
12080 dev_err(&pdev->dev,
12081 "can't identify CAAM emi_slow clk: %d\n", ret);
12082 - return ret;
12083 + goto iounmap_ctrl;
12084 }
12085 ctrlpriv->caam_emi_slow = clk;
12086 }
12087 @@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
12088 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
12089 if (ret < 0) {
12090 dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
12091 - return ret;
12092 + goto iounmap_ctrl;
12093 }
12094
12095 ret = clk_prepare_enable(ctrlpriv->caam_mem);
12096 @@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
12097 }
12098 }
12099
12100 - /* Get configuration properties from device tree */
12101 - /* First, get register page */
12102 - ctrl = of_iomap(nprop, 0);
12103 - if (ctrl == NULL) {
12104 - dev_err(dev, "caam: of_iomap() failed\n");
12105 - ret = -ENOMEM;
12106 - goto disable_caam_emi_slow;
12107 - }
12108 -
12109 - caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12110 - (CSTA_PLEND | CSTA_ALT_PLEND));
12111 -
12112 - /* Finding the page size for using the CTPR_MS register */
12113 - comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12114 - pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12115 -
12116 /* Allocating the BLOCK_OFFSET based on the supported page size on
12117 * the platform
12118 */
12119 + pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12120 if (pg_size == 0)
12121 BLOCK_OFFSET = PG_SIZE_4K;
12122 else
12123 @@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
12124 /*
12125 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
12126 * long pointers in master configuration register.
12127 - * In case of DPAA 2.x, Management Complex firmware performs
12128 + * In case of SoCs with Management Complex, MC f/w performs
12129 * the configuration.
12130 */
12131 - caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12132 - if (!caam_dpaa2)
12133 + np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
12134 + ctrlpriv->mc_en = !!np;
12135 + of_node_put(np);
12136 +
12137 + if (!ctrlpriv->mc_en)
12138 clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
12139 MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
12140 MCFGR_WDENABLE | MCFGR_LARGE_BURST |
12141 @@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
12142 }
12143 if (ret) {
12144 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
12145 - goto iounmap_ctrl;
12146 + goto disable_caam_emi_slow;
12147 }
12148
12149 - ret = of_platform_populate(nprop, caam_match, NULL, dev);
12150 - if (ret) {
12151 - dev_err(dev, "JR platform devices creation error\n");
12152 - goto iounmap_ctrl;
12153 - }
12154 + ctrlpriv->era = caam_get_era();
12155 + ctrlpriv->domain = iommu_get_domain_for_dev(dev);
12156
12157 #ifdef CONFIG_DEBUG_FS
12158 /*
12159 @@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
12160 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
12161 #endif
12162
12163 - ring = 0;
12164 - for_each_available_child_of_node(nprop, np)
12165 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12166 - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12167 - ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12168 - ((__force uint8_t *)ctrl +
12169 - (ring + JR_BLOCK_NUMBER) *
12170 - BLOCK_OFFSET
12171 - );
12172 - ctrlpriv->total_jobrs++;
12173 - ring++;
12174 - }
12175 -
12176 /* Check to see if (DPAA 1.x) QI present. If so, enable */
12177 - ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12178 if (ctrlpriv->qi_present && !caam_dpaa2) {
12179 ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
12180 ((__force uint8_t *)ctrl +
12181 @@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
12182 #endif
12183 }
12184
12185 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
12186 + if (ret) {
12187 + dev_err(dev, "JR platform devices creation error\n");
12188 + goto shutdown_qi;
12189 + }
12190 +
12191 + ring = 0;
12192 + for_each_available_child_of_node(nprop, np)
12193 + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12194 + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12195 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12196 + ((__force uint8_t *)ctrl +
12197 + (ring + JR_BLOCK_NUMBER) *
12198 + BLOCK_OFFSET
12199 + );
12200 + ctrlpriv->total_jobrs++;
12201 + ring++;
12202 + }
12203 +
12204 /* If no QI and no rings specified, quit and go home */
12205 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
12206 dev_err(dev, "no queues configured, terminating\n");
12207 @@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
12208 goto caam_remove;
12209 }
12210
12211 - cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
12212 + caam_dma_pdev_info.parent = dev;
12213 + caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
12214 + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
12215 + if (IS_ERR(caam_dma_dev)) {
12216 + dev_err(dev, "Unable to create and register caam-dma dev\n");
12217 + caam_dma_dev = NULL;
12218 + } else {
12219 + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
12220 + }
12221 +
12222 + if (ctrlpriv->era < 10)
12223 + rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
12224 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
12225 + else
12226 + rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
12227 + CHA_VER_VID_SHIFT;
12228
12229 /*
12230 * If SEC has RNG version >= 4 and RNG state handle has not been
12231 * already instantiated, do RNG instantiation
12232 - * In case of DPAA 2.x, RNG is managed by MC firmware.
12233 + * In case of SoCs with Management Complex, RNG is managed by MC f/w.
12234 */
12235 - if (!caam_dpaa2 &&
12236 - (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
12237 + if (!ctrlpriv->mc_en && rng_vid >= 4) {
12238 ctrlpriv->rng4_sh_init =
12239 rd_reg32(&ctrl->r4tst[0].rdsta);
12240 /*
12241 @@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
12242
12243 /* Report "alive" for developer to see */
12244 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
12245 - caam_get_era());
12246 - dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
12247 - ctrlpriv->total_jobrs, ctrlpriv->qi_present,
12248 - caam_dpaa2 ? "yes" : "no");
12249 + ctrlpriv->era);
12250 + dev_info(dev, "job rings = %d, qi = %d\n",
12251 + ctrlpriv->total_jobrs, ctrlpriv->qi_present);
12252
12253 #ifdef CONFIG_DEBUG_FS
12254 debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
12255 @@ -816,8 +873,11 @@ caam_remove:
12256 caam_remove(pdev);
12257 return ret;
12258
12259 -iounmap_ctrl:
12260 - iounmap(ctrl);
12261 +shutdown_qi:
12262 +#ifdef CONFIG_CAAM_QI
12263 + if (ctrlpriv->qi_init)
12264 + caam_qi_shutdown(dev);
12265 +#endif
12266 disable_caam_emi_slow:
12267 if (ctrlpriv->caam_emi_slow)
12268 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
12269 @@ -827,6 +887,8 @@ disable_caam_mem:
12270 clk_disable_unprepare(ctrlpriv->caam_mem);
12271 disable_caam_ipg:
12272 clk_disable_unprepare(ctrlpriv->caam_ipg);
12273 +iounmap_ctrl:
12274 + iounmap(ctrl);
12275 return ret;
12276 }
12277
12278 --- a/drivers/crypto/caam/desc.h
12279 +++ b/drivers/crypto/caam/desc.h
12280 @@ -4,6 +4,7 @@
12281 * Definitions to support CAAM descriptor instruction generation
12282 *
12283 * Copyright 2008-2011 Freescale Semiconductor, Inc.
12284 + * Copyright 2018 NXP
12285 */
12286
12287 #ifndef DESC_H
12288 @@ -42,6 +43,7 @@
12289 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
12290 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
12291 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
12292 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
12293 #define CMD_STORE (0x0a << CMD_SHIFT)
12294 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
12295 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
12296 @@ -242,6 +244,7 @@
12297 #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
12298 #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
12299 #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
12300 +#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
12301 #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
12302
12303 /* Offset in source/destination */
12304 @@ -284,6 +287,12 @@
12305 #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
12306 #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
12307
12308 +/* Special Length definitions when dst=sm, nfifo-{sm,m} */
12309 +#define LDLEN_MATH0 0
12310 +#define LDLEN_MATH1 1
12311 +#define LDLEN_MATH2 2
12312 +#define LDLEN_MATH3 3
12313 +
12314 /*
12315 * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
12316 * Command Constructs
12317 @@ -355,6 +364,7 @@
12318 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
12319 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
12320 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
12321 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
12322
12323 /* Other types. Need to OR in last/flush bits as desired */
12324 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
12325 @@ -408,6 +418,7 @@
12326 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
12327 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
12328 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
12329 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
12330 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
12331
12332 /*
12333 @@ -444,6 +455,18 @@
12334 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
12335 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
12336 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
12337 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
12338 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
12339 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
12340 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
12341 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
12342 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
12343 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
12344 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
12345 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
12346 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
12347 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
12348 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
12349
12350 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
12351 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
12352 @@ -1093,6 +1116,22 @@
12353 /* MacSec protinfos */
12354 #define OP_PCL_MACSEC 0x0001
12355
12356 +/* Derived Key Protocol (DKP) Protinfo */
12357 +#define OP_PCL_DKP_SRC_SHIFT 14
12358 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
12359 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
12360 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
12361 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
12362 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
12363 +#define OP_PCL_DKP_DST_SHIFT 12
12364 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
12365 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
12366 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
12367 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
12368 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
12369 +#define OP_PCL_DKP_KEY_SHIFT 0
12370 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
12371 +
12372 /* PKI unidirectional protocol protinfo bits */
12373 #define OP_PCL_PKPROT_TEST 0x0008
12374 #define OP_PCL_PKPROT_DECRYPT 0x0004
12375 @@ -1105,6 +1144,12 @@
12376 #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
12377 #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
12378
12379 +/* version register fields */
12380 +#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
12381 +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
12382 +#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
12383 +#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
12384 +
12385 #define OP_ALG_ALGSEL_SHIFT 16
12386 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
12387 #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
12388 @@ -1124,6 +1169,8 @@
12389 #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
12390 #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
12391 #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
12392 +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
12393 +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
12394
12395 #define OP_ALG_AAI_SHIFT 4
12396 #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
12397 @@ -1171,6 +1218,11 @@
12398 #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
12399 #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
12400
12401 +/* Chacha20 AAI set */
12402 +#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
12403 +#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
12404 +#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
12405 +
12406 /* hmac/smac AAI set */
12407 #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
12408 #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
12409 @@ -1359,6 +1411,7 @@
12410 #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
12411 #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
12412 #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
12413 +#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
12414
12415 #define MOVE_DEST_SHIFT 16
12416 #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
12417 @@ -1385,6 +1438,10 @@
12418
12419 #define MOVELEN_MRSEL_SHIFT 0
12420 #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
12421 +#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
12422 +#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
12423 +#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
12424 +#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
12425
12426 /*
12427 * MATH Command Constructs
12428 @@ -1440,10 +1497,11 @@
12429 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
12430 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
12431 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
12432 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
12433 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
12434 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
12435 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
12436 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
12437 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
12438
12439 /* Destination selectors */
12440 #define MATH_DEST_SHIFT 8
12441 @@ -1452,6 +1510,7 @@
12442 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
12443 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
12444 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
12445 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
12446 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
12447 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
12448 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
12449 @@ -1560,6 +1619,7 @@
12450 #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
12451 #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
12452 #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
12453 +#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
12454 #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
12455 #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
12456
12457 @@ -1624,4 +1684,31 @@
12458 /* Frame Descriptor Command for Replacement Job Descriptor */
12459 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
12460
12461 +/* CHA Control Register bits */
12462 +#define CCTRL_RESET_CHA_ALL 0x1
12463 +#define CCTRL_RESET_CHA_AESA 0x2
12464 +#define CCTRL_RESET_CHA_DESA 0x4
12465 +#define CCTRL_RESET_CHA_AFHA 0x8
12466 +#define CCTRL_RESET_CHA_KFHA 0x10
12467 +#define CCTRL_RESET_CHA_SF8A 0x20
12468 +#define CCTRL_RESET_CHA_PKHA 0x40
12469 +#define CCTRL_RESET_CHA_MDHA 0x80
12470 +#define CCTRL_RESET_CHA_CRCA 0x100
12471 +#define CCTRL_RESET_CHA_RNG 0x200
12472 +#define CCTRL_RESET_CHA_SF9A 0x400
12473 +#define CCTRL_RESET_CHA_ZUCE 0x800
12474 +#define CCTRL_RESET_CHA_ZUCA 0x1000
12475 +#define CCTRL_UNLOAD_PK_A0 0x10000
12476 +#define CCTRL_UNLOAD_PK_A1 0x20000
12477 +#define CCTRL_UNLOAD_PK_A2 0x40000
12478 +#define CCTRL_UNLOAD_PK_A3 0x80000
12479 +#define CCTRL_UNLOAD_PK_B0 0x100000
12480 +#define CCTRL_UNLOAD_PK_B1 0x200000
12481 +#define CCTRL_UNLOAD_PK_B2 0x400000
12482 +#define CCTRL_UNLOAD_PK_B3 0x800000
12483 +#define CCTRL_UNLOAD_PK_N 0x1000000
12484 +#define CCTRL_UNLOAD_PK_A 0x4000000
12485 +#define CCTRL_UNLOAD_PK_B 0x8000000
12486 +#define CCTRL_UNLOAD_SBOX 0x10000000
12487 +
12488 #endif /* DESC_H */
12489 --- a/drivers/crypto/caam/desc_constr.h
12490 +++ b/drivers/crypto/caam/desc_constr.h
12491 @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
12492 append_ptr(desc, ptr);
12493 }
12494
12495 -static inline void append_data(u32 * const desc, void *data, int len)
12496 +static inline void append_data(u32 * const desc, const void *data, int len)
12497 {
12498 u32 *offset = desc_end(desc);
12499
12500 @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
12501 append_cmd(desc, len);
12502 }
12503
12504 -static inline void append_cmd_data(u32 * const desc, void *data, int len,
12505 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
12506 u32 command)
12507 {
12508 append_cmd(desc, command | IMMEDIATE | len);
12509 @@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
12510 }
12511 APPEND_CMD_RET(jump, JUMP)
12512 APPEND_CMD_RET(move, MOVE)
12513 +APPEND_CMD_RET(moveb, MOVEB)
12514 +APPEND_CMD_RET(move_len, MOVE_LEN)
12515
12516 static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
12517 {
12518 @@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
12519 APPEND_SEQ_PTR_INTLEN(out, OUT)
12520
12521 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
12522 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12523 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12524 unsigned int len, u32 options) \
12525 { \
12526 PRINT_POS; \
12527 @@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
12528 * from length of immediate data provided, e.g., split keys
12529 */
12530 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
12531 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12532 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12533 unsigned int data_len, \
12534 unsigned int len, u32 options) \
12535 { \
12536 @@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
12537 u32 options) \
12538 { \
12539 PRINT_POS; \
12540 - append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
12541 + if (options & LDST_LEN_MASK) \
12542 + append_cmd(desc, CMD_##op | IMMEDIATE | options); \
12543 + else \
12544 + append_cmd(desc, CMD_##op | IMMEDIATE | options | \
12545 + sizeof(type)); \
12546 append_cmd(desc, immediate); \
12547 }
12548 APPEND_CMD_RAW_IMM(load, LOAD, u32);
12549 @@ -452,7 +458,7 @@ struct alginfo {
12550 unsigned int keylen_pad;
12551 union {
12552 dma_addr_t key_dma;
12553 - void *key_virt;
12554 + const void *key_virt;
12555 };
12556 bool key_inline;
12557 };
12558 @@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
12559 return (rem_bytes >= 0) ? 0 : -1;
12560 }
12561
12562 +/**
12563 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
12564 + * @desc: pointer to buffer used for descriptor construction
12565 + * @adata: pointer to authentication transform definitions.
12566 + * keylen should be the length of the initial key, while keylen_pad
12567 + * is the length of the derived (split) key.
12568 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
12569 + * SHA256, SHA384, SHA512}.
12570 + */
12571 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
12572 +{
12573 + u32 protid;
12574 +
12575 + /*
12576 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
12577 + * to OP_PCLID_DKP_{MD5, SHA*}
12578 + */
12579 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
12580 + (0x20 << OP_ALG_ALGSEL_SHIFT);
12581 +
12582 + if (adata->key_inline) {
12583 + int words;
12584 +
12585 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12586 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
12587 + adata->keylen);
12588 + append_data(desc, adata->key_virt, adata->keylen);
12589 +
12590 + /* Reserve space in descriptor buffer for the derived key */
12591 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
12592 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
12593 + if (words)
12594 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
12595 + } else {
12596 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12597 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
12598 + adata->keylen);
12599 + append_ptr(desc, adata->key_dma);
12600 + }
12601 +}
12602 +
12603 #endif /* DESC_CONSTR_H */
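A minimal sketch of driving append_proto_dkp() with an inline HMAC-SHA256 key. The padded (split) key length and the other field values are illustrative assumptions; in the drivers these come from the transform context.

	static void example_dkp_split_key(u32 *desc, struct alginfo *adata,
					  const u8 *key, unsigned int keylen)
	{
		adata->algtype = OP_ALG_ALGSEL_SHA256;
		adata->keylen = keylen;
		adata->keylen_pad = 64;	/* split key size assumed for SHA-256 */
		adata->key_virt = key;
		adata->key_inline = true;

		init_sh_desc(desc, HDR_SHARE_SERIAL);
		/* DKP converts the plain key into an MDHA split key in the descriptor */
		append_proto_dkp(desc, adata);
	}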
12604 --- /dev/null
12605 +++ b/drivers/crypto/caam/dpseci.c
12606 @@ -0,0 +1,865 @@
12607 +/*
12608 + * Copyright 2013-2016 Freescale Semiconductor Inc.
12609 + * Copyright 2017 NXP
12610 + *
12611 + * Redistribution and use in source and binary forms, with or without
12612 + * modification, are permitted provided that the following conditions are met:
12613 + * * Redistributions of source code must retain the above copyright
12614 + * notice, this list of conditions and the following disclaimer.
12615 + * * Redistributions in binary form must reproduce the above copyright
12616 + * notice, this list of conditions and the following disclaimer in the
12617 + * documentation and/or other materials provided with the distribution.
12618 + * * Neither the names of the above-listed copyright holders nor the
12619 + * names of any contributors may be used to endorse or promote products
12620 + * derived from this software without specific prior written permission.
12621 + *
12622 + *
12623 + * ALTERNATIVELY, this software may be distributed under the terms of the
12624 + * GNU General Public License ("GPL") as published by the Free Software
12625 + * Foundation, either version 2 of that License or (at your option) any
12626 + * later version.
12627 + *
12628 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
12629 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
12630 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
12631 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
12632 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
12633 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
12634 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
12635 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
12636 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
12637 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
12638 + * POSSIBILITY OF SUCH DAMAGE.
12639 + */
12640 +
12641 +#include <linux/fsl/mc.h>
12642 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
12643 +#include "dpseci.h"
12644 +#include "dpseci_cmd.h"
12645 +
12646 +/**
12647 + * dpseci_open() - Open a control session for the specified object
12648 + * @mc_io: Pointer to MC portal's I/O object
12649 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12650 + * @dpseci_id: DPSECI unique ID
12651 + * @token: Returned token; use in subsequent API calls
12652 + *
12653 + * This function can be used to open a control session for an already created
12654 + * object; an object may have been declared in the DPL or by calling the
12655 + * dpseci_create() function.
12656 + * This function returns a unique authentication token, associated with the
12657 + * specific object ID and the specific MC portal; this token must be used in all
12658 + * subsequent commands for this specific object.
12659 + *
12660 + * Return: '0' on success, error code otherwise
12661 + */
12662 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
12663 + u16 *token)
12664 +{
12665 + struct fsl_mc_command cmd = { 0 };
12666 + struct dpseci_cmd_open *cmd_params;
12667 + int err;
12668 +
12669 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
12670 + cmd_flags,
12671 + 0);
12672 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
12673 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
12674 + err = mc_send_command(mc_io, &cmd);
12675 + if (err)
12676 + return err;
12677 +
12678 + *token = mc_cmd_hdr_read_token(&cmd);
12679 +
12680 + return 0;
12681 +}
12682 +
12683 +/**
12684 + * dpseci_close() - Close the control session of the object
12685 + * @mc_io: Pointer to MC portal's I/O object
12686 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12687 + * @token: Token of DPSECI object
12688 + *
12689 + * After this function is called, no further operations are allowed on the
12690 + * object without opening a new control session.
12691 + *
12692 + * Return: '0' on success, error code otherwise
12693 + */
12694 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12695 +{
12696 + struct fsl_mc_command cmd = { 0 };
12697 +
12698 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
12699 + cmd_flags,
12700 + token);
12701 + return mc_send_command(mc_io, &cmd);
12702 +}
12703 +
12704 +/**
12705 + * dpseci_create() - Create the DPSECI object
12706 + * @mc_io: Pointer to MC portal's I/O object
12707 + * @dprc_token: Parent container token; '0' for default container
12708 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12709 + * @cfg: Configuration structure
12710 + * @obj_id: returned object id
12711 + *
12712 + * Create the DPSECI object, allocate required resources and perform required
12713 + * initialization.
12714 + *
12715 + * The object can be created either by declaring it in the DPL file, or by
12716 + * calling this function.
12717 + *
12718 + * The function accepts an authentication token of a parent container that this
12719 + * object should be assigned to. The token can be '0' so the object will be
12720 + * assigned to the default container.
12721 + * The newly created object can be opened with the returned object id and using
12722 + * the container's associated tokens and MC portals.
12723 + *
12724 + * Return: '0' on success, error code otherwise
12725 + */
12726 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12727 + const struct dpseci_cfg *cfg, u32 *obj_id)
12728 +{
12729 + struct fsl_mc_command cmd = { 0 };
12730 + struct dpseci_cmd_create *cmd_params;
12731 + int i, err;
12732 +
12733 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
12734 + cmd_flags,
12735 + dprc_token);
12736 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
12737 + for (i = 0; i < 8; i++)
12738 + cmd_params->priorities[i] = cfg->priorities[i];
12739 + for (i = 0; i < 8; i++)
12740 + cmd_params->priorities2[i] = cfg->priorities[8 + i];
12741 + cmd_params->num_tx_queues = cfg->num_tx_queues;
12742 + cmd_params->num_rx_queues = cfg->num_rx_queues;
12743 + cmd_params->options = cpu_to_le32(cfg->options);
12744 + err = mc_send_command(mc_io, &cmd);
12745 + if (err)
12746 + return err;
12747 +
12748 + *obj_id = mc_cmd_read_object_id(&cmd);
12749 +
12750 + return 0;
12751 +}
12752 +
12753 +/**
12754 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
12755 + * @mc_io: Pointer to MC portal's I/O object
12756 + * @dprc_token: Parent container token; '0' for default container
12757 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12758 + * @object_id: The object id; it must be a valid id within the container that
12759 + * created this object
12760 + *
12761 + * The function accepts the authentication token of the parent container that
12762 + * created the object (not the one that currently owns the object). The object
12763 + * is searched within parent using the provided 'object_id'.
12764 + * All tokens to the object must be closed before calling destroy.
12765 + *
12766 + * Return: '0' on success, error code otherwise
12767 + */
12768 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12769 + u32 object_id)
12770 +{
12771 + struct fsl_mc_command cmd = { 0 };
12772 + struct dpseci_cmd_destroy *cmd_params;
12773 +
12774 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
12775 + cmd_flags,
12776 + dprc_token);
12777 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
12778 + cmd_params->object_id = cpu_to_le32(object_id);
12779 +
12780 + return mc_send_command(mc_io, &cmd);
12781 +}
12782 +
12783 +/**
12784 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
12785 + * @mc_io: Pointer to MC portal's I/O object
12786 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12787 + * @token: Token of DPSECI object
12788 + *
12789 + * Return: '0' on success, error code otherwise
12790 + */
12791 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12792 +{
12793 + struct fsl_mc_command cmd = { 0 };
12794 +
12795 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
12796 + cmd_flags,
12797 + token);
12798 + return mc_send_command(mc_io, &cmd);
12799 +}
12800 +
12801 +/**
12802 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
12803 + * @mc_io: Pointer to MC portal's I/O object
12804 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12805 + * @token: Token of DPSECI object
12806 + *
12807 + * Return: '0' on success, error code otherwise
12808 + */
12809 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12810 +{
12811 + struct fsl_mc_command cmd = { 0 };
12812 +
12813 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
12814 + cmd_flags,
12815 + token);
12816 +
12817 + return mc_send_command(mc_io, &cmd);
12818 +}
12819 +
12820 +/**
12821 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
12822 + * @mc_io: Pointer to MC portal's I/O object
12823 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12824 + * @token: Token of DPSECI object
12825 + * @en: Returns '1' if object is enabled; '0' otherwise
12826 + *
12827 + * Return: '0' on success, error code otherwise
12828 + */
12829 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12830 + int *en)
12831 +{
12832 + struct fsl_mc_command cmd = { 0 };
12833 + struct dpseci_rsp_is_enabled *rsp_params;
12834 + int err;
12835 +
12836 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
12837 + cmd_flags,
12838 + token);
12839 + err = mc_send_command(mc_io, &cmd);
12840 + if (err)
12841 + return err;
12842 +
12843 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
12844 + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
12845 +
12846 + return 0;
12847 +}
12848 +
12849 +/**
12850 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
12851 + * @mc_io: Pointer to MC portal's I/O object
12852 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12853 + * @token: Token of DPSECI object
12854 + *
12855 + * Return: '0' on success, error code otherwise
12856 + */
12857 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12858 +{
12859 + struct fsl_mc_command cmd = { 0 };
12860 +
12861 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
12862 + cmd_flags,
12863 + token);
12864 +
12865 + return mc_send_command(mc_io, &cmd);
12866 +}
12867 +
12868 +/**
12869 + * dpseci_get_irq_enable() - Get overall interrupt state
12870 + * @mc_io: Pointer to MC portal's I/O object
12871 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12872 + * @token: Token of DPSECI object
12873 + * @irq_index: The interrupt index to configure
12874 + * @en: Returned Interrupt state - enable = 1, disable = 0
12875 + *
12876 + * Return: '0' on success, error code otherwise
12877 + */
12878 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12879 + u8 irq_index, u8 *en)
12880 +{
12881 + struct fsl_mc_command cmd = { 0 };
12882 + struct dpseci_cmd_irq_enable *cmd_params;
12883 + struct dpseci_rsp_get_irq_enable *rsp_params;
12884 + int err;
12885 +
12886 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
12887 + cmd_flags,
12888 + token);
12889 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12890 + cmd_params->irq_index = irq_index;
12891 + err = mc_send_command(mc_io, &cmd);
12892 + if (err)
12893 + return err;
12894 +
12895 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
12896 + *en = rsp_params->enable_state;
12897 +
12898 + return 0;
12899 +}
12900 +
12901 +/**
12902 + * dpseci_set_irq_enable() - Set overall interrupt state.
12903 + * @mc_io: Pointer to MC portal's I/O object
12904 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12905 + * @token: Token of DPSECI object
12906 + * @irq_index: The interrupt index to configure
12907 + * @en: Interrupt state - enable = 1, disable = 0
12908 + *
12909 + * Allows GPP software to control when interrupts are generated.
12910 + * Each interrupt can have up to 32 causes. The enable/disable control's the
12911 + * overall interrupt state. If the interrupt is disabled no causes will cause
12912 + * an interrupt.
12913 + *
12914 + * Return: '0' on success, error code otherwise
12915 + */
12916 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12917 + u8 irq_index, u8 en)
12918 +{
12919 + struct fsl_mc_command cmd = { 0 };
12920 + struct dpseci_cmd_irq_enable *cmd_params;
12921 +
12922 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
12923 + cmd_flags,
12924 + token);
12925 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12926 + cmd_params->irq_index = irq_index;
12927 + cmd_params->enable_state = en;
12928 +
12929 + return mc_send_command(mc_io, &cmd);
12930 +}
12931 +
12932 +/**
12933 + * dpseci_get_irq_mask() - Get interrupt mask.
12934 + * @mc_io: Pointer to MC portal's I/O object
12935 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12936 + * @token: Token of DPSECI object
12937 + * @irq_index: The interrupt index to configure
12938 + * @mask: Returned event mask to trigger interrupt
12939 + *
12940 + * Every interrupt can have up to 32 causes and the interrupt model supports
12941 + * masking/unmasking each cause independently.
12942 + *
12943 + * Return: '0' on success, error code otherwise
12944 + */
12945 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12946 + u8 irq_index, u32 *mask)
12947 +{
12948 + struct fsl_mc_command cmd = { 0 };
12949 + struct dpseci_cmd_irq_mask *cmd_params;
12950 + int err;
12951 +
12952 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
12953 + cmd_flags,
12954 + token);
12955 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12956 + cmd_params->irq_index = irq_index;
12957 + err = mc_send_command(mc_io, &cmd);
12958 + if (err)
12959 + return err;
12960 +
12961 + *mask = le32_to_cpu(cmd_params->mask);
12962 +
12963 + return 0;
12964 +}
12965 +
12966 +/**
12967 + * dpseci_set_irq_mask() - Set interrupt mask.
12968 + * @mc_io: Pointer to MC portal's I/O object
12969 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12970 + * @token: Token of DPSECI object
12971 + * @irq_index: The interrupt index to configure
12972 + * @mask: event mask to trigger interrupt;
12973 + * each bit:
12974 + * 0 = ignore event
12975 + * 1 = consider event for asserting IRQ
12976 + *
12977 + * Every interrupt can have up to 32 causes and the interrupt model supports
12978 + * masking/unmasking each cause independently.
12979 + *
12980 + * Return: '0' on success, error code otherwise
12981 + */
12982 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12983 + u8 irq_index, u32 mask)
12984 +{
12985 + struct fsl_mc_command cmd = { 0 };
12986 + struct dpseci_cmd_irq_mask *cmd_params;
12987 +
12988 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
12989 + cmd_flags,
12990 + token);
12991 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12992 + cmd_params->mask = cpu_to_le32(mask);
12993 + cmd_params->irq_index = irq_index;
12994 +
12995 + return mc_send_command(mc_io, &cmd);
12996 +}
12997 +
12998 +/**
12999 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
13000 + * @mc_io: Pointer to MC portal's I/O object
13001 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13002 + * @token: Token of DPSECI object
13003 + * @irq_index: The interrupt index to configure
13004 + * @status: Returned interrupts status - one bit per cause:
13005 + * 0 = no interrupt pending
13006 + * 1 = interrupt pending
13007 + *
13008 + * Return: '0' on success, error code otherwise
13009 + */
13010 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13011 + u8 irq_index, u32 *status)
13012 +{
13013 + struct fsl_mc_command cmd = { 0 };
13014 + struct dpseci_cmd_irq_status *cmd_params;
13015 + int err;
13016 +
13017 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
13018 + cmd_flags,
13019 + token);
13020 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13021 + cmd_params->status = cpu_to_le32(*status);
13022 + cmd_params->irq_index = irq_index;
13023 + err = mc_send_command(mc_io, &cmd);
13024 + if (err)
13025 + return err;
13026 +
13027 + *status = le32_to_cpu(cmd_params->status);
13028 +
13029 + return 0;
13030 +}
13031 +
13032 +/**
13033 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
13034 + * @mc_io: Pointer to MC portal's I/O object
13035 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13036 + * @token: Token of DPSECI object
13037 + * @irq_index: The interrupt index to configure
13038 + * @status: bits to clear (W1C) - one bit per cause:
13039 + * 0 = don't change
13040 + * 1 = clear status bit
13041 + *
13042 + * Return: '0' on success, error code otherwise
13043 + */
13044 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13045 + u8 irq_index, u32 status)
13046 +{
13047 + struct fsl_mc_command cmd = { 0 };
13048 + struct dpseci_cmd_irq_status *cmd_params;
13049 +
13050 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
13051 + cmd_flags,
13052 + token);
13053 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13054 + cmd_params->status = cpu_to_le32(status);
13055 + cmd_params->irq_index = irq_index;
13056 +
13057 + return mc_send_command(mc_io, &cmd);
13058 +}
13059 +
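A short sketch of the intended interrupt flow with the two helpers above: read the pending causes, then write-1-to-clear them. cmd_flags 0 and irq_index 0 are assumptions for illustration; the status value passed in is forwarded to the MC, so ~0 is used here to ask about every cause.

	static int example_ack_dpseci_irq(struct fsl_mc_io *mc_io, u16 token)
	{
		u32 status = ~0U;
		int err;

		err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
		if (err || !status)
			return err;

		return dpseci_clear_irq_status(mc_io, 0, token, 0, status);
	}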
13060 +/**
13061 + * dpseci_get_attributes() - Retrieve DPSECI attributes
13062 + * @mc_io: Pointer to MC portal's I/O object
13063 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13064 + * @token: Token of DPSECI object
13065 + * @attr: Returned object's attributes
13066 + *
13067 + * Return: '0' on success, error code otherwise
13068 + */
13069 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13070 + struct dpseci_attr *attr)
13071 +{
13072 + struct fsl_mc_command cmd = { 0 };
13073 + struct dpseci_rsp_get_attributes *rsp_params;
13074 + int err;
13075 +
13076 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
13077 + cmd_flags,
13078 + token);
13079 + err = mc_send_command(mc_io, &cmd);
13080 + if (err)
13081 + return err;
13082 +
13083 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
13084 + attr->id = le32_to_cpu(rsp_params->id);
13085 + attr->num_tx_queues = rsp_params->num_tx_queues;
13086 + attr->num_rx_queues = rsp_params->num_rx_queues;
13087 + attr->options = le32_to_cpu(rsp_params->options);
13088 +
13089 + return 0;
13090 +}
13091 +
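Putting the control-path helpers together, a hypothetical probe-time query might look like this (cmd_flags of 0 is assumed; the MC portal and object id come from the fsl-mc bus):

	static int example_query_dpseci(struct fsl_mc_io *mc_io, int dpseci_id)
	{
		struct dpseci_attr attr;
		u16 token;
		int err;

		err = dpseci_open(mc_io, 0, dpseci_id, &token);
		if (err)
			return err;

		err = dpseci_get_attributes(mc_io, 0, token, &attr);
		if (!err)
			pr_info("dpseci.%d: %d Tx queues, %d Rx queues\n",
				attr.id, attr.num_tx_queues, attr.num_rx_queues);

		dpseci_close(mc_io, 0, token);
		return err;
	}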
13092 +/**
13093 + * dpseci_set_rx_queue() - Set Rx queue configuration
13094 + * @mc_io: Pointer to MC portal's I/O object
13095 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13096 + * @token: Token of DPSECI object
13097 + * @queue: Select the queue relative to number of priorities configured at
13098 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
13099 + * Rx queues identically.
13100 + * @cfg: Rx queue configuration
13101 + *
13102 + * Return: '0' on success, error code otherwise
13103 + */
13104 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13105 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
13106 +{
13107 + struct fsl_mc_command cmd = { 0 };
13108 + struct dpseci_cmd_queue *cmd_params;
13109 +
13110 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
13111 + cmd_flags,
13112 + token);
13113 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13114 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13115 + cmd_params->priority = cfg->dest_cfg.priority;
13116 + cmd_params->queue = queue;
13117 + dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
13118 + cfg->dest_cfg.dest_type);
13119 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
13120 + cmd_params->options = cpu_to_le32(cfg->options);
13121 + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
13122 + cfg->order_preservation_en);
13123 +
13124 + return mc_send_command(mc_io, &cmd);
13125 +}
13126 +
13127 +/**
13128 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
13129 + * @mc_io: Pointer to MC portal's I/O object
13130 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13131 + * @token: Token of DPSECI object
13132 + * @queue: Select the queue relative to the number of priorities configured at
13133 + * DPSECI creation
13134 + * @attr: Returned Rx queue attributes
13135 + *
13136 + * Return: '0' on success, error code otherwise
13137 + */
13138 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13139 + u8 queue, struct dpseci_rx_queue_attr *attr)
13140 +{
13141 + struct fsl_mc_command cmd = { 0 };
13142 + struct dpseci_cmd_queue *cmd_params;
13143 + int err;
13144 +
13145 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
13146 + cmd_flags,
13147 + token);
13148 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13149 + cmd_params->queue = queue;
13150 + err = mc_send_command(mc_io, &cmd);
13151 + if (err)
13152 + return err;
13153 +
13154 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
13155 + attr->dest_cfg.priority = cmd_params->priority;
13156 + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
13157 + DEST_TYPE);
13158 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
13159 + attr->fqid = le32_to_cpu(cmd_params->fqid);
13160 + attr->order_preservation_en =
13161 + dpseci_get_field(cmd_params->order_preservation_en,
13162 + ORDER_PRESERVATION);
13163 +
13164 + return 0;
13165 +}
13166 +
13167 +/**
13168 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
13169 + * @mc_io: Pointer to MC portal's I/O object
13170 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13171 + * @token: Token of DPSECI object
13172 + * @queue: Select the queue relative to the number of priorities configured at
13173 + * DPSECI creation
13174 + * @attr: Returned Tx queue attributes
13175 + *
13176 + * Return: '0' on success, error code otherwise
13177 + */
13178 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13179 + u8 queue, struct dpseci_tx_queue_attr *attr)
13180 +{
13181 + struct fsl_mc_command cmd = { 0 };
13182 + struct dpseci_cmd_queue *cmd_params;
13183 + struct dpseci_rsp_get_tx_queue *rsp_params;
13184 + int err;
13185 +
13186 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
13187 + cmd_flags,
13188 + token);
13189 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13190 + cmd_params->queue = queue;
13191 + err = mc_send_command(mc_io, &cmd);
13192 + if (err)
13193 + return err;
13194 +
13195 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
13196 + attr->fqid = le32_to_cpu(rsp_params->fqid);
13197 + attr->priority = rsp_params->priority;
13198 +
13199 + return 0;
13200 +}
13201 +
13202 +/**
13203 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
13204 + * @mc_io: Pointer to MC portal's I/O object
13205 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13206 + * @token: Token of DPSECI object
13207 + * @attr: Returned SEC attributes
13208 + *
13209 + * Return: '0' on success, error code otherwise
13210 + */
13211 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13212 + struct dpseci_sec_attr *attr)
13213 +{
13214 + struct fsl_mc_command cmd = { 0 };
13215 + struct dpseci_rsp_get_sec_attr *rsp_params;
13216 + int err;
13217 +
13218 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
13219 + cmd_flags,
13220 + token);
13221 + err = mc_send_command(mc_io, &cmd);
13222 + if (err)
13223 + return err;
13224 +
13225 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
13226 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
13227 + attr->major_rev = rsp_params->major_rev;
13228 + attr->minor_rev = rsp_params->minor_rev;
13229 + attr->era = rsp_params->era;
13230 + attr->deco_num = rsp_params->deco_num;
13231 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
13232 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
13233 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
13234 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
13235 + attr->crc_acc_num = rsp_params->crc_acc_num;
13236 + attr->pk_acc_num = rsp_params->pk_acc_num;
13237 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
13238 + attr->rng_acc_num = rsp_params->rng_acc_num;
13239 + attr->md_acc_num = rsp_params->md_acc_num;
13240 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
13241 + attr->des_acc_num = rsp_params->des_acc_num;
13242 + attr->aes_acc_num = rsp_params->aes_acc_num;
13243 + attr->ccha_acc_num = rsp_params->ccha_acc_num;
13244 + attr->ptha_acc_num = rsp_params->ptha_acc_num;
13245 +
13246 + return 0;
13247 +}
13248 +
13249 +/**
13250 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
13251 + * @mc_io: Pointer to MC portal's I/O object
13252 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13253 + * @token: Token of DPSECI object
13254 + * @counters: Returned SEC counters
13255 + *
13256 + * Return: '0' on success, error code otherwise
13257 + */
13258 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13259 + struct dpseci_sec_counters *counters)
13260 +{
13261 + struct fsl_mc_command cmd = { 0 };
13262 + struct dpseci_rsp_get_sec_counters *rsp_params;
13263 + int err;
13264 +
13265 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
13266 + cmd_flags,
13267 + token);
13268 + err = mc_send_command(mc_io, &cmd);
13269 + if (err)
13270 + return err;
13271 +
13272 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
13273 + counters->dequeued_requests =
13274 + le64_to_cpu(rsp_params->dequeued_requests);
13275 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
13276 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
13277 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
13278 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
13279 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
13280 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
13281 +
13282 + return 0;
13283 +}
13284 +
13285 +/**
13286 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
13287 + * @mc_io: Pointer to MC portal's I/O object
13288 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13289 + * @major_ver: Major version of data path sec API
13290 + * @minor_ver: Minor version of data path sec API
13291 + *
13292 + * Return: '0' on success, error code otherwise
13293 + */
13294 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13295 + u16 *major_ver, u16 *minor_ver)
13296 +{
13297 + struct fsl_mc_command cmd = { 0 };
13298 + struct dpseci_rsp_get_api_version *rsp_params;
13299 + int err;
13300 +
13301 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
13302 + cmd_flags, 0);
13303 + err = mc_send_command(mc_io, &cmd);
13304 + if (err)
13305 + return err;
13306 +
13307 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
13308 + *major_ver = le16_to_cpu(rsp_params->major);
13309 + *minor_ver = le16_to_cpu(rsp_params->minor);
13310 +
13311 + return 0;
13312 +}
13313 +
13314 +/**
13315 + * dpseci_set_opr() - Set Order Restoration configuration
13316 + * @mc_io: Pointer to MC portal's I/O object
13317 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13318 + * @token: Token of DPSECI object
13319 + * @index: The queue index
13320 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
13321 + * OPR_OPT_RETIRE
13322 + * @cfg: Configuration options for the OPR
13323 + *
13324 + * Return: '0' on success, error code otherwise
13325 + */
13326 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13327 + u8 options, struct opr_cfg *cfg)
13328 +{
13329 + struct fsl_mc_command cmd = { 0 };
13330 + struct dpseci_cmd_opr *cmd_params;
13331 +
13332 + cmd.header = mc_encode_cmd_header(
13333 + DPSECI_CMDID_SET_OPR,
13334 + cmd_flags,
13335 + token);
13336 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13337 + cmd_params->index = index;
13338 + cmd_params->options = options;
13339 + cmd_params->oloe = cfg->oloe;
13340 + cmd_params->oeane = cfg->oeane;
13341 + cmd_params->olws = cfg->olws;
13342 + cmd_params->oa = cfg->oa;
13343 + cmd_params->oprrws = cfg->oprrws;
13344 +
13345 + return mc_send_command(mc_io, &cmd);
13346 +}
13347 +
13348 +/**
13349 + * dpseci_get_opr() - Retrieve Order Restoration config and query
13350 + * @mc_io: Pointer to MC portal's I/O object
13351 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13352 + * @token: Token of DPSECI object
13353 + * @index: The queue index
13354 + * @cfg: Returned OPR configuration
13355 + * @qry: Returned OPR query
13356 + *
13357 + * Return: '0' on success, error code otherwise
13358 + */
13359 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13360 + struct opr_cfg *cfg, struct opr_qry *qry)
13361 +{
13362 + struct fsl_mc_command cmd = { 0 };
13363 + struct dpseci_cmd_opr *cmd_params;
13364 + struct dpseci_rsp_get_opr *rsp_params;
13365 + int err;
13366 +
13367 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
13368 + cmd_flags,
13369 + token);
13370 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13371 + cmd_params->index = index;
13372 + err = mc_send_command(mc_io, &cmd);
13373 + if (err)
13374 + return err;
13375 +
13376 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
13377 + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
13378 + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
13379 + cfg->oloe = rsp_params->oloe;
13380 + cfg->oeane = rsp_params->oeane;
13381 + cfg->olws = rsp_params->olws;
13382 + cfg->oa = rsp_params->oa;
13383 + cfg->oprrws = rsp_params->oprrws;
13384 + qry->nesn = le16_to_cpu(rsp_params->nesn);
13385 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
13386 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
13387 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
13388 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
13389 + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
13390 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
13391 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
13392 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
13393 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
13394 +
13395 + return 0;
13396 +}
13397 +
13398 +/**
13399 + * dpseci_set_congestion_notification() - Set congestion group
13400 + * notification configuration
13401 + * @mc_io: Pointer to MC portal's I/O object
13402 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13403 + * @token: Token of DPSECI object
13404 + * @cfg: congestion notification configuration
13405 + *
13406 + * Return: '0' on success, error code otherwise
13407 + */
13408 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13409 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
13410 +{
13411 + struct fsl_mc_command cmd = { 0 };
13412 + struct dpseci_cmd_congestion_notification *cmd_params;
13413 +
13414 + cmd.header = mc_encode_cmd_header(
13415 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
13416 + cmd_flags,
13417 + token);
13418 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13419 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13420 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
13421 + cmd_params->priority = cfg->dest_cfg.priority;
13422 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
13423 + cfg->dest_cfg.dest_type);
13424 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
13425 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
13426 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
13427 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
13428 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
13429 +
13430 + return mc_send_command(mc_io, &cmd);
13431 +}
13432 +
13433 +/**
13434 + * dpseci_get_congestion_notification() - Get congestion group notification
13435 + * configuration
13436 + * @mc_io: Pointer to MC portal's I/O object
13437 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13438 + * @token: Token of DPSECI object
13439 + * @cfg: congestion notification configuration
13440 + *
13441 + * Return: '0' on success, error code otherwise
13442 + */
13443 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13444 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
13445 +{
13446 + struct fsl_mc_command cmd = { 0 };
13447 + struct dpseci_cmd_congestion_notification *rsp_params;
13448 + int err;
13449 +
13450 + cmd.header = mc_encode_cmd_header(
13451 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
13452 + cmd_flags,
13453 + token);
13454 + err = mc_send_command(mc_io, &cmd);
13455 + if (err)
13456 + return err;
13457 +
13458 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13459 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
13460 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
13461 + cfg->dest_cfg.priority = rsp_params->priority;
13462 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
13463 + CGN_DEST_TYPE);
13464 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
13465 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
13466 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
13467 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
13468 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
13469 +
13470 + return 0;
13471 +}
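
The two congestion-notification calls above mirror each other: the set path serializes a struct dpseci_congestion_notification_cfg into the command, and the get path decodes the response back into the same structure. As a rough, hypothetical illustration only (not part of the patch; mc_io, token, cscn_iova and priv are assumed to exist in the caller), a consumer could enable CSCN writes to memory like this:

	struct dpseci_congestion_notification_cfg cfg = {
		.units = DPSECI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = 512,	/* enter congestion above 512 frames */
		.threshold_exit = 384,	/* leave congestion below 384 frames */
		.message_iova = cscn_iova,	/* 16B-aligned, DMA-able buffer */
		.message_ctx = (u64)(uintptr_t)priv,	/* opaque cookie echoed in the CSCN */
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
		/* dest_cfg left zeroed: DPSECI_DEST_NONE, no WQ notification */
	};
	int err;

	err = dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
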
13472 --- /dev/null
13473 +++ b/drivers/crypto/caam/dpseci.h
13474 @@ -0,0 +1,433 @@
13475 +/*
13476 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13477 + * Copyright 2017 NXP
13478 + *
13479 + * Redistribution and use in source and binary forms, with or without
13480 + * modification, are permitted provided that the following conditions are met:
13481 + * * Redistributions of source code must retain the above copyright
13482 + * notice, this list of conditions and the following disclaimer.
13483 + * * Redistributions in binary form must reproduce the above copyright
13484 + * notice, this list of conditions and the following disclaimer in the
13485 + * documentation and/or other materials provided with the distribution.
13486 + * * Neither the names of the above-listed copyright holders nor the
13487 + * names of any contributors may be used to endorse or promote products
13488 + * derived from this software without specific prior written permission.
13489 + *
13490 + *
13491 + * ALTERNATIVELY, this software may be distributed under the terms of the
13492 + * GNU General Public License ("GPL") as published by the Free Software
13493 + * Foundation, either version 2 of that License or (at your option) any
13494 + * later version.
13495 + *
13496 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13497 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13498 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13499 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13500 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13501 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13502 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13503 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13504 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13505 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13506 + * POSSIBILITY OF SUCH DAMAGE.
13507 + */
13508 +#ifndef _DPSECI_H_
13509 +#define _DPSECI_H_
13510 +
13511 +/*
13512 + * Data Path SEC Interface API
13513 + * Contains initialization APIs and runtime control APIs for DPSECI
13514 + */
13515 +
13516 +struct fsl_mc_io;
13517 +struct opr_cfg;
13518 +struct opr_qry;
13519 +
13520 +/**
13521 + * General DPSECI macros
13522 + */
13523 +
13524 +/**
13525 + * Maximum number of Tx/Rx queues per DPSECI object
13526 + */
13527 +#define DPSECI_MAX_QUEUE_NUM 16
13528 +
13529 +/**
13530 + * All queues considered; see dpseci_set_rx_queue()
13531 + */
13532 +#define DPSECI_ALL_QUEUES (u8)(-1)
13533 +
13534 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
13535 + u16 *token);
13536 +
13537 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13538 +
13539 +/**
13540 + * Enable the Congestion Group support
13541 + */
13542 +#define DPSECI_OPT_HAS_CG 0x000020
13543 +
13544 +/**
13545 + * Enable the Order Restoration support
13546 + */
13547 +#define DPSECI_OPT_HAS_OPR 0x000040
13548 +
13549 +/**
13550 + * Order Point Records are shared for the entire DPSECI
13551 + */
13552 +#define DPSECI_OPT_OPR_SHARED 0x000080
13553 +
13554 +/**
13555 + * struct dpseci_cfg - Structure representing DPSECI configuration
13556 + * @options: Any combination of the following options:
13557 + * DPSECI_OPT_HAS_CG
13558 + * DPSECI_OPT_HAS_OPR
13559 + * DPSECI_OPT_OPR_SHARED
13560 + * @num_tx_queues: num of queues towards the SEC
13561 + * @num_rx_queues: num of queues back from the SEC
13562 + * @priorities: Priorities for the SEC hardware processing;
13563 + * each place in the array is the priority of the tx queue
13564 + * towards the SEC;
13565 + * valid priorities are configured with values 1-8;
13566 + */
13567 +struct dpseci_cfg {
13568 + u32 options;
13569 + u8 num_tx_queues;
13570 + u8 num_rx_queues;
13571 + u8 priorities[DPSECI_MAX_QUEUE_NUM];
13572 +};
13573 +
13574 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13575 + const struct dpseci_cfg *cfg, u32 *obj_id);
13576 +
13577 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13578 + u32 object_id);
13579 +
13580 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13581 +
13582 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13583 +
13584 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13585 + int *en);
13586 +
13587 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13588 +
13589 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13590 + u8 irq_index, u8 *en);
13591 +
13592 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13593 + u8 irq_index, u8 en);
13594 +
13595 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13596 + u8 irq_index, u32 *mask);
13597 +
13598 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13599 + u8 irq_index, u32 mask);
13600 +
13601 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13602 + u8 irq_index, u32 *status);
13603 +
13604 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13605 + u8 irq_index, u32 status);
13606 +
13607 +/**
13608 + * struct dpseci_attr - Structure representing DPSECI attributes
13609 + * @id: DPSECI object ID
13610 + * @num_tx_queues: number of queues towards the SEC
13611 + * @num_rx_queues: number of queues back from the SEC
13612 + * @options: any combination of the following options:
13613 + * DPSECI_OPT_HAS_CG
13614 + * DPSECI_OPT_HAS_OPR
13615 + * DPSECI_OPT_OPR_SHARED
13616 + */
13617 +struct dpseci_attr {
13618 + int id;
13619 + u8 num_tx_queues;
13620 + u8 num_rx_queues;
13621 + u32 options;
13622 +};
13623 +
13624 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13625 + struct dpseci_attr *attr);
13626 +
13627 +/**
13628 + * enum dpseci_dest - DPSECI destination types
13629 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
13630 + * and does not generate FQDAN notifications; user is expected to dequeue
13631 + * from the queue based on polling or other user-defined method
13632 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
13633 + * notifications to the specified DPIO; user is expected to dequeue from
13634 + * the queue only after notification is received
13635 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
13636 + * FQDAN notifications, but is connected to the specified DPCON object;
13637 + * user is expected to dequeue from the DPCON channel
13638 + */
13639 +enum dpseci_dest {
13640 + DPSECI_DEST_NONE = 0,
13641 + DPSECI_DEST_DPIO,
13642 + DPSECI_DEST_DPCON
13643 +};
13644 +
13645 +/**
13646 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
13647 + * @dest_type: Destination type
13648 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
13649 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
13650 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
13651 + * not relevant for 'DPSECI_DEST_NONE' option
13652 + */
13653 +struct dpseci_dest_cfg {
13654 + enum dpseci_dest dest_type;
13655 + int dest_id;
13656 + u8 priority;
13657 +};
13658 +
13659 +/**
13660 + * DPSECI queue modification options
13661 + */
13662 +
13663 +/**
13664 + * Select to modify the user's context associated with the queue
13665 + */
13666 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
13667 +
13668 +/**
13669 + * Select to modify the queue's destination
13670 + */
13671 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
13672 +
13673 +/**
13674 + * Select to modify the queue's order preservation
13675 + */
13676 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
13677 +
13678 +/**
13679 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
13680 + * @options: Flags representing the suggested modifications to the queue;
13681 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
13682 + * @order_preservation_en: order preservation configuration for the rx queue;
13683 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
13684 + * @user_ctx: User context value provided in the frame descriptor of each
13685 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
13686 + * in 'options'
13687 + * @dest_cfg: Queue destination parameters; valid only if
13688 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
13689 + */
13690 +struct dpseci_rx_queue_cfg {
13691 + u32 options;
13692 + int order_preservation_en;
13693 + u64 user_ctx;
13694 + struct dpseci_dest_cfg dest_cfg;
13695 +};
13696 +
13697 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13698 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
13699 +
13700 +/**
13701 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
13702 + * @user_ctx: User context value provided in the frame descriptor of each
13703 + * dequeued frame
13704 + * @order_preservation_en: Status of the order preservation configuration on the
13705 + * queue
13706 + * @dest_cfg: Queue destination configuration
13707 + * @fqid: Virtual FQID value to be used for dequeue operations
13708 + */
13709 +struct dpseci_rx_queue_attr {
13710 + u64 user_ctx;
13711 + int order_preservation_en;
13712 + struct dpseci_dest_cfg dest_cfg;
13713 + u32 fqid;
13714 +};
13715 +
13716 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13717 + u8 queue, struct dpseci_rx_queue_attr *attr);
13718 +
13719 +/**
13720 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
13721 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
13722 + * @priority: SEC hardware processing priority for the queue
13723 + */
13724 +struct dpseci_tx_queue_attr {
13725 + u32 fqid;
13726 + u8 priority;
13727 +};
13728 +
13729 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13730 + u8 queue, struct dpseci_tx_queue_attr *attr);
13731 +
13732 +/**
13733 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
13734 + * hardware accelerator
13735 + * @ip_id: ID for SEC
13736 + * @major_rev: Major revision number for SEC
13737 + * @minor_rev: Minor revision number for SEC
13738 + * @era: SEC Era
13739 + * @deco_num: The number of copies of the DECO that are implemented in this
13740 + * version of SEC
13741 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
13742 + * version of SEC
13743 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
13744 + * version of SEC
13745 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
13746 + * implemented in this version of SEC
13747 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
13748 + * implemented in this version of SEC
13749 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
13750 + * this version of SEC
13751 + * @pk_acc_num: The number of copies of the Public Key module that are
13752 + * implemented in this version of SEC
13753 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
13754 + * implemented in this version of SEC
13755 + * @rng_acc_num: The number of copies of the Random Number Generator that are
13756 + * implemented in this version of SEC
13757 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
13758 + * implemented in this version of SEC
13759 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
13760 + * in this version of SEC
13761 + * @des_acc_num: The number of copies of the DES module that are implemented in
13762 + * this version of SEC
13763 + * @aes_acc_num: The number of copies of the AES module that are implemented in
13764 + * this version of SEC
13765 + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
13766 + * implemented in this version of SEC.
13767 + * @ptha_acc_num: The number of copies of the Poly1305 module that are
13768 + * implemented in this version of SEC.
13769 + **/
13770 +struct dpseci_sec_attr {
13771 + u16 ip_id;
13772 + u8 major_rev;
13773 + u8 minor_rev;
13774 + u8 era;
13775 + u8 deco_num;
13776 + u8 zuc_auth_acc_num;
13777 + u8 zuc_enc_acc_num;
13778 + u8 snow_f8_acc_num;
13779 + u8 snow_f9_acc_num;
13780 + u8 crc_acc_num;
13781 + u8 pk_acc_num;
13782 + u8 kasumi_acc_num;
13783 + u8 rng_acc_num;
13784 + u8 md_acc_num;
13785 + u8 arc4_acc_num;
13786 + u8 des_acc_num;
13787 + u8 aes_acc_num;
13788 + u8 ccha_acc_num;
13789 + u8 ptha_acc_num;
13790 +};
13791 +
13792 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13793 + struct dpseci_sec_attr *attr);
13794 +
13795 +/**
13796 + * struct dpseci_sec_counters - Structure representing global SEC counters
13797 + * (not per-DPSECI counters)
13798 + * @dequeued_requests: Number of Requests Dequeued
13799 + * @ob_enc_requests: Number of Outbound Encrypt Requests
13800 + * @ib_dec_requests: Number of Inbound Decrypt Requests
13801 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
13802 + * @ob_prot_bytes: Number of Outbound Bytes Protected
13803 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
13804 + * @ib_valid_bytes: Number of Inbound Bytes Validated
13805 + */
13806 +struct dpseci_sec_counters {
13807 + u64 dequeued_requests;
13808 + u64 ob_enc_requests;
13809 + u64 ib_dec_requests;
13810 + u64 ob_enc_bytes;
13811 + u64 ob_prot_bytes;
13812 + u64 ib_dec_bytes;
13813 + u64 ib_valid_bytes;
13814 +};
13815 +
13816 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13817 + struct dpseci_sec_counters *counters);
13818 +
13819 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13820 + u16 *major_ver, u16 *minor_ver);
13821 +
13822 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13823 + u8 options, struct opr_cfg *cfg);
13824 +
13825 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13826 + struct opr_cfg *cfg, struct opr_qry *qry);
13827 +
13828 +/**
13829 + * enum dpseci_congestion_unit - DPSECI congestion units
13830 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
13831 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
13832 + */
13833 +enum dpseci_congestion_unit {
13834 + DPSECI_CONGESTION_UNIT_BYTES = 0,
13835 + DPSECI_CONGESTION_UNIT_FRAMES
13836 +};
13837 +
13838 +/**
13839 + * CSCN message is written to message_iova once entering a
13840 + * congestion state (see 'threshold_entry')
13841 + */
13842 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
13843 +
13844 +/**
13845 + * CSCN message is written to message_iova once exiting a
13846 + * congestion state (see 'threshold_exit')
13847 + */
13848 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
13849 +
13850 +/**
13851 + * CSCN write will attempt to allocate into a cache (coherent write);
13852 + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
13853 + */
13854 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
13855 +
13856 +/**
13857 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13858 + * DPIO/DPCON's WQ channel once entering a congestion state
13859 + * (see 'threshold_entry')
13860 + */
13861 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
13862 +
13863 +/**
13864 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13865 + * DPIO/DPCON's WQ channel once exiting a congestion state
13866 + * (see 'threshold_exit')
13867 + */
13868 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
13869 +
13870 +/**
13871 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
13872 + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
13873 + * (if enabled)
13874 + */
13875 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
13876 +
13877 +/**
13878 + * struct dpseci_congestion_notification_cfg - congestion notification
13879 + * configuration
13880 + * @units: units type
13881 + * @threshold_entry: above this threshold we enter a congestion state.
13882 + * Set it to '0' to disable it
13883 + * @threshold_exit: below this threshold we exit the congestion state.
13884 + * @message_ctx: The context that will be part of the CSCN message
13885 + * @message_iova: I/O virtual address (must be in DMA-able memory),
13886 + * must be 16B aligned;
13887 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
13888 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
13889 + * values
13890 + */
13891 +struct dpseci_congestion_notification_cfg {
13892 + enum dpseci_congestion_unit units;
13893 + u32 threshold_entry;
13894 + u32 threshold_exit;
13895 + u64 message_ctx;
13896 + u64 message_iova;
13897 + struct dpseci_dest_cfg dest_cfg;
13898 + u16 notification_mode;
13899 +};
13900 +
13901 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13902 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
13903 +
13904 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13905 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
13906 +
13907 +#endif /* _DPSECI_H_ */
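
Taken together, the declarations above form a small management API: open a DPSECI object by id, query its geometry, read back the per-queue frame-queue ids, and close it again. The following is a minimal, hypothetical sketch of that flow (not part of the patch; error handling is trimmed and probe_dpseci is an invented name):

	static int probe_dpseci(struct fsl_mc_io *mc_io, int dpseci_id)
	{
		struct dpseci_attr attr;
		struct dpseci_rx_queue_attr rx_attr;
		struct dpseci_tx_queue_attr tx_attr;
		u16 token;
		int i, err;

		err = dpseci_open(mc_io, 0, dpseci_id, &token);
		if (err)
			return err;

		err = dpseci_get_attributes(mc_io, 0, token, &attr);
		if (err)
			goto out;

		for (i = 0; i < attr.num_rx_queues; i++) {
			err = dpseci_get_rx_queue(mc_io, 0, token, i, &rx_attr);
			if (err)
				goto out;
			/* rx_attr.fqid is the virtual FQID used for dequeues */
		}

		for (i = 0; i < attr.num_tx_queues; i++) {
			err = dpseci_get_tx_queue(mc_io, 0, token, i, &tx_attr);
			if (err)
				goto out;
			/* tx_attr.fqid is the FQID frames are enqueued to */
		}

		err = dpseci_enable(mc_io, 0, token);
	out:
		dpseci_close(mc_io, 0, token);
		return err;
	}
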
13908 --- /dev/null
13909 +++ b/drivers/crypto/caam/dpseci_cmd.h
13910 @@ -0,0 +1,287 @@
13911 +/*
13912 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13913 + * Copyright 2017 NXP
13914 + *
13915 + * Redistribution and use in source and binary forms, with or without
13916 + * modification, are permitted provided that the following conditions are met:
13917 + * * Redistributions of source code must retain the above copyright
13918 + * notice, this list of conditions and the following disclaimer.
13919 + * * Redistributions in binary form must reproduce the above copyright
13920 + * notice, this list of conditions and the following disclaimer in the
13921 + * documentation and/or other materials provided with the distribution.
13922 + * * Neither the names of the above-listed copyright holders nor the
13923 + * names of any contributors may be used to endorse or promote products
13924 + * derived from this software without specific prior written permission.
13925 + *
13926 + *
13927 + * ALTERNATIVELY, this software may be distributed under the terms of the
13928 + * GNU General Public License ("GPL") as published by the Free Software
13929 + * Foundation, either version 2 of that License or (at your option) any
13930 + * later version.
13931 + *
13932 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13933 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13934 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13935 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13936 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13937 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13938 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13939 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13940 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13941 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13942 + * POSSIBILITY OF SUCH DAMAGE.
13943 + */
13944 +
13945 +#ifndef _DPSECI_CMD_H_
13946 +#define _DPSECI_CMD_H_
13947 +
13948 +/* DPSECI Version */
13949 +#define DPSECI_VER_MAJOR 5
13950 +#define DPSECI_VER_MINOR 3
13951 +
13952 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
13953 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
13954 +
13955 +/* Command versioning */
13956 +#define DPSECI_CMD_BASE_VERSION 1
13957 +#define DPSECI_CMD_BASE_VERSION_V2 2
13958 +#define DPSECI_CMD_BASE_VERSION_V3 3
13959 +#define DPSECI_CMD_ID_OFFSET 4
13960 +
13961 +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13962 + DPSECI_CMD_BASE_VERSION)
13963 +
13964 +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13965 + DPSECI_CMD_BASE_VERSION_V2)
13966 +
13967 +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13968 + DPSECI_CMD_BASE_VERSION_V3)
13969 +
13970 +/* Command IDs */
13971 +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
13972 +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
13973 +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
13974 +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
13975 +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
13976 +
13977 +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
13978 +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
13979 +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
13980 +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
13981 +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
13982 +
13983 +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
13984 +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
13985 +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
13986 +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
13987 +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
13988 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
13989 +
13990 +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
13991 +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
13992 +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
13993 +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
13994 +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
13995 +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
13996 +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
13997 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
13998 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
13999 +
14000 +/* Macros for accessing command fields smaller than 1 byte */
14001 +#define DPSECI_MASK(field) \
14002 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
14003 + DPSECI_##field##_SHIFT)
14004 +
14005 +#define dpseci_set_field(var, field, val) \
14006 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
14007 +
14008 +#define dpseci_get_field(var, field) \
14009 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
14010 +
14011 +struct dpseci_cmd_open {
14012 + __le32 dpseci_id;
14013 +};
14014 +
14015 +struct dpseci_cmd_create {
14016 + u8 priorities[8];
14017 + u8 num_tx_queues;
14018 + u8 num_rx_queues;
14019 + u8 pad0[6];
14020 + __le32 options;
14021 + __le32 pad1;
14022 + u8 priorities2[8];
14023 +};
14024 +
14025 +struct dpseci_cmd_destroy {
14026 + __le32 object_id;
14027 +};
14028 +
14029 +#define DPSECI_ENABLE_SHIFT 0
14030 +#define DPSECI_ENABLE_SIZE 1
14031 +
14032 +struct dpseci_rsp_is_enabled {
14033 + u8 is_enabled;
14034 +};
14035 +
14036 +struct dpseci_cmd_irq_enable {
14037 + u8 enable_state;
14038 + u8 pad[3];
14039 + u8 irq_index;
14040 +};
14041 +
14042 +struct dpseci_rsp_get_irq_enable {
14043 + u8 enable_state;
14044 +};
14045 +
14046 +struct dpseci_cmd_irq_mask {
14047 + __le32 mask;
14048 + u8 irq_index;
14049 +};
14050 +
14051 +struct dpseci_cmd_irq_status {
14052 + __le32 status;
14053 + u8 irq_index;
14054 +};
14055 +
14056 +struct dpseci_rsp_get_attributes {
14057 + __le32 id;
14058 + __le32 pad0;
14059 + u8 num_tx_queues;
14060 + u8 num_rx_queues;
14061 + u8 pad1[6];
14062 + __le32 options;
14063 +};
14064 +
14065 +#define DPSECI_DEST_TYPE_SHIFT 0
14066 +#define DPSECI_DEST_TYPE_SIZE 4
14067 +
14068 +#define DPSECI_ORDER_PRESERVATION_SHIFT 0
14069 +#define DPSECI_ORDER_PRESERVATION_SIZE 1
14070 +
14071 +struct dpseci_cmd_queue {
14072 + __le32 dest_id;
14073 + u8 priority;
14074 + u8 queue;
14075 + u8 dest_type;
14076 + u8 pad;
14077 + __le64 user_ctx;
14078 + union {
14079 + __le32 options;
14080 + __le32 fqid;
14081 + };
14082 + u8 order_preservation_en;
14083 +};
14084 +
14085 +struct dpseci_rsp_get_tx_queue {
14086 + __le32 pad;
14087 + __le32 fqid;
14088 + u8 priority;
14089 +};
14090 +
14091 +struct dpseci_rsp_get_sec_attr {
14092 + __le16 ip_id;
14093 + u8 major_rev;
14094 + u8 minor_rev;
14095 + u8 era;
14096 + u8 pad0[3];
14097 + u8 deco_num;
14098 + u8 zuc_auth_acc_num;
14099 + u8 zuc_enc_acc_num;
14100 + u8 pad1;
14101 + u8 snow_f8_acc_num;
14102 + u8 snow_f9_acc_num;
14103 + u8 crc_acc_num;
14104 + u8 pad2;
14105 + u8 pk_acc_num;
14106 + u8 kasumi_acc_num;
14107 + u8 rng_acc_num;
14108 + u8 pad3;
14109 + u8 md_acc_num;
14110 + u8 arc4_acc_num;
14111 + u8 des_acc_num;
14112 + u8 aes_acc_num;
14113 + u8 ccha_acc_num;
14114 + u8 ptha_acc_num;
14115 +};
14116 +
14117 +struct dpseci_rsp_get_sec_counters {
14118 + __le64 dequeued_requests;
14119 + __le64 ob_enc_requests;
14120 + __le64 ib_dec_requests;
14121 + __le64 ob_enc_bytes;
14122 + __le64 ob_prot_bytes;
14123 + __le64 ib_dec_bytes;
14124 + __le64 ib_valid_bytes;
14125 +};
14126 +
14127 +struct dpseci_rsp_get_api_version {
14128 + __le16 major;
14129 + __le16 minor;
14130 +};
14131 +
14132 +struct dpseci_cmd_opr {
14133 + __le16 pad;
14134 + u8 index;
14135 + u8 options;
14136 + u8 pad1[7];
14137 + u8 oloe;
14138 + u8 oeane;
14139 + u8 olws;
14140 + u8 oa;
14141 + u8 oprrws;
14142 +};
14143 +
14144 +#define DPSECI_OPR_RIP_SHIFT 0
14145 +#define DPSECI_OPR_RIP_SIZE 1
14146 +#define DPSECI_OPR_ENABLE_SHIFT 1
14147 +#define DPSECI_OPR_ENABLE_SIZE 1
14148 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
14149 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
14150 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
14151 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
14152 +
14153 +struct dpseci_rsp_get_opr {
14154 + __le64 pad;
14155 + u8 flags;
14156 + u8 pad0[2];
14157 + u8 oloe;
14158 + u8 oeane;
14159 + u8 olws;
14160 + u8 oa;
14161 + u8 oprrws;
14162 + __le16 nesn;
14163 + __le16 pad1;
14164 + __le16 ndsn;
14165 + __le16 pad2;
14166 + __le16 ea_tseq;
14167 + u8 tseq_nlis;
14168 + u8 pad3;
14169 + __le16 ea_hseq;
14170 + u8 hseq_nlis;
14171 + u8 pad4;
14172 + __le16 ea_hptr;
14173 + __le16 pad5;
14174 + __le16 ea_tptr;
14175 + __le16 pad6;
14176 + __le16 opr_vid;
14177 + __le16 pad7;
14178 + __le16 opr_id;
14179 +};
14180 +
14181 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
14182 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
14183 +#define DPSECI_CGN_UNITS_SHIFT 4
14184 +#define DPSECI_CGN_UNITS_SIZE 2
14185 +
14186 +struct dpseci_cmd_congestion_notification {
14187 + __le32 dest_id;
14188 + __le16 notification_mode;
14189 + u8 priority;
14190 + u8 options;
14191 + __le64 message_iova;
14192 + __le64 message_ctx;
14193 + __le32 threshold_entry;
14194 + __le32 threshold_exit;
14195 +};
14196 +
14197 +#endif /* _DPSECI_CMD_H_ */
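
The DPSECI_MASK / dpseci_set_field / dpseci_get_field helpers defined above pack several sub-byte fields into a single command byte using the *_SHIFT/*_SIZE pairs. As a hypothetical example (not part of the patch), the 'options' byte of struct dpseci_cmd_congestion_notification carries both the destination type (bits 3:0) and the congestion units (bits 5:4):

	u8 options = 0;

	/* DPSECI_DEST_DPIO (1) into bits 3:0, FRAMES (1) into bits 5:4 -> 0x11 */
	dpseci_set_field(options, CGN_DEST_TYPE, DPSECI_DEST_DPIO);
	dpseci_set_field(options, CGN_UNITS, DPSECI_CONGESTION_UNIT_FRAMES);

	/* and back again when decoding a response */
	enum dpseci_dest dest = dpseci_get_field(options, CGN_DEST_TYPE);
	enum dpseci_congestion_unit units = dpseci_get_field(options, CGN_UNITS);
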
14198 --- a/drivers/crypto/caam/error.c
14199 +++ b/drivers/crypto/caam/error.c
14200 @@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
14201 #endif /* DEBUG */
14202 EXPORT_SYMBOL(caam_dump_sg);
14203
14204 +bool caam_little_end;
14205 +EXPORT_SYMBOL(caam_little_end);
14206 +
14207 +bool caam_imx;
14208 +EXPORT_SYMBOL(caam_imx);
14209 +
14210 static const struct {
14211 u8 value;
14212 const char *error_text;
14213 @@ -108,6 +114,54 @@ static const struct {
14214 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
14215 };
14216
14217 +static const struct {
14218 + u8 value;
14219 + const char *error_text;
14220 +} qi_error_list[] = {
14221 + { 0x1F, "Job terminated by FQ or ICID flush" },
14222 + { 0x20, "FD format error"},
14223 + { 0x21, "FD command format error"},
14224 + { 0x23, "FL format error"},
14225 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
14226 + { 0x30, "Max. buffer size too small"},
14227 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
13228 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
14229 + { 0x33, "Size over/underflow (allocate mode)"},
14230 + { 0x34, "Size over/underflow (reuse mode)"},
13231 + { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
13232 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
14233 + { 0x41, "SBC frame format not supported (allocate mode)"},
14234 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
14235 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
14236 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
14237 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
14238 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
14239 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
14240 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
13241 + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
14242 + { 0x51, "Unsupported IF reuse mode"},
14243 + { 0x52, "Unsupported FL use mode"},
14244 + { 0x53, "Unsupported RJD use mode"},
14245 + { 0x54, "Unsupported inline descriptor use mode"},
14246 + { 0xC0, "Table buffer pool 0 depletion"},
14247 + { 0xC1, "Table buffer pool 1 depletion"},
14248 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
14249 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
14250 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
14251 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
14252 + { 0xD0, "FLC read error"},
14253 + { 0xD1, "FL read error"},
14254 + { 0xD2, "FL write error"},
14255 + { 0xD3, "OF SGT write error"},
14256 + { 0xD4, "PTA read error"},
14257 + { 0xD5, "PTA write error"},
14258 + { 0xD6, "OF SGT F-bit write error"},
14259 + { 0xD7, "ASA write error"},
14260 + { 0xE1, "FLC[ICR]=0 ICID error"},
14261 + { 0xE2, "FLC[ICR]=1 ICID error"},
14262 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
14263 +};
14264 +
14265 static const char * const cha_id_list[] = {
14266 "",
14267 "AES",
14268 @@ -236,6 +290,27 @@ static void report_deco_status(struct de
14269 status, error, idx_str, idx, err_str, err_err_code);
14270 }
14271
14272 +static void report_qi_status(struct device *qidev, const u32 status,
14273 + const char *error)
14274 +{
14275 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
14276 + const char *err_str = "unidentified error value 0x";
14277 + char err_err_code[3] = { 0 };
14278 + int i;
14279 +
14280 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
14281 + if (qi_error_list[i].value == err_id)
14282 + break;
14283 +
14284 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
14285 + err_str = qi_error_list[i].error_text;
14286 + else
14287 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
14288 +
14289 + dev_err(qidev, "%08x: %s: %s%s\n",
14290 + status, error, err_str, err_err_code);
14291 +}
14292 +
14293 static void report_jr_status(struct device *jrdev, const u32 status,
14294 const char *error)
14295 {
14296 @@ -250,7 +325,7 @@ static void report_cond_code_status(stru
14297 status, error, __func__);
14298 }
14299
14300 -void caam_jr_strstatus(struct device *jrdev, u32 status)
14301 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
14302 {
14303 static const struct stat_src {
14304 void (*report_ssed)(struct device *jrdev, const u32 status,
14305 @@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
14306 { report_ccb_status, "CCB" },
14307 { report_jump_status, "Jump" },
14308 { report_deco_status, "DECO" },
14309 - { NULL, "Queue Manager Interface" },
14310 + { report_qi_status, "Queue Manager Interface" },
14311 { report_jr_status, "Job Ring" },
14312 { report_cond_code_status, "Condition Code" },
14313 { NULL, NULL },
14314 @@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
14315 else
14316 dev_err(jrdev, "%d: unknown error source\n", ssrc);
14317 }
14318 -EXPORT_SYMBOL(caam_jr_strstatus);
14319 +EXPORT_SYMBOL(caam_strstatus);
14320 --- a/drivers/crypto/caam/error.h
14321 +++ b/drivers/crypto/caam/error.h
14322 @@ -8,7 +8,11 @@
14323 #ifndef CAAM_ERROR_H
14324 #define CAAM_ERROR_H
14325 #define CAAM_ERROR_STR_MAX 302
14326 -void caam_jr_strstatus(struct device *jrdev, u32 status);
14327 +
14328 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
14329 +
14330 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
14331 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
14332
14333 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
14334 int rowsize, int groupsize, struct scatterlist *sg,
14335 --- a/drivers/crypto/caam/intern.h
14336 +++ b/drivers/crypto/caam/intern.h
14337 @@ -65,10 +65,6 @@ struct caam_drv_private_jr {
14338 * Driver-private storage for a single CAAM block instance
14339 */
14340 struct caam_drv_private {
14341 -#ifdef CONFIG_CAAM_QI
14342 - struct device *qidev;
14343 -#endif
14344 -
14345 /* Physical-presence section */
14346 struct caam_ctrl __iomem *ctrl; /* controller region */
14347 struct caam_deco __iomem *deco; /* DECO/CCB views */
14348 @@ -76,14 +72,21 @@ struct caam_drv_private {
14349 struct caam_queue_if __iomem *qi; /* QI control region */
14350 struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
14351
14352 + struct iommu_domain *domain;
14353 +
14354 /*
14355 * Detected geometry block. Filled in from device tree if powerpc,
14356 * or from register-based version detection code
14357 */
14358 u8 total_jobrs; /* Total Job Rings in device */
14359 u8 qi_present; /* Nonzero if QI present in device */
14360 +#ifdef CONFIG_CAAM_QI
14361 + u8 qi_init; /* Nonzero if QI has been initialized */
14362 +#endif
14363 + u8 mc_en; /* Nonzero if MC f/w is active */
14364 int secvio_irq; /* Security violation interrupt number */
14365 int virt_en; /* Virtualization enabled in CAAM */
14366 + int era; /* CAAM Era (internal HW revision) */
14367
14368 #define RNG4_MAX_HANDLES 2
14369 /* RNG4 block */
14370 @@ -108,8 +111,95 @@ struct caam_drv_private {
14371 #endif
14372 };
14373
14374 -void caam_jr_algapi_init(struct device *dev);
14375 -void caam_jr_algapi_remove(struct device *dev);
14376 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
14377 +
14378 +int caam_algapi_init(struct device *dev);
14379 +void caam_algapi_exit(void);
14380 +
14381 +#else
14382 +
14383 +static inline int caam_algapi_init(struct device *dev)
14384 +{
14385 + return 0;
14386 +}
14387 +
14388 +static inline void caam_algapi_exit(void)
14389 +{
14390 +}
14391 +
14392 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
14393 +
14394 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
14395 +
14396 +int caam_algapi_hash_init(struct device *dev);
14397 +void caam_algapi_hash_exit(void);
14398 +
14399 +#else
14400 +
14401 +static inline int caam_algapi_hash_init(struct device *dev)
14402 +{
14403 + return 0;
14404 +}
14405 +
14406 +static inline void caam_algapi_hash_exit(void)
14407 +{
14408 +}
14409 +
14410 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
14411 +
14412 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
14413 +
14414 +int caam_pkc_init(struct device *dev);
14415 +void caam_pkc_exit(void);
14416 +
14417 +#else
14418 +
14419 +static inline int caam_pkc_init(struct device *dev)
14420 +{
14421 + return 0;
14422 +}
14423 +
14424 +static inline void caam_pkc_exit(void)
14425 +{
14426 +}
14427 +
14428 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
14429 +
14430 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
14431 +
14432 +int caam_rng_init(struct device *dev);
14433 +void caam_rng_exit(void);
14434 +
14435 +#else
14436 +
14437 +static inline int caam_rng_init(struct device *dev)
14438 +{
14439 + return 0;
14440 +}
14441 +
14442 +static inline void caam_rng_exit(void)
14443 +{
14444 +}
14445 +
14446 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
14447 +
14448 +#ifdef CONFIG_CAAM_QI
14449 +
14450 +int caam_qi_algapi_init(struct device *dev);
14451 +void caam_qi_algapi_exit(void);
14452 +
14453 +#else
14454 +
14455 +static inline int caam_qi_algapi_init(struct device *dev)
14456 +{
14457 + return 0;
14458 +}
14459 +
14460 +static inline void caam_qi_algapi_exit(void)
14461 +{
14462 +}
14463 +
14464 +#endif /* CONFIG_CAAM_QI */
14465
14466 #ifdef CONFIG_DEBUG_FS
14467 static int caam_debugfs_u64_get(void *data, u64 *val)
14468 --- a/drivers/crypto/caam/jr.c
14469 +++ b/drivers/crypto/caam/jr.c
14470 @@ -23,6 +23,52 @@ struct jr_driver_data {
14471
14472 static struct jr_driver_data driver_data;
14473
14474 +static int jr_driver_probed;
14475 +
14476 +int caam_jr_driver_probed(void)
14477 +{
14478 + return jr_driver_probed;
14479 +}
14480 +EXPORT_SYMBOL(caam_jr_driver_probed);
14481 +
14482 +static DEFINE_MUTEX(algs_lock);
14483 +static unsigned int active_devs;
14484 +
14485 +static void register_algs(struct device *dev)
14486 +{
14487 + mutex_lock(&algs_lock);
14488 +
14489 + if (++active_devs != 1)
14490 + goto algs_unlock;
14491 +
14492 + caam_algapi_init(dev);
14493 + caam_algapi_hash_init(dev);
14494 + caam_pkc_init(dev);
14495 + caam_rng_init(dev);
14496 + caam_qi_algapi_init(dev);
14497 +
14498 +algs_unlock:
14499 + mutex_unlock(&algs_lock);
14500 +}
14501 +
14502 +static void unregister_algs(void)
14503 +{
14504 + mutex_lock(&algs_lock);
14505 +
14506 + if (--active_devs != 0)
14507 + goto algs_unlock;
14508 +
14509 + caam_qi_algapi_exit();
14510 +
14511 + caam_rng_exit();
14512 + caam_pkc_exit();
14513 + caam_algapi_hash_exit();
14514 + caam_algapi_exit();
14515 +
14516 +algs_unlock:
14517 + mutex_unlock(&algs_lock);
14518 +}
14519 +
14520 static int caam_reset_hw_jr(struct device *dev)
14521 {
14522 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
14523 @@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
14524 return -EBUSY;
14525 }
14526
14527 + /* Unregister JR-based RNG & crypto algorithms */
14528 + unregister_algs();
14529 +
14530 /* Remove the node from Physical JobR list maintained by driver */
14531 spin_lock(&driver_data.jr_alloc_lock);
14532 list_del(&jrpriv->list_node);
14533 @@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
14534 dev_err(jrdev, "Failed to shut down job ring\n");
14535 irq_dispose_mapping(jrpriv->irq);
14536
14537 + jr_driver_probed--;
14538 +
14539 return ret;
14540 }
14541
14542 @@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
14543 EXPORT_SYMBOL(caam_jr_alloc);
14544
14545 /**
14546 + * caam_jridx_alloc() - Allocate a specific Job Ring based on its index.
14547 + *
14548 + * returns : pointer to the Job Ring device on success,
14549 + * ERR_PTR(-ENODEV) otherwise.
14550 + **/
14551 +struct device *caam_jridx_alloc(int idx)
14552 +{
14553 + struct caam_drv_private_jr *jrpriv;
14554 + struct device *dev = ERR_PTR(-ENODEV);
14555 +
14556 + spin_lock(&driver_data.jr_alloc_lock);
14557 +
14558 + if (list_empty(&driver_data.jr_list))
14559 + goto end;
14560 +
14561 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
14562 + if (jrpriv->ridx == idx) {
14563 + atomic_inc(&jrpriv->tfm_count);
14564 + dev = jrpriv->dev;
14565 + break;
14566 + }
14567 + }
14568 +
14569 +end:
14570 + spin_unlock(&driver_data.jr_alloc_lock);
14571 + return dev;
14572 +}
14573 +EXPORT_SYMBOL(caam_jridx_alloc);
14574 +
14575 +/**
14576 * caam_jr_free() - Free the Job Ring
14577 * @rdev - points to the dev that identifies the Job ring to
14578 * be released.
14579 @@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
14580
14581 atomic_set(&jrpriv->tfm_count, 0);
14582
14583 + register_algs(jrdev->parent);
14584 + jr_driver_probed++;
14585 +
14586 return 0;
14587 }
14588
14589 --- a/drivers/crypto/caam/jr.h
14590 +++ b/drivers/crypto/caam/jr.h
14591 @@ -9,7 +9,9 @@
14592 #define JR_H
14593
14594 /* Prototypes for backend-level services exposed to APIs */
14595 +int caam_jr_driver_probed(void);
14596 struct device *caam_jr_alloc(void);
14597 +struct device *caam_jridx_alloc(int idx);
14598 void caam_jr_free(struct device *rdev);
14599 int caam_jr_enqueue(struct device *dev, u32 *desc,
14600 void (*cbk)(struct device *dev, u32 *desc, u32 status,
14601 --- a/drivers/crypto/caam/key_gen.c
14602 +++ b/drivers/crypto/caam/key_gen.c
14603 @@ -11,36 +11,6 @@
14604 #include "desc_constr.h"
14605 #include "key_gen.h"
14606
14607 -/**
14608 - * split_key_len - Compute MDHA split key length for a given algorithm
14609 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14610 - * SHA224, SHA384, SHA512.
14611 - *
14612 - * Return: MDHA split key length
14613 - */
14614 -static inline u32 split_key_len(u32 hash)
14615 -{
14616 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14617 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14618 - u32 idx;
14619 -
14620 - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14621 -
14622 - return (u32)(mdpadlen[idx] * 2);
14623 -}
14624 -
14625 -/**
14626 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14627 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14628 - * SHA224, SHA384, SHA512.
14629 - *
14630 - * Return: MDHA split key pad length
14631 - */
14632 -static inline u32 split_key_pad_len(u32 hash)
14633 -{
14634 - return ALIGN(split_key_len(hash), 16);
14635 -}
14636 -
14637 void split_key_done(struct device *dev, u32 *desc, u32 err,
14638 void *context)
14639 {
14640 --- a/drivers/crypto/caam/key_gen.h
14641 +++ b/drivers/crypto/caam/key_gen.h
14642 @@ -6,6 +6,36 @@
14643 *
14644 */
14645
14646 +/**
14647 + * split_key_len - Compute MDHA split key length for a given algorithm
14648 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14649 + * SHA224, SHA384, SHA512.
14650 + *
14651 + * Return: MDHA split key length
14652 + */
14653 +static inline u32 split_key_len(u32 hash)
14654 +{
14655 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14656 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14657 + u32 idx;
14658 +
14659 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14660 +
14661 + return (u32)(mdpadlen[idx] * 2);
14662 +}
14663 +
14664 +/**
14665 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14666 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14667 + * SHA224, SHA384, SHA512.
14668 + *
14669 + * Return: MDHA split key pad length
14670 + */
14671 +static inline u32 split_key_pad_len(u32 hash)
14672 +{
14673 + return ALIGN(split_key_len(hash), 16);
14674 +}
14675 +
14676 struct split_key_result {
14677 struct completion completion;
14678 int err;
14679 --- a/drivers/crypto/caam/qi.c
14680 +++ b/drivers/crypto/caam/qi.c
14681 @@ -9,7 +9,7 @@
14682
14683 #include <linux/cpumask.h>
14684 #include <linux/kthread.h>
14685 -#include <soc/fsl/qman.h>
14686 +#include <linux/fsl_qman.h>
14687
14688 #include "regs.h"
14689 #include "qi.h"
14690 @@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
14691 /*
14692 * caam_qi_priv - CAAM QI backend private params
14693 * @cgr: QMan congestion group
14694 - * @qi_pdev: platform device for QI backend
14695 */
14696 struct caam_qi_priv {
14697 struct qman_cgr cgr;
14698 - struct platform_device *qi_pdev;
14699 };
14700
14701 static struct caam_qi_priv qipriv ____cacheline_aligned;
14702 @@ -102,26 +100,34 @@ static int mod_init_cpu;
14703 */
14704 static struct kmem_cache *qi_cache;
14705
14706 +static void *caam_iova_to_virt(struct iommu_domain *domain,
14707 + dma_addr_t iova_addr)
14708 +{
14709 + phys_addr_t phys_addr;
14710 +
14711 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
14712 +
14713 + return phys_to_virt(phys_addr);
14714 +}
14715 +
14716 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
14717 {
14718 struct qm_fd fd;
14719 - dma_addr_t addr;
14720 int ret;
14721 int num_retries = 0;
14722
14723 - qm_fd_clear_fd(&fd);
14724 - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
14725 -
14726 - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14727 + fd.cmd = 0;
14728 + fd.format = qm_fd_compound;
14729 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
14730 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14731 DMA_BIDIRECTIONAL);
14732 - if (dma_mapping_error(qidev, addr)) {
14733 + if (dma_mapping_error(qidev, fd.addr)) {
14734 dev_err(qidev, "DMA mapping error for QI enqueue request\n");
14735 return -EIO;
14736 }
14737 - qm_fd_addr_set64(&fd, addr);
14738
14739 do {
14740 - ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
14741 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
14742 if (likely(!ret))
14743 return 0;
14744
14745 @@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
14746 EXPORT_SYMBOL(caam_qi_enqueue);
14747
14748 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
14749 - const union qm_mr_entry *msg)
14750 + const struct qm_mr_entry *msg)
14751 {
14752 const struct qm_fd *fd;
14753 struct caam_drv_req *drv_req;
14754 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14755 + struct caam_drv_private *priv = dev_get_drvdata(qidev);
14756
14757 fd = &msg->ern.fd;
14758
14759 - if (qm_fd_get_format(fd) != qm_fd_compound) {
14760 + if (fd->format != qm_fd_compound) {
14761 dev_err(qidev, "Non-compound FD from CAAM\n");
14762 return;
14763 }
14764
14765 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14766 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14767 if (!drv_req) {
14768 dev_err(qidev,
14769 "Can't find original request for CAAM response\n");
14770 @@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
14771 req_fq->cb.fqs = NULL;
14772
14773 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
14774 - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
14775 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
14776 + req_fq);
14777 if (ret) {
14778 dev_err(qidev, "Failed to create session req FQ\n");
14779 goto create_req_fq_fail;
14780 }
14781
14782 - memset(&opts, 0, sizeof(opts));
14783 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14784 - QM_INITFQ_WE_CONTEXTB |
14785 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14786 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14787 - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
14788 - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
14789 - qm_fqd_context_a_set64(&opts.fqd, hwdesc);
14790 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14791 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14792 + QM_INITFQ_WE_CGID;
14793 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
14794 + opts.fqd.dest.channel = qm_channel_caam;
14795 + opts.fqd.dest.wq = 2;
14796 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
14797 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
14798 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
14799 opts.fqd.cgid = qipriv.cgr.cgrid;
14800
14801 ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
14802 @@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
14803 return req_fq;
14804
14805 init_req_fq_fail:
14806 - qman_destroy_fq(req_fq);
14807 + qman_destroy_fq(req_fq, 0);
14808 create_req_fq_fail:
14809 kfree(req_fq);
14810 return ERR_PTR(ret);
14811 @@ -275,7 +284,7 @@ empty_fq:
14812 if (ret)
14813 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
14814
14815 - qman_destroy_fq(fq);
14816 + qman_destroy_fq(fq, 0);
14817 kfree(fq);
14818
14819 return ret;
14820 @@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
14821 if (ret)
14822 return ret;
14823
14824 - if (!qm_mcr_np_get(&np, frm_cnt))
14825 + if (!np.frm_cnt)
14826 break;
14827
14828 msleep(20);
14829 @@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
14830 int caam_qi_shutdown(struct device *qidev)
14831 {
14832 int i, ret;
14833 - struct caam_qi_priv *priv = dev_get_drvdata(qidev);
14834 + struct caam_qi_priv *priv = &qipriv;
14835 const cpumask_t *cpus = qman_affine_cpus();
14836 struct cpumask old_cpumask = current->cpus_allowed;
14837
14838 @@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
14839 /* Now that we're done with the CGRs, restore the cpus allowed mask */
14840 set_cpus_allowed_ptr(current, &old_cpumask);
14841
14842 - platform_device_unregister(priv->qi_pdev);
14843 return ret;
14844 }
14845
14846 @@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
14847 struct caam_drv_req *drv_req;
14848 const struct qm_fd *fd;
14849 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14850 - u32 status;
14851 + struct caam_drv_private *priv = dev_get_drvdata(qidev);
14852
14853 if (caam_qi_napi_schedule(p, caam_napi))
14854 return qman_cb_dqrr_stop;
14855
14856 fd = &dqrr->fd;
14857 - status = be32_to_cpu(fd->status);
14858 - if (unlikely(status))
14859 - dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
14860 + if (unlikely(fd->status)) {
14861 + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
14862 + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
14863
14864 - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
14865 + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
14866 + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
14867 + dev_err(qidev, "Error: %#x in CAAM response FD\n",
14868 + fd->status);
14869 + }
14870 +
14871 + if (unlikely(fd->format != qm_fd_compound)) {
14872 dev_err(qidev, "Non-compound FD from CAAM\n");
14873 return qman_cb_dqrr_consume;
14874 }
14875
14876 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14877 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14878 if (unlikely(!drv_req)) {
14879 dev_err(qidev,
14880 "Can't find original request for caam response\n");
14881 @@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
14882 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
14883 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
14884
14885 - drv_req->cbk(drv_req, status);
14886 + drv_req->cbk(drv_req, fd->status);
14887 return qman_cb_dqrr_consume;
14888 }
14889
14890 @@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
14891 return -ENODEV;
14892 }
14893
14894 - memset(&opts, 0, sizeof(opts));
14895 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14896 - QM_INITFQ_WE_CONTEXTB |
14897 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14898 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
14899 - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14900 - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
14901 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14902 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14903 + QM_INITFQ_WE_CGID;
14904 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
14905 + QM_FQCTRL_CGE;
14906 + opts.fqd.dest.channel = qman_affine_channel(cpu);
14907 + opts.fqd.dest.wq = 3;
14908 opts.fqd.cgid = qipriv.cgr.cgrid;
14909 opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
14910 QM_STASHING_EXCL_DATA;
14911 - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
14912 + opts.fqd.context_a.stashing.data_cl = 1;
14913 + opts.fqd.context_a.stashing.context_cl = 1;
14914
14915 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
14916 if (ret) {
14917 @@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
14918 {
14919 int ret;
14920 struct qm_mcc_initcgr opts;
14921 - const u64 cpus = *(u64 *)qman_affine_cpus();
14922 - const int num_cpus = hweight64(cpus);
14923 - const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
14924 + const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
14925 + MAX_RSP_FQ_BACKLOG_PER_CPU;
14926
14927 ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
14928 if (ret) {
14929 @@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
14930
14931 qipriv.cgr.cb = cgr_cb;
14932 memset(&opts, 0, sizeof(opts));
14933 - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
14934 - QM_CGR_WE_MODE);
14935 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
14936 opts.cgr.cscn_en = QM_CGR_EN;
14937 opts.cgr.mode = QMAN_CGR_MODE_FRAME;
14938 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
14939 @@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
14940 int caam_qi_init(struct platform_device *caam_pdev)
14941 {
14942 int err, i;
14943 - struct platform_device *qi_pdev;
14944 struct device *ctrldev = &caam_pdev->dev, *qidev;
14945 struct caam_drv_private *ctrlpriv;
14946 const cpumask_t *cpus = qman_affine_cpus();
14947 struct cpumask old_cpumask = current->cpus_allowed;
14948 - static struct platform_device_info qi_pdev_info = {
14949 - .name = "caam_qi",
14950 - .id = PLATFORM_DEVID_NONE
14951 - };
14952
14953 /*
14954 * QMAN requires CGRs to be removed from same CPU+portal from where it
14955 @@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
14956 mod_init_cpu = cpumask_first(cpus);
14957 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
14958
14959 - qi_pdev_info.parent = ctrldev;
14960 - qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
14961 - qi_pdev = platform_device_register_full(&qi_pdev_info);
14962 - if (IS_ERR(qi_pdev))
14963 - return PTR_ERR(qi_pdev);
14964 - set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
14965 -
14966 ctrlpriv = dev_get_drvdata(ctrldev);
14967 - qidev = &qi_pdev->dev;
14968 -
14969 - qipriv.qi_pdev = qi_pdev;
14970 - dev_set_drvdata(qidev, &qipriv);
14971 + qidev = ctrldev;
14972
14973 /* Initialize the congestion detection */
14974 err = init_cgr(qidev);
14975 if (err) {
14976 dev_err(qidev, "CGR initialization failed: %d\n", err);
14977 - platform_device_unregister(qi_pdev);
14978 return err;
14979 }
14980
14981 @@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
14982 if (err) {
14983 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
14984 free_rsp_fqs();
14985 - platform_device_unregister(qi_pdev);
14986 return err;
14987 }
14988
14989 @@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
14990 napi_enable(irqtask);
14991 }
14992
14993 - /* Hook up QI device to parent controlling caam device */
14994 - ctrlpriv->qidev = qidev;
14995 -
14996 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
14997 SLAB_CACHE_DMA, NULL);
14998 if (!qi_cache) {
14999 dev_err(qidev, "Can't allocate CAAM cache\n");
15000 free_rsp_fqs();
15001 - platform_device_unregister(qi_pdev);
15002 return -ENOMEM;
15003 }
15004
15005 @@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
15006 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
15007 &times_congested, &caam_fops_u64_ro);
15008 #endif
15009 +
15010 + ctrlpriv->qi_init = 1;
15011 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
15012 return 0;
15013 }
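
Beyond the move to the DPAA1 SDK QMan API, the functional change in the two completion paths above is the address handling: with the SEC block behind an SMMU, the address recovered from the frame descriptor is an I/O virtual address, so calling phys_to_virt() on it directly would point at the wrong memory. The new caam_iova_to_virt() helper first asks the IOMMU domain (stored by the controller driver as priv->domain) for the backing physical address. The same lookup in isolation, as a sketch with the two cases spelled out:

#include <linux/iommu.h>
#include <linux/io.h>

/*
 * Recover a CPU pointer from a DMA address found in a CAAM frame
 * descriptor.  Without an IOMMU the DMA address already is the
 * physical address, so the lookup degenerates to phys_to_virt().
 */
static void *fd_to_virt(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t phys = domain ? iommu_iova_to_phys(domain, iova) : iova;

	return phys_to_virt(phys);
}
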
15014 --- a/drivers/crypto/caam/qi.h
15015 +++ b/drivers/crypto/caam/qi.h
15016 @@ -9,7 +9,7 @@
15017 #ifndef __QI_H__
15018 #define __QI_H__
15019
15020 -#include <soc/fsl/qman.h>
15021 +#include <linux/fsl_qman.h>
15022 #include "compat.h"
15023 #include "desc.h"
15024 #include "desc_constr.h"
15025 --- a/drivers/crypto/caam/regs.h
15026 +++ b/drivers/crypto/caam/regs.h
15027 @@ -3,6 +3,7 @@
15028 * CAAM hardware register-level view
15029 *
15030 * Copyright 2008-2011 Freescale Semiconductor, Inc.
15031 + * Copyright 2018 NXP
15032 */
15033
15034 #ifndef REGS_H
15035 @@ -211,6 +212,47 @@ struct jr_outentry {
15036 u32 jrstatus; /* Status for completed descriptor */
15037 } __packed;
15038
15039 +/* Version registers (Era 10+) e80-eff */
15040 +struct version_regs {
15041 + u32 crca; /* CRCA_VERSION */
15042 + u32 afha; /* AFHA_VERSION */
15043 + u32 kfha; /* KFHA_VERSION */
15044 + u32 pkha; /* PKHA_VERSION */
15045 + u32 aesa; /* AESA_VERSION */
15046 + u32 mdha; /* MDHA_VERSION */
15047 + u32 desa; /* DESA_VERSION */
15048 + u32 snw8a; /* SNW8A_VERSION */
15049 + u32 snw9a; /* SNW9A_VERSION */
15050 + u32 zuce; /* ZUCE_VERSION */
15051 + u32 zuca; /* ZUCA_VERSION */
15052 + u32 ccha; /* CCHA_VERSION */
15053 + u32 ptha; /* PTHA_VERSION */
15054 + u32 rng; /* RNG_VERSION */
15055 + u32 trng; /* TRNG_VERSION */
15056 + u32 aaha; /* AAHA_VERSION */
15057 + u32 rsvd[10];
15058 + u32 sr; /* SR_VERSION */
15059 + u32 dma; /* DMA_VERSION */
15060 + u32 ai; /* AI_VERSION */
15061 + u32 qi; /* QI_VERSION */
15062 + u32 jr; /* JR_VERSION */
15063 + u32 deco; /* DECO_VERSION */
15064 +};
15065 +
15066 +/* Version registers bitfields */
15067 +
15068 +/* Number of CHAs instantiated */
15069 +#define CHA_VER_NUM_MASK 0xffull
15070 +/* CHA Miscellaneous Information */
15071 +#define CHA_VER_MISC_SHIFT 8
15072 +#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
15073 +/* CHA Revision Number */
15074 +#define CHA_VER_REV_SHIFT 16
15075 +#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
15076 +/* CHA Version ID */
15077 +#define CHA_VER_VID_SHIFT 24
15078 +#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
15079 +
15080 /*
15081 * caam_perfmon - Performance Monitor/Secure Memory Status/
15082 * CAAM Global Status/Component Version IDs
15083 @@ -223,15 +265,13 @@ struct jr_outentry {
15084 #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
15085
15086 /*
15087 - * CHA version IDs / instantiation bitfields
15088 + * CHA version IDs / instantiation bitfields (< Era 10)
15089 * Defined for use with the cha_id fields in perfmon, but the same shift/mask
15090 * selectors can be used to pull out the number of instantiated blocks within
15091 * cha_num fields in perfmon because the locations are the same.
15092 */
15093 #define CHA_ID_LS_AES_SHIFT 0
15094 #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
15095 -#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
15096 -#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
15097
15098 #define CHA_ID_LS_DES_SHIFT 4
15099 #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
15100 @@ -241,9 +281,6 @@ struct jr_outentry {
15101
15102 #define CHA_ID_LS_MD_SHIFT 12
15103 #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
15104 -#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
15105 -#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
15106 -#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
15107
15108 #define CHA_ID_LS_RNG_SHIFT 16
15109 #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
15110 @@ -269,6 +306,13 @@ struct jr_outentry {
15111 #define CHA_ID_MS_JR_SHIFT 28
15112 #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
15113
15114 +/* Specific CHA version IDs */
15115 +#define CHA_VER_VID_AES_LP 0x3ull
15116 +#define CHA_VER_VID_AES_HP 0x4ull
15117 +#define CHA_VER_VID_MD_LP256 0x0ull
15118 +#define CHA_VER_VID_MD_LP512 0x1ull
15119 +#define CHA_VER_VID_MD_HP 0x2ull
15120 +
15121 struct sec_vid {
15122 u16 ip_id;
15123 u8 maj_rev;
15124 @@ -473,8 +517,10 @@ struct caam_ctrl {
15125 struct rng4tst r4tst[2];
15126 };
15127
15128 - u32 rsvd9[448];
15129 + u32 rsvd9[416];
15130
15131 + /* Version registers - introduced with era 10 e80-eff */
15132 + struct version_regs vreg;
15133 /* Performance Monitor f00-fff */
15134 struct caam_perfmon perfmon;
15135 };
15136 @@ -564,8 +610,10 @@ struct caam_job_ring {
15137 u32 rsvd11;
15138 u32 jrcommand; /* JRCRx - JobR command */
15139
15140 - u32 rsvd12[932];
15141 + u32 rsvd12[900];
15142
15143 + /* Version registers - introduced with era 10 e80-eff */
15144 + struct version_regs vreg;
15145 /* Performance Monitor f00-fff */
15146 struct caam_perfmon perfmon;
15147 };
15148 @@ -627,6 +675,8 @@ struct caam_job_ring {
15149 #define JRSTA_DECOERR_INVSIGN 0x86
15150 #define JRSTA_DECOERR_DSASIGN 0x87
15151
15152 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
15153 +
15154 #define JRSTA_CCBERR_JUMP 0x08000000
15155 #define JRSTA_CCBERR_INDEX_MASK 0xff00
15156 #define JRSTA_CCBERR_INDEX_SHIFT 8
15157 @@ -870,13 +920,19 @@ struct caam_deco {
15158 u32 rsvd29[48];
15159 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
15160 u32 rscvd30[193];
15161 -#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
15162 #define DESC_DBG_DECO_STAT_VALID 0x80000000
15163 #define DESC_DBG_DECO_STAT_MASK 0x00F00000
15164 +#define DESC_DBG_DECO_STAT_SHIFT 20
15165 u32 desc_dbg; /* DxDDR - DECO Debug Register */
15166 - u32 rsvd31[126];
15167 + u32 rsvd31[13];
15168 +#define DESC_DER_DECO_STAT_MASK 0x000F0000
15169 +#define DESC_DER_DECO_STAT_SHIFT 16
15170 + u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
15171 + u32 rsvd32[112];
15172 };
15173
15174 +#define DECO_STAT_HOST_ERR 0xD
15175 +
15176 #define DECO_JQCR_WHL 0x20000000
15177 #define DECO_JQCR_FOUR 0x10000000
15178
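
On Era 10+ parts the packed perfmon cha_id/cha_num words no longer describe the accelerators; each CHA instead gets its own 32-bit entry in the new vreg block, decoded with the CHA_VER_* masks above. A sketch of what that looks like for the hash engine (the function itself is illustrative, not part of the patch; the SHA-256 cap for a low-power MDHA matches how the caamhash code limits digest sizes):

#include <crypto/sha.h>
#include "regs.h"

/*
 * Illustrative only: report the largest digest the hash CHA supports,
 * Era 10+ style, from its version register rather than the perfmon
 * CHA ID words.
 */
static int mdha_digest_limit(struct caam_ctrl __iomem *ctrl)
{
	u32 mdha = rd_reg32(&ctrl->vreg.mdha);

	if (!(mdha & CHA_VER_NUM_MASK))
		return 0;			/* no MDHA instantiated */

	if (((mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT) ==
	    CHA_VER_VID_MD_LP256)
		return SHA256_DIGEST_SIZE;	/* low-power MDHA */

	return SHA512_DIGEST_SIZE;
}
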
15179 --- a/drivers/crypto/caam/sg_sw_qm.h
15180 +++ b/drivers/crypto/caam/sg_sw_qm.h
15181 @@ -34,46 +34,61 @@
15182 #ifndef __SG_SW_QM_H
15183 #define __SG_SW_QM_H
15184
15185 -#include <soc/fsl/qman.h>
15186 +#include <linux/fsl_qman.h>
15187 #include "regs.h"
15188
15189 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
15190 +{
15191 + dma_addr_t addr = qm_sg_ptr->opaque;
15192 +
15193 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
15194 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15195 +}
15196 +
15197 static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
15198 - u16 offset)
15199 + u32 len, u16 offset)
15200 {
15201 - qm_sg_entry_set64(qm_sg_ptr, dma);
15202 + qm_sg_ptr->addr = dma;
15203 + qm_sg_ptr->length = len;
15204 qm_sg_ptr->__reserved2 = 0;
15205 qm_sg_ptr->bpid = 0;
15206 - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
15207 + qm_sg_ptr->__reserved3 = 0;
15208 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
15209 +
15210 + cpu_to_hw_sg(qm_sg_ptr);
15211 }
15212
15213 static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
15214 dma_addr_t dma, u32 len, u16 offset)
15215 {
15216 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15217 - qm_sg_entry_set_len(qm_sg_ptr, len);
15218 + qm_sg_ptr->extension = 0;
15219 + qm_sg_ptr->final = 0;
15220 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15221 }
15222
15223 static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
15224 dma_addr_t dma, u32 len, u16 offset)
15225 {
15226 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15227 - qm_sg_entry_set_f(qm_sg_ptr, len);
15228 + qm_sg_ptr->extension = 0;
15229 + qm_sg_ptr->final = 1;
15230 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15231 }
15232
15233 static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
15234 dma_addr_t dma, u32 len, u16 offset)
15235 {
15236 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15237 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
15238 + qm_sg_ptr->extension = 1;
15239 + qm_sg_ptr->final = 0;
15240 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15241 }
15242
15243 static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
15244 dma_addr_t dma, u32 len,
15245 u16 offset)
15246 {
15247 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15248 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
15249 - (len & QM_SG_LEN_MASK));
15250 + qm_sg_ptr->extension = 1;
15251 + qm_sg_ptr->final = 1;
15252 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15253 }
15254
15255 /*
15256 @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
15257 struct qm_sg_entry *qm_sg_ptr, u16 offset)
15258 {
15259 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
15260 - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
15261 +
15262 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
15263 + qm_sg_ptr->final = 1;
15264 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15265 }
15266
15267 #endif /* __SG_SW_QM_H */
15268 --- a/drivers/crypto/talitos.c
15269 +++ b/drivers/crypto/talitos.c
15270 @@ -1250,6 +1250,14 @@ static int ipsec_esp(struct talitos_edes
15271 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
15272 sg_count, areq->assoclen, tbl_off, elen);
15273
15274 + /*
15275 + * In case of SEC 2.x+, cipher in len must include only the ciphertext,
15276 + * while extent is used for ICV len.
15277 + */
15278 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
15279 + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
15280 + desc->ptr[4].len = cpu_to_be16(cryptlen);
15281 +
15282 if (ret > 1) {
15283 tbl_off += ret;
15284 sync_needed = true;
15285 --- a/include/crypto/chacha20.h
15286 +++ b/include/crypto/chacha20.h
15287 @@ -13,6 +13,7 @@
15288 #define CHACHA20_IV_SIZE 16
15289 #define CHACHA20_KEY_SIZE 32
15290 #define CHACHA20_BLOCK_SIZE 64
15291 +#define CHACHAPOLY_IV_SIZE 12
15292
15293 struct chacha20_ctx {
15294 u32 key[8];