layerscape: refresh patches
[openwrt/staging/hauke.git] target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
1 From 0a5b97d1f524c1769b4059e3c7123b52755f7121 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 15:02:01 +0800
4 Subject: [PATCH] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9  This is an integrated patch for Layerscape SEC support.
10
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32 crypto/Kconfig | 30 +
33 crypto/Makefile | 4 +
34 crypto/acompress.c | 169 +
35 crypto/algboss.c | 12 +-
36 crypto/crypto_user.c | 19 +
37 crypto/scompress.c | 356 ++
38 crypto/tcrypt.c | 17 +-
39 crypto/testmgr.c | 1701 ++++----
40 crypto/testmgr.h | 1125 +++---
41 crypto/tls.c | 607 +++
42 drivers/crypto/caam/Kconfig | 72 +-
43 drivers/crypto/caam/Makefile | 15 +-
44 drivers/crypto/caam/caamalg.c | 2125 +++-------
45 drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++
46 drivers/crypto/caam/caamalg_desc.h | 127 +
47 drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++
48 drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++
49 drivers/crypto/caam/caamalg_qi2.h | 265 ++
50 drivers/crypto/caam/caamhash.c | 521 +--
51 drivers/crypto/caam/caampkc.c | 471 ++-
52 drivers/crypto/caam/caampkc.h | 58 +
53 drivers/crypto/caam/caamrng.c | 16 +-
54 drivers/crypto/caam/compat.h | 1 +
55 drivers/crypto/caam/ctrl.c | 356 +-
56 drivers/crypto/caam/ctrl.h | 2 +
57 drivers/crypto/caam/desc.h | 52 +-
58 drivers/crypto/caam/desc_constr.h | 139 +-
59 drivers/crypto/caam/dpseci.c | 859 ++++
60 drivers/crypto/caam/dpseci.h | 395 ++
61 drivers/crypto/caam/dpseci_cmd.h | 261 ++
62 drivers/crypto/caam/error.c | 127 +-
63 drivers/crypto/caam/error.h | 10 +-
64 drivers/crypto/caam/intern.h | 31 +-
65 drivers/crypto/caam/jr.c | 55 +-
66 drivers/crypto/caam/key_gen.c | 32 +-
67 drivers/crypto/caam/key_gen.h | 36 +-
68 drivers/crypto/caam/pdb.h | 62 +
69 drivers/crypto/caam/pkc_desc.c | 36 +
70 drivers/crypto/caam/qi.c | 797 ++++
71 drivers/crypto/caam/qi.h | 204 +
72 drivers/crypto/caam/regs.h | 63 +-
73 drivers/crypto/caam/sg_sw_qm.h | 126 +
74 drivers/crypto/caam/sg_sw_qm2.h | 81 +
75 drivers/crypto/caam/sg_sw_sec4.h | 60 +-
76 drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
77 drivers/staging/wilc1000/linux_wlan.c | 2 +-
78 drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
79 include/crypto/acompress.h | 269 ++
80 include/crypto/internal/acompress.h | 81 +
81 include/crypto/internal/scompress.h | 136 +
82 include/linux/crypto.h | 3 +
83 include/uapi/linux/cryptouser.h | 5 +
84 scripts/spelling.txt | 3 +
85 sound/soc/amd/acp-pcm-dma.c | 2 +-
86 54 files changed, 17263 insertions(+), 3955 deletions(-)
87 create mode 100644 crypto/acompress.c
88 create mode 100644 crypto/scompress.c
89 create mode 100644 crypto/tls.c
90 create mode 100644 drivers/crypto/caam/caamalg_desc.c
91 create mode 100644 drivers/crypto/caam/caamalg_desc.h
92 create mode 100644 drivers/crypto/caam/caamalg_qi.c
93 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
94 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
95 create mode 100644 drivers/crypto/caam/dpseci.c
96 create mode 100644 drivers/crypto/caam/dpseci.h
97 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
98 create mode 100644 drivers/crypto/caam/qi.c
99 create mode 100644 drivers/crypto/caam/qi.h
100 create mode 100644 drivers/crypto/caam/sg_sw_qm.h
101 create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
102 create mode 100644 include/crypto/acompress.h
103 create mode 100644 include/crypto/internal/acompress.h
104 create mode 100644 include/crypto/internal/scompress.h
105
106 --- a/crypto/Kconfig
107 +++ b/crypto/Kconfig
108 @@ -102,6 +102,15 @@ config CRYPTO_KPP
109 select CRYPTO_ALGAPI
110 select CRYPTO_KPP2
111
112 +config CRYPTO_ACOMP2
113 + tristate
114 + select CRYPTO_ALGAPI2
115 +
116 +config CRYPTO_ACOMP
117 + tristate
118 + select CRYPTO_ALGAPI
119 + select CRYPTO_ACOMP2
120 +
121 config CRYPTO_RSA
122 tristate "RSA algorithm"
123 select CRYPTO_AKCIPHER
124 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
125 select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
126 select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
127 select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
128 + select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
129
130 config CRYPTO_USER
131 tristate "Userspace cryptographic algorithm configuration"
132 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
133 a sequence number xored with a salt. This is the default
134 algorithm for CBC.
135
136 +config CRYPTO_TLS
137 + tristate "TLS support"
138 + select CRYPTO_AEAD
139 + select CRYPTO_BLKCIPHER
140 + select CRYPTO_MANAGER
141 + select CRYPTO_HASH
142 + select CRYPTO_NULL
143 + select CRYPTO_AUTHENC
144 + help
145 + Support for TLS 1.0 record encryption and decryption
146 +
147 + This module adds support for encryption/decryption of TLS 1.0 frames
148 + using blockcipher algorithms. The name of the resulting algorithm is
149 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
150 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
151 + accelerated versions will be used automatically if available.
152 +
153 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
154 + operations through AF_ALG or cryptodev interfaces
155 +
156 comment "Block modes"
157
158 config CRYPTO_CBC
159 --- a/crypto/Makefile
160 +++ b/crypto/Makefile
161 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
162 rsa_generic-y += rsa-pkcs1pad.o
163 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
164
165 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
166 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
167 +
168 cryptomgr-y := algboss.o testmgr.o
169
170 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
171 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
172 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
173 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
174 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
175 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
176 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
177 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
178 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
179 --- /dev/null
180 +++ b/crypto/acompress.c
181 @@ -0,0 +1,169 @@
182 +/*
183 + * Asynchronous Compression operations
184 + *
185 + * Copyright (c) 2016, Intel Corporation
186 + * Authors: Weigang Li <weigang.li@intel.com>
187 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
188 + *
189 + * This program is free software; you can redistribute it and/or modify it
190 + * under the terms of the GNU General Public License as published by the Free
191 + * Software Foundation; either version 2 of the License, or (at your option)
192 + * any later version.
193 + *
194 + */
195 +#include <linux/errno.h>
196 +#include <linux/kernel.h>
197 +#include <linux/module.h>
198 +#include <linux/seq_file.h>
199 +#include <linux/slab.h>
200 +#include <linux/string.h>
201 +#include <linux/crypto.h>
202 +#include <crypto/algapi.h>
203 +#include <linux/cryptouser.h>
204 +#include <net/netlink.h>
205 +#include <crypto/internal/acompress.h>
206 +#include <crypto/internal/scompress.h>
207 +#include "internal.h"
208 +
209 +static const struct crypto_type crypto_acomp_type;
210 +
211 +#ifdef CONFIG_NET
212 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
213 +{
214 + struct crypto_report_acomp racomp;
215 +
216 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
217 +
218 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
219 + sizeof(struct crypto_report_acomp), &racomp))
220 + goto nla_put_failure;
221 + return 0;
222 +
223 +nla_put_failure:
224 + return -EMSGSIZE;
225 +}
226 +#else
227 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
228 +{
229 + return -ENOSYS;
230 +}
231 +#endif
232 +
233 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
234 + __attribute__ ((unused));
235 +
236 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
237 +{
238 + seq_puts(m, "type : acomp\n");
239 +}
240 +
241 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
242 +{
243 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
244 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
245 +
246 + alg->exit(acomp);
247 +}
248 +
249 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
250 +{
251 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
252 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
253 +
254 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
255 + return crypto_init_scomp_ops_async(tfm);
256 +
257 + acomp->compress = alg->compress;
258 + acomp->decompress = alg->decompress;
259 + acomp->dst_free = alg->dst_free;
260 + acomp->reqsize = alg->reqsize;
261 +
262 + if (alg->exit)
263 + acomp->base.exit = crypto_acomp_exit_tfm;
264 +
265 + if (alg->init)
266 + return alg->init(acomp);
267 +
268 + return 0;
269 +}
270 +
271 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
272 +{
273 + int extsize = crypto_alg_extsize(alg);
274 +
275 + if (alg->cra_type != &crypto_acomp_type)
276 + extsize += sizeof(struct crypto_scomp *);
277 +
278 + return extsize;
279 +}
280 +
281 +static const struct crypto_type crypto_acomp_type = {
282 + .extsize = crypto_acomp_extsize,
283 + .init_tfm = crypto_acomp_init_tfm,
284 +#ifdef CONFIG_PROC_FS
285 + .show = crypto_acomp_show,
286 +#endif
287 + .report = crypto_acomp_report,
288 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
289 + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
290 + .type = CRYPTO_ALG_TYPE_ACOMPRESS,
291 + .tfmsize = offsetof(struct crypto_acomp, base),
292 +};
293 +
294 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
295 + u32 mask)
296 +{
297 + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
298 +}
299 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
300 +
301 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
302 +{
303 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
304 + struct acomp_req *req;
305 +
306 + req = __acomp_request_alloc(acomp);
307 + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
308 + return crypto_acomp_scomp_alloc_ctx(req);
309 +
310 + return req;
311 +}
312 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
313 +
314 +void acomp_request_free(struct acomp_req *req)
315 +{
316 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
317 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
318 +
319 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
320 + crypto_acomp_scomp_free_ctx(req);
321 +
322 + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
323 + acomp->dst_free(req->dst);
324 + req->dst = NULL;
325 + }
326 +
327 + __acomp_request_free(req);
328 +}
329 +EXPORT_SYMBOL_GPL(acomp_request_free);
330 +
331 +int crypto_register_acomp(struct acomp_alg *alg)
332 +{
333 + struct crypto_alg *base = &alg->base;
334 +
335 + base->cra_type = &crypto_acomp_type;
336 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
337 + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
338 +
339 + return crypto_register_alg(base);
340 +}
341 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
342 +
343 +int crypto_unregister_acomp(struct acomp_alg *alg)
344 +{
345 + return crypto_unregister_alg(&alg->base);
346 +}
347 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
348 +
349 +MODULE_LICENSE("GPL");
350 +MODULE_DESCRIPTION("Asynchronous compression type");
351 --- a/crypto/algboss.c
352 +++ b/crypto/algboss.c
353 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc
354 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
355 type = alg->cra_flags;
356
357 - /* This piece of crap needs to disappear into per-type test hooks. */
358 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
359 - type |= CRYPTO_ALG_TESTED;
360 -#else
361 - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
362 - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
363 - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
364 - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
365 - alg->cra_ablkcipher.ivsize))
366 + /* Do not test internal algorithms. */
367 + if (type & CRYPTO_ALG_INTERNAL)
368 type |= CRYPTO_ALG_TESTED;
369 -#endif
370
371 param->type = type;
372
373 --- a/crypto/crypto_user.c
374 +++ b/crypto/crypto_user.c
375 @@ -112,6 +112,21 @@ nla_put_failure:
376 return -EMSGSIZE;
377 }
378
379 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
380 +{
381 + struct crypto_report_acomp racomp;
382 +
383 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
384 +
385 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
386 + sizeof(struct crypto_report_acomp), &racomp))
387 + goto nla_put_failure;
388 + return 0;
389 +
390 +nla_put_failure:
391 + return -EMSGSIZE;
392 +}
393 +
394 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
395 {
396 struct crypto_report_akcipher rakcipher;
397 @@ -186,7 +201,11 @@ static int crypto_report_one(struct cryp
398 goto nla_put_failure;
399
400 break;
401 + case CRYPTO_ALG_TYPE_ACOMPRESS:
402 + if (crypto_report_acomp(skb, alg))
403 + goto nla_put_failure;
404
405 + break;
406 case CRYPTO_ALG_TYPE_AKCIPHER:
407 if (crypto_report_akcipher(skb, alg))
408 goto nla_put_failure;
409 --- /dev/null
410 +++ b/crypto/scompress.c
411 @@ -0,0 +1,356 @@
412 +/*
413 + * Synchronous Compression operations
414 + *
415 + * Copyright 2015 LG Electronics Inc.
416 + * Copyright (c) 2016, Intel Corporation
417 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
418 + *
419 + * This program is free software; you can redistribute it and/or modify it
420 + * under the terms of the GNU General Public License as published by the Free
421 + * Software Foundation; either version 2 of the License, or (at your option)
422 + * any later version.
423 + *
424 + */
425 +#include <linux/errno.h>
426 +#include <linux/kernel.h>
427 +#include <linux/module.h>
428 +#include <linux/seq_file.h>
429 +#include <linux/slab.h>
430 +#include <linux/string.h>
431 +#include <linux/crypto.h>
432 +#include <linux/vmalloc.h>
433 +#include <crypto/algapi.h>
434 +#include <linux/cryptouser.h>
435 +#include <net/netlink.h>
436 +#include <linux/scatterlist.h>
437 +#include <crypto/scatterwalk.h>
438 +#include <crypto/internal/acompress.h>
439 +#include <crypto/internal/scompress.h>
440 +#include "internal.h"
441 +
442 +static const struct crypto_type crypto_scomp_type;
443 +static void * __percpu *scomp_src_scratches;
444 +static void * __percpu *scomp_dst_scratches;
445 +static int scomp_scratch_users;
446 +static DEFINE_MUTEX(scomp_lock);
447 +
448 +#ifdef CONFIG_NET
449 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
450 +{
451 + struct crypto_report_comp rscomp;
452 +
453 + strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
454 +
455 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
456 + sizeof(struct crypto_report_comp), &rscomp))
457 + goto nla_put_failure;
458 + return 0;
459 +
460 +nla_put_failure:
461 + return -EMSGSIZE;
462 +}
463 +#else
464 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
465 +{
466 + return -ENOSYS;
467 +}
468 +#endif
469 +
470 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
471 + __attribute__ ((unused));
472 +
473 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
474 +{
475 + seq_puts(m, "type : scomp\n");
476 +}
477 +
478 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
479 +{
480 + return 0;
481 +}
482 +
483 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
484 +{
485 + int i;
486 +
487 + if (!scratches)
488 + return;
489 +
490 + for_each_possible_cpu(i)
491 + vfree(*per_cpu_ptr(scratches, i));
492 +
493 + free_percpu(scratches);
494 +}
495 +
496 +static void * __percpu *crypto_scomp_alloc_scratches(void)
497 +{
498 + void * __percpu *scratches;
499 + int i;
500 +
501 + scratches = alloc_percpu(void *);
502 + if (!scratches)
503 + return NULL;
504 +
505 + for_each_possible_cpu(i) {
506 + void *scratch;
507 +
508 + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
509 + if (!scratch)
510 + goto error;
511 + *per_cpu_ptr(scratches, i) = scratch;
512 + }
513 +
514 + return scratches;
515 +
516 +error:
517 + crypto_scomp_free_scratches(scratches);
518 + return NULL;
519 +}
520 +
521 +static void crypto_scomp_free_all_scratches(void)
522 +{
523 + if (!--scomp_scratch_users) {
524 + crypto_scomp_free_scratches(scomp_src_scratches);
525 + crypto_scomp_free_scratches(scomp_dst_scratches);
526 + scomp_src_scratches = NULL;
527 + scomp_dst_scratches = NULL;
528 + }
529 +}
530 +
531 +static int crypto_scomp_alloc_all_scratches(void)
532 +{
533 + if (!scomp_scratch_users++) {
534 + scomp_src_scratches = crypto_scomp_alloc_scratches();
535 + if (!scomp_src_scratches)
536 + return -ENOMEM;
537 + scomp_dst_scratches = crypto_scomp_alloc_scratches();
538 + if (!scomp_dst_scratches)
539 + return -ENOMEM;
540 + }
541 + return 0;
542 +}
543 +
544 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
545 +{
546 + int i, n;
547 + struct page *page;
548 +
549 + if (!sgl)
550 + return;
551 +
552 + n = sg_nents(sgl);
553 + for_each_sg(sgl, sgl, n, i) {
554 + page = sg_page(sgl);
555 + if (page)
556 + __free_page(page);
557 + }
558 +
559 + kfree(sgl);
560 +}
561 +
562 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
563 +{
564 + struct scatterlist *sgl;
565 + struct page *page;
566 + int i, n;
567 +
568 + n = ((size - 1) >> PAGE_SHIFT) + 1;
569 +
570 + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
571 + if (!sgl)
572 + return NULL;
573 +
574 + sg_init_table(sgl, n);
575 +
576 + for (i = 0; i < n; i++) {
577 + page = alloc_page(gfp);
578 + if (!page)
579 + goto err;
580 + sg_set_page(sgl + i, page, PAGE_SIZE, 0);
581 + }
582 +
583 + return sgl;
584 +
585 +err:
586 + sg_mark_end(sgl + i);
587 + crypto_scomp_sg_free(sgl);
588 + return NULL;
589 +}
590 +
591 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
592 +{
593 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
594 + void **tfm_ctx = acomp_tfm_ctx(tfm);
595 + struct crypto_scomp *scomp = *tfm_ctx;
596 + void **ctx = acomp_request_ctx(req);
597 + const int cpu = get_cpu();
598 + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
599 + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
600 + int ret;
601 +
602 + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
603 + ret = -EINVAL;
604 + goto out;
605 + }
606 +
607 + if (req->dst && !req->dlen) {
608 + ret = -EINVAL;
609 + goto out;
610 + }
611 +
612 + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
613 + req->dlen = SCOMP_SCRATCH_SIZE;
614 +
615 + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
616 + if (dir)
617 + ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
618 + scratch_dst, &req->dlen, *ctx);
619 + else
620 + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
621 + scratch_dst, &req->dlen, *ctx);
622 + if (!ret) {
623 + if (!req->dst) {
624 + req->dst = crypto_scomp_sg_alloc(req->dlen,
625 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
626 + GFP_KERNEL : GFP_ATOMIC);
627 + if (!req->dst)
628 + goto out;
629 + }
630 + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
631 + 1);
632 + }
633 +out:
634 + put_cpu();
635 + return ret;
636 +}
637 +
638 +static int scomp_acomp_compress(struct acomp_req *req)
639 +{
640 + return scomp_acomp_comp_decomp(req, 1);
641 +}
642 +
643 +static int scomp_acomp_decompress(struct acomp_req *req)
644 +{
645 + return scomp_acomp_comp_decomp(req, 0);
646 +}
647 +
648 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
649 +{
650 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
651 +
652 + crypto_free_scomp(*ctx);
653 +}
654 +
655 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
656 +{
657 + struct crypto_alg *calg = tfm->__crt_alg;
658 + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
659 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
660 + struct crypto_scomp *scomp;
661 +
662 + if (!crypto_mod_get(calg))
663 + return -EAGAIN;
664 +
665 + scomp = crypto_create_tfm(calg, &crypto_scomp_type);
666 + if (IS_ERR(scomp)) {
667 + crypto_mod_put(calg);
668 + return PTR_ERR(scomp);
669 + }
670 +
671 + *ctx = scomp;
672 + tfm->exit = crypto_exit_scomp_ops_async;
673 +
674 + crt->compress = scomp_acomp_compress;
675 + crt->decompress = scomp_acomp_decompress;
676 + crt->dst_free = crypto_scomp_sg_free;
677 + crt->reqsize = sizeof(void *);
678 +
679 + return 0;
680 +}
681 +
682 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
683 +{
684 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
685 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
686 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
687 + struct crypto_scomp *scomp = *tfm_ctx;
688 + void *ctx;
689 +
690 + ctx = crypto_scomp_alloc_ctx(scomp);
691 + if (IS_ERR(ctx)) {
692 + kfree(req);
693 + return NULL;
694 + }
695 +
696 + *req->__ctx = ctx;
697 +
698 + return req;
699 +}
700 +
701 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
702 +{
703 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
704 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
705 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
706 + struct crypto_scomp *scomp = *tfm_ctx;
707 + void *ctx = *req->__ctx;
708 +
709 + if (ctx)
710 + crypto_scomp_free_ctx(scomp, ctx);
711 +}
712 +
713 +static const struct crypto_type crypto_scomp_type = {
714 + .extsize = crypto_alg_extsize,
715 + .init_tfm = crypto_scomp_init_tfm,
716 +#ifdef CONFIG_PROC_FS
717 + .show = crypto_scomp_show,
718 +#endif
719 + .report = crypto_scomp_report,
720 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
721 + .maskset = CRYPTO_ALG_TYPE_MASK,
722 + .type = CRYPTO_ALG_TYPE_SCOMPRESS,
723 + .tfmsize = offsetof(struct crypto_scomp, base),
724 +};
725 +
726 +int crypto_register_scomp(struct scomp_alg *alg)
727 +{
728 + struct crypto_alg *base = &alg->base;
729 + int ret = -ENOMEM;
730 +
731 + mutex_lock(&scomp_lock);
732 + if (crypto_scomp_alloc_all_scratches())
733 + goto error;
734 +
735 + base->cra_type = &crypto_scomp_type;
736 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
737 + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
738 +
739 + ret = crypto_register_alg(base);
740 + if (ret)
741 + goto error;
742 +
743 + mutex_unlock(&scomp_lock);
744 + return ret;
745 +
746 +error:
747 + crypto_scomp_free_all_scratches();
748 + mutex_unlock(&scomp_lock);
749 + return ret;
750 +}
751 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
752 +
753 +int crypto_unregister_scomp(struct scomp_alg *alg)
754 +{
755 + int ret;
756 +
757 + mutex_lock(&scomp_lock);
758 + ret = crypto_unregister_alg(&alg->base);
759 + crypto_scomp_free_all_scratches();
760 + mutex_unlock(&scomp_lock);
761 +
762 + return ret;
763 +}
764 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
765 +
766 +MODULE_LICENSE("GPL");
767 +MODULE_DESCRIPTION("Synchronous compression type");
768 --- a/crypto/tcrypt.c
769 +++ b/crypto/tcrypt.c
770 @@ -74,7 +74,7 @@ static char *check[] = {
771 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
772 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
773 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
774 - NULL
775 + "rsa", NULL
776 };
777
778 struct tcrypt_result {
779 @@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32
780 ret += tcrypt_test("hmac(sha3-512)");
781 break;
782
783 + case 115:
784 + ret += tcrypt_test("rsa");
785 + break;
786 +
787 case 150:
788 ret += tcrypt_test("ansi_cprng");
789 break;
790 @@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32
791 case 190:
792 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
793 break;
794 + case 191:
795 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
796 + break;
797 case 200:
798 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
799 speed_template_16_24_32);
800 @@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32
801 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
802 speed_template_32_40_48);
803 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
804 - speed_template_32_48_64);
805 + speed_template_32_64);
806 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
807 - speed_template_32_48_64);
808 + speed_template_32_64);
809 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
810 speed_template_16_24_32);
811 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
812 @@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32
813 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
814 speed_template_32_40_48);
815 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
816 - speed_template_32_48_64);
817 + speed_template_32_64);
818 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
819 - speed_template_32_48_64);
820 + speed_template_32_64);
821 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
822 speed_template_16_24_32);
823 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
824 --- a/crypto/testmgr.c
825 +++ b/crypto/testmgr.c
826 @@ -33,6 +33,7 @@
827 #include <crypto/drbg.h>
828 #include <crypto/akcipher.h>
829 #include <crypto/kpp.h>
830 +#include <crypto/acompress.h>
831
832 #include "internal.h"
833
834 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
835 */
836 #define IDX1 32
837 #define IDX2 32400
838 -#define IDX3 1
839 +#define IDX3 1511
840 #define IDX4 8193
841 #define IDX5 22222
842 #define IDX6 17101
843 @@ -82,47 +83,54 @@ struct tcrypt_result {
844
845 struct aead_test_suite {
846 struct {
847 - struct aead_testvec *vecs;
848 + const struct aead_testvec *vecs;
849 unsigned int count;
850 } enc, dec;
851 };
852
853 struct cipher_test_suite {
854 struct {
855 - struct cipher_testvec *vecs;
856 + const struct cipher_testvec *vecs;
857 unsigned int count;
858 } enc, dec;
859 };
860
861 struct comp_test_suite {
862 struct {
863 - struct comp_testvec *vecs;
864 + const struct comp_testvec *vecs;
865 unsigned int count;
866 } comp, decomp;
867 };
868
869 struct hash_test_suite {
870 - struct hash_testvec *vecs;
871 + const struct hash_testvec *vecs;
872 unsigned int count;
873 };
874
875 struct cprng_test_suite {
876 - struct cprng_testvec *vecs;
877 + const struct cprng_testvec *vecs;
878 unsigned int count;
879 };
880
881 struct drbg_test_suite {
882 - struct drbg_testvec *vecs;
883 + const struct drbg_testvec *vecs;
884 unsigned int count;
885 };
886
887 +struct tls_test_suite {
888 + struct {
889 + struct tls_testvec *vecs;
890 + unsigned int count;
891 + } enc, dec;
892 +};
893 +
894 struct akcipher_test_suite {
895 - struct akcipher_testvec *vecs;
896 + const struct akcipher_testvec *vecs;
897 unsigned int count;
898 };
899
900 struct kpp_test_suite {
901 - struct kpp_testvec *vecs;
902 + const struct kpp_testvec *vecs;
903 unsigned int count;
904 };
905
906 @@ -139,12 +147,14 @@ struct alg_test_desc {
907 struct hash_test_suite hash;
908 struct cprng_test_suite cprng;
909 struct drbg_test_suite drbg;
910 + struct tls_test_suite tls;
911 struct akcipher_test_suite akcipher;
912 struct kpp_test_suite kpp;
913 } suite;
914 };
915
916 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
917 +static const unsigned int IDX[8] = {
918 + IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
919
920 static void hexdump(unsigned char *buf, unsigned int len)
921 {
922 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
923 }
924
925 static int ahash_partial_update(struct ahash_request **preq,
926 - struct crypto_ahash *tfm, struct hash_testvec *template,
927 + struct crypto_ahash *tfm, const struct hash_testvec *template,
928 void *hash_buff, int k, int temp, struct scatterlist *sg,
929 const char *algo, char *result, struct tcrypt_result *tresult)
930 {
931 @@ -259,11 +269,12 @@ out_nostate:
932 return ret;
933 }
934
935 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
936 - unsigned int tcount, bool use_digest,
937 - const int align_offset)
938 +static int __test_hash(struct crypto_ahash *tfm,
939 + const struct hash_testvec *template, unsigned int tcount,
940 + bool use_digest, const int align_offset)
941 {
942 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
943 + size_t digest_size = crypto_ahash_digestsize(tfm);
944 unsigned int i, j, k, temp;
945 struct scatterlist sg[8];
946 char *result;
947 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
948 char *xbuf[XBUFSIZE];
949 int ret = -ENOMEM;
950
951 - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
952 + result = kmalloc(digest_size, GFP_KERNEL);
953 if (!result)
954 return ret;
955 key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
956 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
957 goto out;
958
959 j++;
960 - memset(result, 0, MAX_DIGEST_SIZE);
961 + memset(result, 0, digest_size);
962
963 hash_buff = xbuf[0];
964 hash_buff += align_offset;
965 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
966 continue;
967
968 j++;
969 - memset(result, 0, MAX_DIGEST_SIZE);
970 + memset(result, 0, digest_size);
971
972 temp = 0;
973 sg_init_table(sg, template[i].np);
974 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
975 continue;
976
977 j++;
978 - memset(result, 0, MAX_DIGEST_SIZE);
979 + memset(result, 0, digest_size);
980
981 ret = -EINVAL;
982 hash_buff = xbuf[0];
983 @@ -536,7 +547,8 @@ out_nobuf:
984 return ret;
985 }
986
987 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
988 +static int test_hash(struct crypto_ahash *tfm,
989 + const struct hash_testvec *template,
990 unsigned int tcount, bool use_digest)
991 {
992 unsigned int alignmask;
993 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
994 }
995
996 static int __test_aead(struct crypto_aead *tfm, int enc,
997 - struct aead_testvec *template, unsigned int tcount,
998 + const struct aead_testvec *template, unsigned int tcount,
999 const bool diff_dst, const int align_offset)
1000 {
1001 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1002 @@ -955,7 +967,7 @@ out_noxbuf:
1003 }
1004
1005 static int test_aead(struct crypto_aead *tfm, int enc,
1006 - struct aead_testvec *template, unsigned int tcount)
1007 + const struct aead_testvec *template, unsigned int tcount)
1008 {
1009 unsigned int alignmask;
1010 int ret;
1011 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1012 return 0;
1013 }
1014
1015 +static int __test_tls(struct crypto_aead *tfm, int enc,
1016 + struct tls_testvec *template, unsigned int tcount,
1017 + const bool diff_dst)
1018 +{
1019 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1020 + unsigned int i, k, authsize;
1021 + char *q;
1022 + struct aead_request *req;
1023 + struct scatterlist *sg;
1024 + struct scatterlist *sgout;
1025 + const char *e, *d;
1026 + struct tcrypt_result result;
1027 + void *input;
1028 + void *output;
1029 + void *assoc;
1030 + char *iv;
1031 + char *key;
1032 + char *xbuf[XBUFSIZE];
1033 + char *xoutbuf[XBUFSIZE];
1034 + char *axbuf[XBUFSIZE];
1035 + int ret = -ENOMEM;
1036 +
1037 + if (testmgr_alloc_buf(xbuf))
1038 + goto out_noxbuf;
1039 +
1040 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
1041 + goto out_nooutbuf;
1042 +
1043 + if (testmgr_alloc_buf(axbuf))
1044 + goto out_noaxbuf;
1045 +
1046 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1047 + if (!iv)
1048 + goto out_noiv;
1049 +
1050 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1051 + if (!key)
1052 + goto out_nokey;
1053 +
1054 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1055 + if (!sg)
1056 + goto out_nosg;
1057 +
1058 + sgout = sg + 8;
1059 +
1060 + d = diff_dst ? "-ddst" : "";
1061 + e = enc ? "encryption" : "decryption";
1062 +
1063 + init_completion(&result.completion);
1064 +
1065 + req = aead_request_alloc(tfm, GFP_KERNEL);
1066 + if (!req) {
1067 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
1068 + d, algo);
1069 + goto out;
1070 + }
1071 +
1072 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1073 + tcrypt_complete, &result);
1074 +
1075 + for (i = 0; i < tcount; i++) {
1076 + input = xbuf[0];
1077 + assoc = axbuf[0];
1078 +
1079 + ret = -EINVAL;
1080 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1081 + template[i].alen > PAGE_SIZE))
1082 + goto out;
1083 +
1084 + memcpy(assoc, template[i].assoc, template[i].alen);
1085 + memcpy(input, template[i].input, template[i].ilen);
1086 +
1087 + if (template[i].iv)
1088 + memcpy(iv, template[i].iv, MAX_IVLEN);
1089 + else
1090 + memset(iv, 0, MAX_IVLEN);
1091 +
1092 + crypto_aead_clear_flags(tfm, ~0);
1093 +
1094 + if (template[i].klen > MAX_KEYLEN) {
1095 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1096 + d, i, algo, template[i].klen, MAX_KEYLEN);
1097 + ret = -EINVAL;
1098 + goto out;
1099 + }
1100 + memcpy(key, template[i].key, template[i].klen);
1101 +
1102 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
1103 + if (!ret == template[i].fail) {
1104 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1105 + d, i, algo, crypto_aead_get_flags(tfm));
1106 + goto out;
1107 + } else if (ret)
1108 + continue;
1109 +
1110 + authsize = 20;
1111 + ret = crypto_aead_setauthsize(tfm, authsize);
1112 + if (ret) {
1113 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1114 + d, authsize, i, algo);
1115 + goto out;
1116 + }
1117 +
1118 + k = !!template[i].alen;
1119 + sg_init_table(sg, k + 1);
1120 + sg_set_buf(&sg[0], assoc, template[i].alen);
1121 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1122 + template[i].ilen));
1123 + output = input;
1124 +
1125 + if (diff_dst) {
1126 + sg_init_table(sgout, k + 1);
1127 + sg_set_buf(&sgout[0], assoc, template[i].alen);
1128 +
1129 + output = xoutbuf[0];
1130 + sg_set_buf(&sgout[k], output,
1131 + (enc ? template[i].rlen : template[i].ilen));
1132 + }
1133 +
1134 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1135 + template[i].ilen, iv);
1136 +
1137 + aead_request_set_ad(req, template[i].alen);
1138 +
1139 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1140 +
1141 + switch (ret) {
1142 + case 0:
1143 + if (template[i].novrfy) {
1144 + /* verification was supposed to fail */
1145 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1146 + d, e, i, algo);
1147 + /* so really, we got a bad message */
1148 + ret = -EBADMSG;
1149 + goto out;
1150 + }
1151 + break;
1152 + case -EINPROGRESS:
1153 + case -EBUSY:
1154 + wait_for_completion(&result.completion);
1155 + reinit_completion(&result.completion);
1156 + ret = result.err;
1157 + if (!ret)
1158 + break;
1159 + case -EBADMSG:
1160 + /* verification failure was expected */
1161 + if (template[i].novrfy)
1162 + continue;
1163 + /* fall through */
1164 + default:
1165 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1166 + d, e, i, algo, -ret);
1167 + goto out;
1168 + }
1169 +
1170 + q = output;
1171 + if (memcmp(q, template[i].result, template[i].rlen)) {
1172 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1173 + d, i, e, algo);
1174 + hexdump(q, template[i].rlen);
1175 + pr_err("should be:\n");
1176 + hexdump(template[i].result, template[i].rlen);
1177 + ret = -EINVAL;
1178 + goto out;
1179 + }
1180 + }
1181 +
1182 +out:
1183 + aead_request_free(req);
1184 +
1185 + kfree(sg);
1186 +out_nosg:
1187 + kfree(key);
1188 +out_nokey:
1189 + kfree(iv);
1190 +out_noiv:
1191 + testmgr_free_buf(axbuf);
1192 +out_noaxbuf:
1193 + if (diff_dst)
1194 + testmgr_free_buf(xoutbuf);
1195 +out_nooutbuf:
1196 + testmgr_free_buf(xbuf);
1197 +out_noxbuf:
1198 + return ret;
1199 +}
1200 +
1201 +static int test_tls(struct crypto_aead *tfm, int enc,
1202 + struct tls_testvec *template, unsigned int tcount)
1203 +{
1204 + int ret;
1205 + /* test 'dst == src' case */
1206 + ret = __test_tls(tfm, enc, template, tcount, false);
1207 + if (ret)
1208 + return ret;
1209 + /* test 'dst != src' case */
1210 + return __test_tls(tfm, enc, template, tcount, true);
1211 +}
1212 +
1213 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1214 + u32 type, u32 mask)
1215 +{
1216 + struct crypto_aead *tfm;
1217 + int err = 0;
1218 +
1219 + tfm = crypto_alloc_aead(driver, type, mask);
1220 + if (IS_ERR(tfm)) {
1221 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1222 + driver, PTR_ERR(tfm));
1223 + return PTR_ERR(tfm);
1224 + }
1225 +
1226 + if (desc->suite.tls.enc.vecs) {
1227 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1228 + desc->suite.tls.enc.count);
1229 + if (err)
1230 + goto out;
1231 + }
1232 +
1233 + if (!err && desc->suite.tls.dec.vecs)
1234 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1235 + desc->suite.tls.dec.count);
1236 +
1237 +out:
1238 + crypto_free_aead(tfm);
1239 + return err;
1240 +}
1241 +
1242 static int test_cipher(struct crypto_cipher *tfm, int enc,
1243 - struct cipher_testvec *template, unsigned int tcount)
1244 + const struct cipher_testvec *template,
1245 + unsigned int tcount)
1246 {
1247 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1248 unsigned int i, j, k;
1249 @@ -1066,7 +1306,8 @@ out_nobuf:
1250 }
1251
1252 static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1253 - struct cipher_testvec *template, unsigned int tcount,
1254 + const struct cipher_testvec *template,
1255 + unsigned int tcount,
1256 const bool diff_dst, const int align_offset)
1257 {
1258 const char *algo =
1259 @@ -1330,7 +1571,8 @@ out_nobuf:
1260 }
1261
1262 static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1263 - struct cipher_testvec *template, unsigned int tcount)
1264 + const struct cipher_testvec *template,
1265 + unsigned int tcount)
1266 {
1267 unsigned int alignmask;
1268 int ret;
1269 @@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
1270 return 0;
1271 }
1272
1273 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1274 - struct comp_testvec *dtemplate, int ctcount, int dtcount)
1275 +static int test_comp(struct crypto_comp *tfm,
1276 + const struct comp_testvec *ctemplate,
1277 + const struct comp_testvec *dtemplate,
1278 + int ctcount, int dtcount)
1279 {
1280 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1281 unsigned int i;
1282 @@ -1442,7 +1686,154 @@ out:
1283 return ret;
1284 }
1285
1286 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1287 +static int test_acomp(struct crypto_acomp *tfm,
1288 + const struct comp_testvec *ctemplate,
1289 + const struct comp_testvec *dtemplate,
1290 + int ctcount, int dtcount)
1291 +{
1292 + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1293 + unsigned int i;
1294 + char *output;
1295 + int ret;
1296 + struct scatterlist src, dst;
1297 + struct acomp_req *req;
1298 + struct tcrypt_result result;
1299 +
1300 + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1301 + if (!output)
1302 + return -ENOMEM;
1303 +
1304 + for (i = 0; i < ctcount; i++) {
1305 + unsigned int dlen = COMP_BUF_SIZE;
1306 + int ilen = ctemplate[i].inlen;
1307 + void *input_vec;
1308 +
1309 + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1310 + if (!input_vec) {
1311 + ret = -ENOMEM;
1312 + goto out;
1313 + }
1314 +
1315 + memset(output, 0, dlen);
1316 + init_completion(&result.completion);
1317 + sg_init_one(&src, input_vec, ilen);
1318 + sg_init_one(&dst, output, dlen);
1319 +
1320 + req = acomp_request_alloc(tfm);
1321 + if (!req) {
1322 + pr_err("alg: acomp: request alloc failed for %s\n",
1323 + algo);
1324 + kfree(input_vec);
1325 + ret = -ENOMEM;
1326 + goto out;
1327 + }
1328 +
1329 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1330 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1331 + tcrypt_complete, &result);
1332 +
1333 + ret = wait_async_op(&result, crypto_acomp_compress(req));
1334 + if (ret) {
1335 + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1336 + i + 1, algo, -ret);
1337 + kfree(input_vec);
1338 + acomp_request_free(req);
1339 + goto out;
1340 + }
1341 +
1342 + if (req->dlen != ctemplate[i].outlen) {
1343 + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1344 + i + 1, algo, req->dlen);
1345 + ret = -EINVAL;
1346 + kfree(input_vec);
1347 + acomp_request_free(req);
1348 + goto out;
1349 + }
1350 +
1351 + if (memcmp(output, ctemplate[i].output, req->dlen)) {
1352 + pr_err("alg: acomp: Compression test %d failed for %s\n",
1353 + i + 1, algo);
1354 + hexdump(output, req->dlen);
1355 + ret = -EINVAL;
1356 + kfree(input_vec);
1357 + acomp_request_free(req);
1358 + goto out;
1359 + }
1360 +
1361 + kfree(input_vec);
1362 + acomp_request_free(req);
1363 + }
1364 +
1365 + for (i = 0; i < dtcount; i++) {
1366 + unsigned int dlen = COMP_BUF_SIZE;
1367 + int ilen = dtemplate[i].inlen;
1368 + void *input_vec;
1369 +
1370 + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1371 + if (!input_vec) {
1372 + ret = -ENOMEM;
1373 + goto out;
1374 + }
1375 +
1376 + memset(output, 0, dlen);
1377 + init_completion(&result.completion);
1378 + sg_init_one(&src, input_vec, ilen);
1379 + sg_init_one(&dst, output, dlen);
1380 +
1381 + req = acomp_request_alloc(tfm);
1382 + if (!req) {
1383 + pr_err("alg: acomp: request alloc failed for %s\n",
1384 + algo);
1385 + kfree(input_vec);
1386 + ret = -ENOMEM;
1387 + goto out;
1388 + }
1389 +
1390 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1391 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1392 + tcrypt_complete, &result);
1393 +
1394 + ret = wait_async_op(&result, crypto_acomp_decompress(req));
1395 + if (ret) {
1396 + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1397 + i + 1, algo, -ret);
1398 + kfree(input_vec);
1399 + acomp_request_free(req);
1400 + goto out;
1401 + }
1402 +
1403 + if (req->dlen != dtemplate[i].outlen) {
1404 + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1405 + i + 1, algo, req->dlen);
1406 + ret = -EINVAL;
1407 + kfree(input_vec);
1408 + acomp_request_free(req);
1409 + goto out;
1410 + }
1411 +
1412 + if (memcmp(output, dtemplate[i].output, req->dlen)) {
1413 + pr_err("alg: acomp: Decompression test %d failed for %s\n",
1414 + i + 1, algo);
1415 + hexdump(output, req->dlen);
1416 + ret = -EINVAL;
1417 + kfree(input_vec);
1418 + acomp_request_free(req);
1419 + goto out;
1420 + }
1421 +
1422 + kfree(input_vec);
1423 + acomp_request_free(req);
1424 + }
1425 +
1426 + ret = 0;
1427 +
1428 +out:
1429 + kfree(output);
1430 + return ret;
1431 +}
1432 +
1433 +static int test_cprng(struct crypto_rng *tfm,
1434 + const struct cprng_testvec *template,
1435 unsigned int tcount)
1436 {
1437 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1438 @@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al
1439 struct crypto_aead *tfm;
1440 int err = 0;
1441
1442 - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1443 + tfm = crypto_alloc_aead(driver, type, mask);
1444 if (IS_ERR(tfm)) {
1445 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1446 "%ld\n", driver, PTR_ERR(tfm));
1447 @@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct
1448 struct crypto_cipher *tfm;
1449 int err = 0;
1450
1451 - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1452 + tfm = crypto_alloc_cipher(driver, type, mask);
1453 if (IS_ERR(tfm)) {
1454 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1455 "%s: %ld\n", driver, PTR_ERR(tfm));
1456 @@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc
1457 struct crypto_skcipher *tfm;
1458 int err = 0;
1459
1460 - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1461 + tfm = crypto_alloc_skcipher(driver, type, mask);
1462 if (IS_ERR(tfm)) {
1463 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1464 "%s: %ld\n", driver, PTR_ERR(tfm));
1465 @@ -1593,22 +1984,38 @@ out:
1466 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1467 u32 type, u32 mask)
1468 {
1469 - struct crypto_comp *tfm;
1470 + struct crypto_comp *comp;
1471 + struct crypto_acomp *acomp;
1472 int err;
1473 + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1474
1475 - tfm = crypto_alloc_comp(driver, type, mask);
1476 - if (IS_ERR(tfm)) {
1477 - printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1478 - "%ld\n", driver, PTR_ERR(tfm));
1479 - return PTR_ERR(tfm);
1480 - }
1481 + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1482 + acomp = crypto_alloc_acomp(driver, type, mask);
1483 + if (IS_ERR(acomp)) {
1484 + pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1485 + driver, PTR_ERR(acomp));
1486 + return PTR_ERR(acomp);
1487 + }
1488 + err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1489 + desc->suite.comp.decomp.vecs,
1490 + desc->suite.comp.comp.count,
1491 + desc->suite.comp.decomp.count);
1492 + crypto_free_acomp(acomp);
1493 + } else {
1494 + comp = crypto_alloc_comp(driver, type, mask);
1495 + if (IS_ERR(comp)) {
1496 + pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1497 + driver, PTR_ERR(comp));
1498 + return PTR_ERR(comp);
1499 + }
1500
1501 - err = test_comp(tfm, desc->suite.comp.comp.vecs,
1502 - desc->suite.comp.decomp.vecs,
1503 - desc->suite.comp.comp.count,
1504 - desc->suite.comp.decomp.count);
1505 + err = test_comp(comp, desc->suite.comp.comp.vecs,
1506 + desc->suite.comp.decomp.vecs,
1507 + desc->suite.comp.comp.count,
1508 + desc->suite.comp.decomp.count);
1509
1510 - crypto_free_comp(tfm);
1511 + crypto_free_comp(comp);
1512 + }
1513 return err;
1514 }
1515
1516 @@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al
1517 struct crypto_ahash *tfm;
1518 int err;
1519
1520 - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1521 + tfm = crypto_alloc_ahash(driver, type, mask);
1522 if (IS_ERR(tfm)) {
1523 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1524 "%ld\n", driver, PTR_ERR(tfm));
1525 @@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct
1526 if (err)
1527 goto out;
1528
1529 - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1530 + tfm = crypto_alloc_shash(driver, type, mask);
1531 if (IS_ERR(tfm)) {
1532 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1533 "%ld\n", driver, PTR_ERR(tfm));
1534 @@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a
1535 struct crypto_rng *rng;
1536 int err;
1537
1538 - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1539 + rng = crypto_alloc_rng(driver, type, mask);
1540 if (IS_ERR(rng)) {
1541 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1542 "%ld\n", driver, PTR_ERR(rng));
1543 @@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a
1544 }
1545
1546
1547 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1548 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1549 const char *driver, u32 type, u32 mask)
1550 {
1551 int ret = -EAGAIN;
1552 @@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te
1553 if (!buf)
1554 return -ENOMEM;
1555
1556 - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1557 + drng = crypto_alloc_rng(driver, type, mask);
1558 if (IS_ERR(drng)) {
1559 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1560 "%s\n", driver);
1561 @@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al
1562 int err = 0;
1563 int pr = 0;
1564 int i = 0;
1565 - struct drbg_testvec *template = desc->suite.drbg.vecs;
1566 + const struct drbg_testvec *template = desc->suite.drbg.vecs;
1567 unsigned int tcount = desc->suite.drbg.count;
1568
1569 if (0 == memcmp(driver, "drbg_pr_", 8))
1570 @@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al
1571
1572 }
1573
1574 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1575 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1576 const char *alg)
1577 {
1578 struct kpp_request *req;
1579 @@ -1888,7 +2295,7 @@ free_req:
1580 }
1581
1582 static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1583 - struct kpp_testvec *vecs, unsigned int tcount)
1584 + const struct kpp_testvec *vecs, unsigned int tcount)
1585 {
1586 int ret, i;
1587
1588 @@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg
1589 struct crypto_kpp *tfm;
1590 int err = 0;
1591
1592 - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1593 + tfm = crypto_alloc_kpp(driver, type, mask);
1594 if (IS_ERR(tfm)) {
1595 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1596 driver, PTR_ERR(tfm));
1597 @@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg
1598 }
1599
1600 static int test_akcipher_one(struct crypto_akcipher *tfm,
1601 - struct akcipher_testvec *vecs)
1602 + const struct akcipher_testvec *vecs)
1603 {
1604 char *xbuf[XBUFSIZE];
1605 struct akcipher_request *req;
1606 @@ -2044,7 +2451,8 @@ free_xbuf:
1607 }
1608
1609 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1610 - struct akcipher_testvec *vecs, unsigned int tcount)
1611 + const struct akcipher_testvec *vecs,
1612 + unsigned int tcount)
1613 {
1614 const char *algo =
1615 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1616 @@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc
1617 struct crypto_akcipher *tfm;
1618 int err = 0;
1619
1620 - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1621 + tfm = crypto_alloc_akcipher(driver, type, mask);
1622 if (IS_ERR(tfm)) {
1623 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1624 driver, PTR_ERR(tfm));
1625 @@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al
1626 return 0;
1627 }
1628
1629 +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
1630 +
1631 /* Please keep this list sorted by algorithm name. */
1632 static const struct alg_test_desc alg_test_descs[] = {
1633 {
1634 - .alg = "__cbc-cast5-avx",
1635 - .test = alg_test_null,
1636 - }, {
1637 - .alg = "__cbc-cast6-avx",
1638 - .test = alg_test_null,
1639 - }, {
1640 - .alg = "__cbc-serpent-avx",
1641 - .test = alg_test_null,
1642 - }, {
1643 - .alg = "__cbc-serpent-avx2",
1644 - .test = alg_test_null,
1645 - }, {
1646 - .alg = "__cbc-serpent-sse2",
1647 - .test = alg_test_null,
1648 - }, {
1649 - .alg = "__cbc-twofish-avx",
1650 - .test = alg_test_null,
1651 - }, {
1652 - .alg = "__driver-cbc-aes-aesni",
1653 - .test = alg_test_null,
1654 - .fips_allowed = 1,
1655 - }, {
1656 - .alg = "__driver-cbc-camellia-aesni",
1657 - .test = alg_test_null,
1658 - }, {
1659 - .alg = "__driver-cbc-camellia-aesni-avx2",
1660 - .test = alg_test_null,
1661 - }, {
1662 - .alg = "__driver-cbc-cast5-avx",
1663 - .test = alg_test_null,
1664 - }, {
1665 - .alg = "__driver-cbc-cast6-avx",
1666 - .test = alg_test_null,
1667 - }, {
1668 - .alg = "__driver-cbc-serpent-avx",
1669 - .test = alg_test_null,
1670 - }, {
1671 - .alg = "__driver-cbc-serpent-avx2",
1672 - .test = alg_test_null,
1673 - }, {
1674 - .alg = "__driver-cbc-serpent-sse2",
1675 - .test = alg_test_null,
1676 - }, {
1677 - .alg = "__driver-cbc-twofish-avx",
1678 - .test = alg_test_null,
1679 - }, {
1680 - .alg = "__driver-ecb-aes-aesni",
1681 - .test = alg_test_null,
1682 - .fips_allowed = 1,
1683 - }, {
1684 - .alg = "__driver-ecb-camellia-aesni",
1685 - .test = alg_test_null,
1686 - }, {
1687 - .alg = "__driver-ecb-camellia-aesni-avx2",
1688 - .test = alg_test_null,
1689 - }, {
1690 - .alg = "__driver-ecb-cast5-avx",
1691 - .test = alg_test_null,
1692 - }, {
1693 - .alg = "__driver-ecb-cast6-avx",
1694 - .test = alg_test_null,
1695 - }, {
1696 - .alg = "__driver-ecb-serpent-avx",
1697 - .test = alg_test_null,
1698 - }, {
1699 - .alg = "__driver-ecb-serpent-avx2",
1700 - .test = alg_test_null,
1701 - }, {
1702 - .alg = "__driver-ecb-serpent-sse2",
1703 - .test = alg_test_null,
1704 - }, {
1705 - .alg = "__driver-ecb-twofish-avx",
1706 - .test = alg_test_null,
1707 - }, {
1708 - .alg = "__driver-gcm-aes-aesni",
1709 - .test = alg_test_null,
1710 - .fips_allowed = 1,
1711 - }, {
1712 - .alg = "__ghash-pclmulqdqni",
1713 - .test = alg_test_null,
1714 - .fips_allowed = 1,
1715 - }, {
1716 .alg = "ansi_cprng",
1717 .test = alg_test_cprng,
1718 .suite = {
1719 - .cprng = {
1720 - .vecs = ansi_cprng_aes_tv_template,
1721 - .count = ANSI_CPRNG_AES_TEST_VECTORS
1722 - }
1723 + .cprng = __VECS(ansi_cprng_aes_tv_template)
1724 }
1725 }, {
1726 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1727 .test = alg_test_aead,
1728 .suite = {
1729 .aead = {
1730 - .enc = {
1731 - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1732 - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1733 - },
1734 - .dec = {
1735 - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1736 - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1737 - }
1738 + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1739 + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1740 }
1741 }
1742 }, {
1743 @@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te
1744 .test = alg_test_aead,
1745 .suite = {
1746 .aead = {
1747 - .enc = {
1748 - .vecs =
1749 - hmac_sha1_aes_cbc_enc_tv_temp,
1750 - .count =
1751 - HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1752 - }
1753 + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1754 }
1755 }
1756 }, {
1757 @@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te
1758 .test = alg_test_aead,
1759 .suite = {
1760 .aead = {
1761 - .enc = {
1762 - .vecs =
1763 - hmac_sha1_des_cbc_enc_tv_temp,
1764 - .count =
1765 - HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1766 - }
1767 + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1768 }
1769 }
1770 }, {
1771 @@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te
1772 .fips_allowed = 1,
1773 .suite = {
1774 .aead = {
1775 - .enc = {
1776 - .vecs =
1777 - hmac_sha1_des3_ede_cbc_enc_tv_temp,
1778 - .count =
1779 - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1780 - }
1781 + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1782 }
1783 }
1784 }, {
1785 @@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te
1786 .test = alg_test_aead,
1787 .suite = {
1788 .aead = {
1789 - .enc = {
1790 - .vecs =
1791 - hmac_sha1_ecb_cipher_null_enc_tv_temp,
1792 - .count =
1793 - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1794 - },
1795 - .dec = {
1796 - .vecs =
1797 - hmac_sha1_ecb_cipher_null_dec_tv_temp,
1798 - .count =
1799 - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1800 - }
1801 + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1802 + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1803 }
1804 }
1805 }, {
1806 @@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te
1807 .test = alg_test_aead,
1808 .suite = {
1809 .aead = {
1810 - .enc = {
1811 - .vecs =
1812 - hmac_sha224_des_cbc_enc_tv_temp,
1813 - .count =
1814 - HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1815 - }
1816 + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1817 }
1818 }
1819 }, {
1820 @@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te
1821 .fips_allowed = 1,
1822 .suite = {
1823 .aead = {
1824 - .enc = {
1825 - .vecs =
1826 - hmac_sha224_des3_ede_cbc_enc_tv_temp,
1827 - .count =
1828 - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1829 - }
1830 + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1831 }
1832 }
1833 }, {
1834 @@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te
1835 .fips_allowed = 1,
1836 .suite = {
1837 .aead = {
1838 - .enc = {
1839 - .vecs =
1840 - hmac_sha256_aes_cbc_enc_tv_temp,
1841 - .count =
1842 - HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1843 - }
1844 + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1845 }
1846 }
1847 }, {
1848 @@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te
1849 .test = alg_test_aead,
1850 .suite = {
1851 .aead = {
1852 - .enc = {
1853 - .vecs =
1854 - hmac_sha256_des_cbc_enc_tv_temp,
1855 - .count =
1856 - HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1857 - }
1858 + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1859 }
1860 }
1861 }, {
1862 @@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te
1863 .fips_allowed = 1,
1864 .suite = {
1865 .aead = {
1866 - .enc = {
1867 - .vecs =
1868 - hmac_sha256_des3_ede_cbc_enc_tv_temp,
1869 - .count =
1870 - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1871 - }
1872 + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1873 }
1874 }
1875 }, {
1876 @@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te
1877 .test = alg_test_aead,
1878 .suite = {
1879 .aead = {
1880 - .enc = {
1881 - .vecs =
1882 - hmac_sha384_des_cbc_enc_tv_temp,
1883 - .count =
1884 - HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1885 - }
1886 + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1887 }
1888 }
1889 }, {
1890 @@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te
1891 .fips_allowed = 1,
1892 .suite = {
1893 .aead = {
1894 - .enc = {
1895 - .vecs =
1896 - hmac_sha384_des3_ede_cbc_enc_tv_temp,
1897 - .count =
1898 - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1899 - }
1900 + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1901 }
1902 }
1903 }, {
1904 @@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te
1905 .test = alg_test_aead,
1906 .suite = {
1907 .aead = {
1908 - .enc = {
1909 - .vecs =
1910 - hmac_sha512_aes_cbc_enc_tv_temp,
1911 - .count =
1912 - HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1913 - }
1914 + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1915 }
1916 }
1917 }, {
1918 @@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te
1919 .test = alg_test_aead,
1920 .suite = {
1921 .aead = {
1922 - .enc = {
1923 - .vecs =
1924 - hmac_sha512_des_cbc_enc_tv_temp,
1925 - .count =
1926 - HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1927 - }
1928 + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1929 }
1930 }
1931 }, {
1932 @@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te
1933 .fips_allowed = 1,
1934 .suite = {
1935 .aead = {
1936 - .enc = {
1937 - .vecs =
1938 - hmac_sha512_des3_ede_cbc_enc_tv_temp,
1939 - .count =
1940 - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1941 - }
1942 + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1943 }
1944 }
1945 }, {
1946 @@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te
1947 .fips_allowed = 1,
1948 .suite = {
1949 .cipher = {
1950 - .enc = {
1951 - .vecs = aes_cbc_enc_tv_template,
1952 - .count = AES_CBC_ENC_TEST_VECTORS
1953 - },
1954 - .dec = {
1955 - .vecs = aes_cbc_dec_tv_template,
1956 - .count = AES_CBC_DEC_TEST_VECTORS
1957 - }
1958 + .enc = __VECS(aes_cbc_enc_tv_template),
1959 + .dec = __VECS(aes_cbc_dec_tv_template)
1960 }
1961 }
1962 }, {
1963 @@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te
1964 .test = alg_test_skcipher,
1965 .suite = {
1966 .cipher = {
1967 - .enc = {
1968 - .vecs = anubis_cbc_enc_tv_template,
1969 - .count = ANUBIS_CBC_ENC_TEST_VECTORS
1970 - },
1971 - .dec = {
1972 - .vecs = anubis_cbc_dec_tv_template,
1973 - .count = ANUBIS_CBC_DEC_TEST_VECTORS
1974 - }
1975 + .enc = __VECS(anubis_cbc_enc_tv_template),
1976 + .dec = __VECS(anubis_cbc_dec_tv_template)
1977 }
1978 }
1979 }, {
1980 @@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te
1981 .test = alg_test_skcipher,
1982 .suite = {
1983 .cipher = {
1984 - .enc = {
1985 - .vecs = bf_cbc_enc_tv_template,
1986 - .count = BF_CBC_ENC_TEST_VECTORS
1987 - },
1988 - .dec = {
1989 - .vecs = bf_cbc_dec_tv_template,
1990 - .count = BF_CBC_DEC_TEST_VECTORS
1991 - }
1992 + .enc = __VECS(bf_cbc_enc_tv_template),
1993 + .dec = __VECS(bf_cbc_dec_tv_template)
1994 }
1995 }
1996 }, {
1997 @@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te
1998 .test = alg_test_skcipher,
1999 .suite = {
2000 .cipher = {
2001 - .enc = {
2002 - .vecs = camellia_cbc_enc_tv_template,
2003 - .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2004 - },
2005 - .dec = {
2006 - .vecs = camellia_cbc_dec_tv_template,
2007 - .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2008 - }
2009 + .enc = __VECS(camellia_cbc_enc_tv_template),
2010 + .dec = __VECS(camellia_cbc_dec_tv_template)
2011 }
2012 }
2013 }, {
2014 @@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te
2015 .test = alg_test_skcipher,
2016 .suite = {
2017 .cipher = {
2018 - .enc = {
2019 - .vecs = cast5_cbc_enc_tv_template,
2020 - .count = CAST5_CBC_ENC_TEST_VECTORS
2021 - },
2022 - .dec = {
2023 - .vecs = cast5_cbc_dec_tv_template,
2024 - .count = CAST5_CBC_DEC_TEST_VECTORS
2025 - }
2026 + .enc = __VECS(cast5_cbc_enc_tv_template),
2027 + .dec = __VECS(cast5_cbc_dec_tv_template)
2028 }
2029 }
2030 }, {
2031 @@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te
2032 .test = alg_test_skcipher,
2033 .suite = {
2034 .cipher = {
2035 - .enc = {
2036 - .vecs = cast6_cbc_enc_tv_template,
2037 - .count = CAST6_CBC_ENC_TEST_VECTORS
2038 - },
2039 - .dec = {
2040 - .vecs = cast6_cbc_dec_tv_template,
2041 - .count = CAST6_CBC_DEC_TEST_VECTORS
2042 - }
2043 + .enc = __VECS(cast6_cbc_enc_tv_template),
2044 + .dec = __VECS(cast6_cbc_dec_tv_template)
2045 }
2046 }
2047 }, {
2048 @@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te
2049 .test = alg_test_skcipher,
2050 .suite = {
2051 .cipher = {
2052 - .enc = {
2053 - .vecs = des_cbc_enc_tv_template,
2054 - .count = DES_CBC_ENC_TEST_VECTORS
2055 - },
2056 - .dec = {
2057 - .vecs = des_cbc_dec_tv_template,
2058 - .count = DES_CBC_DEC_TEST_VECTORS
2059 - }
2060 + .enc = __VECS(des_cbc_enc_tv_template),
2061 + .dec = __VECS(des_cbc_dec_tv_template)
2062 }
2063 }
2064 }, {
2065 @@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te
2066 .fips_allowed = 1,
2067 .suite = {
2068 .cipher = {
2069 - .enc = {
2070 - .vecs = des3_ede_cbc_enc_tv_template,
2071 - .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2072 - },
2073 - .dec = {
2074 - .vecs = des3_ede_cbc_dec_tv_template,
2075 - .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2076 - }
2077 + .enc = __VECS(des3_ede_cbc_enc_tv_template),
2078 + .dec = __VECS(des3_ede_cbc_dec_tv_template)
2079 }
2080 }
2081 }, {
2082 @@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te
2083 .test = alg_test_skcipher,
2084 .suite = {
2085 .cipher = {
2086 - .enc = {
2087 - .vecs = serpent_cbc_enc_tv_template,
2088 - .count = SERPENT_CBC_ENC_TEST_VECTORS
2089 - },
2090 - .dec = {
2091 - .vecs = serpent_cbc_dec_tv_template,
2092 - .count = SERPENT_CBC_DEC_TEST_VECTORS
2093 - }
2094 + .enc = __VECS(serpent_cbc_enc_tv_template),
2095 + .dec = __VECS(serpent_cbc_dec_tv_template)
2096 }
2097 }
2098 }, {
2099 @@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te
2100 .test = alg_test_skcipher,
2101 .suite = {
2102 .cipher = {
2103 - .enc = {
2104 - .vecs = tf_cbc_enc_tv_template,
2105 - .count = TF_CBC_ENC_TEST_VECTORS
2106 - },
2107 - .dec = {
2108 - .vecs = tf_cbc_dec_tv_template,
2109 - .count = TF_CBC_DEC_TEST_VECTORS
2110 - }
2111 + .enc = __VECS(tf_cbc_enc_tv_template),
2112 + .dec = __VECS(tf_cbc_dec_tv_template)
2113 }
2114 }
2115 }, {
2116 + .alg = "cbcmac(aes)",
2117 + .fips_allowed = 1,
2118 + .test = alg_test_hash,
2119 + .suite = {
2120 + .hash = __VECS(aes_cbcmac_tv_template)
2121 + }
2122 + }, {
2123 .alg = "ccm(aes)",
2124 .test = alg_test_aead,
2125 .fips_allowed = 1,
2126 .suite = {
2127 .aead = {
2128 - .enc = {
2129 - .vecs = aes_ccm_enc_tv_template,
2130 - .count = AES_CCM_ENC_TEST_VECTORS
2131 - },
2132 - .dec = {
2133 - .vecs = aes_ccm_dec_tv_template,
2134 - .count = AES_CCM_DEC_TEST_VECTORS
2135 - }
2136 + .enc = __VECS(aes_ccm_enc_tv_template),
2137 + .dec = __VECS(aes_ccm_dec_tv_template)
2138 }
2139 }
2140 }, {
2141 @@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te
2142 .test = alg_test_skcipher,
2143 .suite = {
2144 .cipher = {
2145 - .enc = {
2146 - .vecs = chacha20_enc_tv_template,
2147 - .count = CHACHA20_ENC_TEST_VECTORS
2148 - },
2149 - .dec = {
2150 - .vecs = chacha20_enc_tv_template,
2151 - .count = CHACHA20_ENC_TEST_VECTORS
2152 - },
2153 + .enc = __VECS(chacha20_enc_tv_template),
2154 + .dec = __VECS(chacha20_enc_tv_template),
2155 }
2156 }
2157 }, {
2158 @@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te
2159 .fips_allowed = 1,
2160 .test = alg_test_hash,
2161 .suite = {
2162 - .hash = {
2163 - .vecs = aes_cmac128_tv_template,
2164 - .count = CMAC_AES_TEST_VECTORS
2165 - }
2166 + .hash = __VECS(aes_cmac128_tv_template)
2167 }
2168 }, {
2169 .alg = "cmac(des3_ede)",
2170 .fips_allowed = 1,
2171 .test = alg_test_hash,
2172 .suite = {
2173 - .hash = {
2174 - .vecs = des3_ede_cmac64_tv_template,
2175 - .count = CMAC_DES3_EDE_TEST_VECTORS
2176 - }
2177 + .hash = __VECS(des3_ede_cmac64_tv_template)
2178 }
2179 }, {
2180 .alg = "compress_null",
2181 @@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te
2182 .alg = "crc32",
2183 .test = alg_test_hash,
2184 .suite = {
2185 - .hash = {
2186 - .vecs = crc32_tv_template,
2187 - .count = CRC32_TEST_VECTORS
2188 - }
2189 + .hash = __VECS(crc32_tv_template)
2190 }
2191 }, {
2192 .alg = "crc32c",
2193 .test = alg_test_crc32c,
2194 .fips_allowed = 1,
2195 .suite = {
2196 - .hash = {
2197 - .vecs = crc32c_tv_template,
2198 - .count = CRC32C_TEST_VECTORS
2199 - }
2200 + .hash = __VECS(crc32c_tv_template)
2201 }
2202 }, {
2203 .alg = "crct10dif",
2204 .test = alg_test_hash,
2205 .fips_allowed = 1,
2206 .suite = {
2207 - .hash = {
2208 - .vecs = crct10dif_tv_template,
2209 - .count = CRCT10DIF_TEST_VECTORS
2210 - }
2211 + .hash = __VECS(crct10dif_tv_template)
2212 }
2213 }, {
2214 - .alg = "cryptd(__driver-cbc-aes-aesni)",
2215 - .test = alg_test_null,
2216 - .fips_allowed = 1,
2217 - }, {
2218 - .alg = "cryptd(__driver-cbc-camellia-aesni)",
2219 - .test = alg_test_null,
2220 - }, {
2221 - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2222 - .test = alg_test_null,
2223 - }, {
2224 - .alg = "cryptd(__driver-cbc-serpent-avx2)",
2225 - .test = alg_test_null,
2226 - }, {
2227 - .alg = "cryptd(__driver-ecb-aes-aesni)",
2228 - .test = alg_test_null,
2229 - .fips_allowed = 1,
2230 - }, {
2231 - .alg = "cryptd(__driver-ecb-camellia-aesni)",
2232 - .test = alg_test_null,
2233 - }, {
2234 - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2235 - .test = alg_test_null,
2236 - }, {
2237 - .alg = "cryptd(__driver-ecb-cast5-avx)",
2238 - .test = alg_test_null,
2239 - }, {
2240 - .alg = "cryptd(__driver-ecb-cast6-avx)",
2241 - .test = alg_test_null,
2242 - }, {
2243 - .alg = "cryptd(__driver-ecb-serpent-avx)",
2244 - .test = alg_test_null,
2245 - }, {
2246 - .alg = "cryptd(__driver-ecb-serpent-avx2)",
2247 - .test = alg_test_null,
2248 - }, {
2249 - .alg = "cryptd(__driver-ecb-serpent-sse2)",
2250 - .test = alg_test_null,
2251 - }, {
2252 - .alg = "cryptd(__driver-ecb-twofish-avx)",
2253 - .test = alg_test_null,
2254 - }, {
2255 - .alg = "cryptd(__driver-gcm-aes-aesni)",
2256 - .test = alg_test_null,
2257 - .fips_allowed = 1,
2258 - }, {
2259 - .alg = "cryptd(__ghash-pclmulqdqni)",
2260 - .test = alg_test_null,
2261 - .fips_allowed = 1,
2262 - }, {
2263 .alg = "ctr(aes)",
2264 .test = alg_test_skcipher,
2265 .fips_allowed = 1,
2266 .suite = {
2267 .cipher = {
2268 - .enc = {
2269 - .vecs = aes_ctr_enc_tv_template,
2270 - .count = AES_CTR_ENC_TEST_VECTORS
2271 - },
2272 - .dec = {
2273 - .vecs = aes_ctr_dec_tv_template,
2274 - .count = AES_CTR_DEC_TEST_VECTORS
2275 - }
2276 + .enc = __VECS(aes_ctr_enc_tv_template),
2277 + .dec = __VECS(aes_ctr_dec_tv_template)
2278 }
2279 }
2280 }, {
2281 @@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te
2282 .test = alg_test_skcipher,
2283 .suite = {
2284 .cipher = {
2285 - .enc = {
2286 - .vecs = bf_ctr_enc_tv_template,
2287 - .count = BF_CTR_ENC_TEST_VECTORS
2288 - },
2289 - .dec = {
2290 - .vecs = bf_ctr_dec_tv_template,
2291 - .count = BF_CTR_DEC_TEST_VECTORS
2292 - }
2293 + .enc = __VECS(bf_ctr_enc_tv_template),
2294 + .dec = __VECS(bf_ctr_dec_tv_template)
2295 }
2296 }
2297 }, {
2298 @@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te
2299 .test = alg_test_skcipher,
2300 .suite = {
2301 .cipher = {
2302 - .enc = {
2303 - .vecs = camellia_ctr_enc_tv_template,
2304 - .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2305 - },
2306 - .dec = {
2307 - .vecs = camellia_ctr_dec_tv_template,
2308 - .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2309 - }
2310 + .enc = __VECS(camellia_ctr_enc_tv_template),
2311 + .dec = __VECS(camellia_ctr_dec_tv_template)
2312 }
2313 }
2314 }, {
2315 @@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te
2316 .test = alg_test_skcipher,
2317 .suite = {
2318 .cipher = {
2319 - .enc = {
2320 - .vecs = cast5_ctr_enc_tv_template,
2321 - .count = CAST5_CTR_ENC_TEST_VECTORS
2322 - },
2323 - .dec = {
2324 - .vecs = cast5_ctr_dec_tv_template,
2325 - .count = CAST5_CTR_DEC_TEST_VECTORS
2326 - }
2327 + .enc = __VECS(cast5_ctr_enc_tv_template),
2328 + .dec = __VECS(cast5_ctr_dec_tv_template)
2329 }
2330 }
2331 }, {
2332 @@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te
2333 .test = alg_test_skcipher,
2334 .suite = {
2335 .cipher = {
2336 - .enc = {
2337 - .vecs = cast6_ctr_enc_tv_template,
2338 - .count = CAST6_CTR_ENC_TEST_VECTORS
2339 - },
2340 - .dec = {
2341 - .vecs = cast6_ctr_dec_tv_template,
2342 - .count = CAST6_CTR_DEC_TEST_VECTORS
2343 - }
2344 + .enc = __VECS(cast6_ctr_enc_tv_template),
2345 + .dec = __VECS(cast6_ctr_dec_tv_template)
2346 }
2347 }
2348 }, {
2349 @@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te
2350 .test = alg_test_skcipher,
2351 .suite = {
2352 .cipher = {
2353 - .enc = {
2354 - .vecs = des_ctr_enc_tv_template,
2355 - .count = DES_CTR_ENC_TEST_VECTORS
2356 - },
2357 - .dec = {
2358 - .vecs = des_ctr_dec_tv_template,
2359 - .count = DES_CTR_DEC_TEST_VECTORS
2360 - }
2361 + .enc = __VECS(des_ctr_enc_tv_template),
2362 + .dec = __VECS(des_ctr_dec_tv_template)
2363 }
2364 }
2365 }, {
2366 .alg = "ctr(des3_ede)",
2367 .test = alg_test_skcipher,
2368 + .fips_allowed = 1,
2369 .suite = {
2370 .cipher = {
2371 - .enc = {
2372 - .vecs = des3_ede_ctr_enc_tv_template,
2373 - .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2374 - },
2375 - .dec = {
2376 - .vecs = des3_ede_ctr_dec_tv_template,
2377 - .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2378 - }
2379 + .enc = __VECS(des3_ede_ctr_enc_tv_template),
2380 + .dec = __VECS(des3_ede_ctr_dec_tv_template)
2381 }
2382 }
2383 }, {
2384 @@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te
2385 .test = alg_test_skcipher,
2386 .suite = {
2387 .cipher = {
2388 - .enc = {
2389 - .vecs = serpent_ctr_enc_tv_template,
2390 - .count = SERPENT_CTR_ENC_TEST_VECTORS
2391 - },
2392 - .dec = {
2393 - .vecs = serpent_ctr_dec_tv_template,
2394 - .count = SERPENT_CTR_DEC_TEST_VECTORS
2395 - }
2396 + .enc = __VECS(serpent_ctr_enc_tv_template),
2397 + .dec = __VECS(serpent_ctr_dec_tv_template)
2398 }
2399 }
2400 }, {
2401 @@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te
2402 .test = alg_test_skcipher,
2403 .suite = {
2404 .cipher = {
2405 - .enc = {
2406 - .vecs = tf_ctr_enc_tv_template,
2407 - .count = TF_CTR_ENC_TEST_VECTORS
2408 - },
2409 - .dec = {
2410 - .vecs = tf_ctr_dec_tv_template,
2411 - .count = TF_CTR_DEC_TEST_VECTORS
2412 - }
2413 + .enc = __VECS(tf_ctr_enc_tv_template),
2414 + .dec = __VECS(tf_ctr_dec_tv_template)
2415 }
2416 }
2417 }, {
2418 @@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te
2419 .test = alg_test_skcipher,
2420 .suite = {
2421 .cipher = {
2422 - .enc = {
2423 - .vecs = cts_mode_enc_tv_template,
2424 - .count = CTS_MODE_ENC_TEST_VECTORS
2425 - },
2426 - .dec = {
2427 - .vecs = cts_mode_dec_tv_template,
2428 - .count = CTS_MODE_DEC_TEST_VECTORS
2429 - }
2430 + .enc = __VECS(cts_mode_enc_tv_template),
2431 + .dec = __VECS(cts_mode_dec_tv_template)
2432 }
2433 }
2434 }, {
2435 @@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te
2436 .fips_allowed = 1,
2437 .suite = {
2438 .comp = {
2439 - .comp = {
2440 - .vecs = deflate_comp_tv_template,
2441 - .count = DEFLATE_COMP_TEST_VECTORS
2442 - },
2443 - .decomp = {
2444 - .vecs = deflate_decomp_tv_template,
2445 - .count = DEFLATE_DECOMP_TEST_VECTORS
2446 - }
2447 + .comp = __VECS(deflate_comp_tv_template),
2448 + .decomp = __VECS(deflate_decomp_tv_template)
2449 }
2450 }
2451 }, {
2452 @@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te
2453 .test = alg_test_kpp,
2454 .fips_allowed = 1,
2455 .suite = {
2456 - .kpp = {
2457 - .vecs = dh_tv_template,
2458 - .count = DH_TEST_VECTORS
2459 - }
2460 + .kpp = __VECS(dh_tv_template)
2461 }
2462 }, {
2463 .alg = "digest_null",
2464 @@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te
2465 .test = alg_test_drbg,
2466 .fips_allowed = 1,
2467 .suite = {
2468 - .drbg = {
2469 - .vecs = drbg_nopr_ctr_aes128_tv_template,
2470 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2471 - }
2472 + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2473 }
2474 }, {
2475 .alg = "drbg_nopr_ctr_aes192",
2476 .test = alg_test_drbg,
2477 .fips_allowed = 1,
2478 .suite = {
2479 - .drbg = {
2480 - .vecs = drbg_nopr_ctr_aes192_tv_template,
2481 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2482 - }
2483 + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2484 }
2485 }, {
2486 .alg = "drbg_nopr_ctr_aes256",
2487 .test = alg_test_drbg,
2488 .fips_allowed = 1,
2489 .suite = {
2490 - .drbg = {
2491 - .vecs = drbg_nopr_ctr_aes256_tv_template,
2492 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2493 - }
2494 + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2495 }
2496 }, {
2497 /*
2498 @@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te
2499 .test = alg_test_drbg,
2500 .fips_allowed = 1,
2501 .suite = {
2502 - .drbg = {
2503 - .vecs = drbg_nopr_hmac_sha256_tv_template,
2504 - .count =
2505 - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2506 - }
2507 + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2508 }
2509 }, {
2510 /* covered by drbg_nopr_hmac_sha256 test */
2511 @@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te
2512 .test = alg_test_drbg,
2513 .fips_allowed = 1,
2514 .suite = {
2515 - .drbg = {
2516 - .vecs = drbg_nopr_sha256_tv_template,
2517 - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2518 - }
2519 + .drbg = __VECS(drbg_nopr_sha256_tv_template)
2520 }
2521 }, {
2522 /* covered by drbg_nopr_sha256 test */
2523 @@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te
2524 .test = alg_test_drbg,
2525 .fips_allowed = 1,
2526 .suite = {
2527 - .drbg = {
2528 - .vecs = drbg_pr_ctr_aes128_tv_template,
2529 - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2530 - }
2531 + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2532 }
2533 }, {
2534 /* covered by drbg_pr_ctr_aes128 test */
2535 @@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te
2536 .test = alg_test_drbg,
2537 .fips_allowed = 1,
2538 .suite = {
2539 - .drbg = {
2540 - .vecs = drbg_pr_hmac_sha256_tv_template,
2541 - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2542 - }
2543 + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2544 }
2545 }, {
2546 /* covered by drbg_pr_hmac_sha256 test */
2547 @@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te
2548 .test = alg_test_drbg,
2549 .fips_allowed = 1,
2550 .suite = {
2551 - .drbg = {
2552 - .vecs = drbg_pr_sha256_tv_template,
2553 - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2554 - }
2555 + .drbg = __VECS(drbg_pr_sha256_tv_template)
2556 }
2557 }, {
2558 /* covered by drbg_pr_sha256 test */
2559 @@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te
2560 .fips_allowed = 1,
2561 .test = alg_test_null,
2562 }, {
2563 - .alg = "ecb(__aes-aesni)",
2564 - .test = alg_test_null,
2565 - .fips_allowed = 1,
2566 - }, {
2567 .alg = "ecb(aes)",
2568 .test = alg_test_skcipher,
2569 .fips_allowed = 1,
2570 .suite = {
2571 .cipher = {
2572 - .enc = {
2573 - .vecs = aes_enc_tv_template,
2574 - .count = AES_ENC_TEST_VECTORS
2575 - },
2576 - .dec = {
2577 - .vecs = aes_dec_tv_template,
2578 - .count = AES_DEC_TEST_VECTORS
2579 - }
2580 + .enc = __VECS(aes_enc_tv_template),
2581 + .dec = __VECS(aes_dec_tv_template)
2582 }
2583 }
2584 }, {
2585 @@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te
2586 .test = alg_test_skcipher,
2587 .suite = {
2588 .cipher = {
2589 - .enc = {
2590 - .vecs = anubis_enc_tv_template,
2591 - .count = ANUBIS_ENC_TEST_VECTORS
2592 - },
2593 - .dec = {
2594 - .vecs = anubis_dec_tv_template,
2595 - .count = ANUBIS_DEC_TEST_VECTORS
2596 - }
2597 + .enc = __VECS(anubis_enc_tv_template),
2598 + .dec = __VECS(anubis_dec_tv_template)
2599 }
2600 }
2601 }, {
2602 @@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te
2603 .test = alg_test_skcipher,
2604 .suite = {
2605 .cipher = {
2606 - .enc = {
2607 - .vecs = arc4_enc_tv_template,
2608 - .count = ARC4_ENC_TEST_VECTORS
2609 - },
2610 - .dec = {
2611 - .vecs = arc4_dec_tv_template,
2612 - .count = ARC4_DEC_TEST_VECTORS
2613 - }
2614 + .enc = __VECS(arc4_enc_tv_template),
2615 + .dec = __VECS(arc4_dec_tv_template)
2616 }
2617 }
2618 }, {
2619 @@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te
2620 .test = alg_test_skcipher,
2621 .suite = {
2622 .cipher = {
2623 - .enc = {
2624 - .vecs = bf_enc_tv_template,
2625 - .count = BF_ENC_TEST_VECTORS
2626 - },
2627 - .dec = {
2628 - .vecs = bf_dec_tv_template,
2629 - .count = BF_DEC_TEST_VECTORS
2630 - }
2631 + .enc = __VECS(bf_enc_tv_template),
2632 + .dec = __VECS(bf_dec_tv_template)
2633 }
2634 }
2635 }, {
2636 @@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te
2637 .test = alg_test_skcipher,
2638 .suite = {
2639 .cipher = {
2640 - .enc = {
2641 - .vecs = camellia_enc_tv_template,
2642 - .count = CAMELLIA_ENC_TEST_VECTORS
2643 - },
2644 - .dec = {
2645 - .vecs = camellia_dec_tv_template,
2646 - .count = CAMELLIA_DEC_TEST_VECTORS
2647 - }
2648 + .enc = __VECS(camellia_enc_tv_template),
2649 + .dec = __VECS(camellia_dec_tv_template)
2650 }
2651 }
2652 }, {
2653 @@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te
2654 .test = alg_test_skcipher,
2655 .suite = {
2656 .cipher = {
2657 - .enc = {
2658 - .vecs = cast5_enc_tv_template,
2659 - .count = CAST5_ENC_TEST_VECTORS
2660 - },
2661 - .dec = {
2662 - .vecs = cast5_dec_tv_template,
2663 - .count = CAST5_DEC_TEST_VECTORS
2664 - }
2665 + .enc = __VECS(cast5_enc_tv_template),
2666 + .dec = __VECS(cast5_dec_tv_template)
2667 }
2668 }
2669 }, {
2670 @@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te
2671 .test = alg_test_skcipher,
2672 .suite = {
2673 .cipher = {
2674 - .enc = {
2675 - .vecs = cast6_enc_tv_template,
2676 - .count = CAST6_ENC_TEST_VECTORS
2677 - },
2678 - .dec = {
2679 - .vecs = cast6_dec_tv_template,
2680 - .count = CAST6_DEC_TEST_VECTORS
2681 - }
2682 + .enc = __VECS(cast6_enc_tv_template),
2683 + .dec = __VECS(cast6_dec_tv_template)
2684 }
2685 }
2686 }, {
2687 @@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te
2688 .test = alg_test_skcipher,
2689 .suite = {
2690 .cipher = {
2691 - .enc = {
2692 - .vecs = des_enc_tv_template,
2693 - .count = DES_ENC_TEST_VECTORS
2694 - },
2695 - .dec = {
2696 - .vecs = des_dec_tv_template,
2697 - .count = DES_DEC_TEST_VECTORS
2698 - }
2699 + .enc = __VECS(des_enc_tv_template),
2700 + .dec = __VECS(des_dec_tv_template)
2701 }
2702 }
2703 }, {
2704 @@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te
2705 .fips_allowed = 1,
2706 .suite = {
2707 .cipher = {
2708 - .enc = {
2709 - .vecs = des3_ede_enc_tv_template,
2710 - .count = DES3_EDE_ENC_TEST_VECTORS
2711 - },
2712 - .dec = {
2713 - .vecs = des3_ede_dec_tv_template,
2714 - .count = DES3_EDE_DEC_TEST_VECTORS
2715 - }
2716 + .enc = __VECS(des3_ede_enc_tv_template),
2717 + .dec = __VECS(des3_ede_dec_tv_template)
2718 }
2719 }
2720 }, {
2721 @@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te
2722 .test = alg_test_skcipher,
2723 .suite = {
2724 .cipher = {
2725 - .enc = {
2726 - .vecs = khazad_enc_tv_template,
2727 - .count = KHAZAD_ENC_TEST_VECTORS
2728 - },
2729 - .dec = {
2730 - .vecs = khazad_dec_tv_template,
2731 - .count = KHAZAD_DEC_TEST_VECTORS
2732 - }
2733 + .enc = __VECS(khazad_enc_tv_template),
2734 + .dec = __VECS(khazad_dec_tv_template)
2735 }
2736 }
2737 }, {
2738 @@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te
2739 .test = alg_test_skcipher,
2740 .suite = {
2741 .cipher = {
2742 - .enc = {
2743 - .vecs = seed_enc_tv_template,
2744 - .count = SEED_ENC_TEST_VECTORS
2745 - },
2746 - .dec = {
2747 - .vecs = seed_dec_tv_template,
2748 - .count = SEED_DEC_TEST_VECTORS
2749 - }
2750 + .enc = __VECS(seed_enc_tv_template),
2751 + .dec = __VECS(seed_dec_tv_template)
2752 }
2753 }
2754 }, {
2755 @@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te
2756 .test = alg_test_skcipher,
2757 .suite = {
2758 .cipher = {
2759 - .enc = {
2760 - .vecs = serpent_enc_tv_template,
2761 - .count = SERPENT_ENC_TEST_VECTORS
2762 - },
2763 - .dec = {
2764 - .vecs = serpent_dec_tv_template,
2765 - .count = SERPENT_DEC_TEST_VECTORS
2766 - }
2767 + .enc = __VECS(serpent_enc_tv_template),
2768 + .dec = __VECS(serpent_dec_tv_template)
2769 }
2770 }
2771 }, {
2772 @@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te
2773 .test = alg_test_skcipher,
2774 .suite = {
2775 .cipher = {
2776 - .enc = {
2777 - .vecs = tea_enc_tv_template,
2778 - .count = TEA_ENC_TEST_VECTORS
2779 - },
2780 - .dec = {
2781 - .vecs = tea_dec_tv_template,
2782 - .count = TEA_DEC_TEST_VECTORS
2783 - }
2784 + .enc = __VECS(tea_enc_tv_template),
2785 + .dec = __VECS(tea_dec_tv_template)
2786 }
2787 }
2788 }, {
2789 @@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te
2790 .test = alg_test_skcipher,
2791 .suite = {
2792 .cipher = {
2793 - .enc = {
2794 - .vecs = tnepres_enc_tv_template,
2795 - .count = TNEPRES_ENC_TEST_VECTORS
2796 - },
2797 - .dec = {
2798 - .vecs = tnepres_dec_tv_template,
2799 - .count = TNEPRES_DEC_TEST_VECTORS
2800 - }
2801 + .enc = __VECS(tnepres_enc_tv_template),
2802 + .dec = __VECS(tnepres_dec_tv_template)
2803 }
2804 }
2805 }, {
2806 @@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te
2807 .test = alg_test_skcipher,
2808 .suite = {
2809 .cipher = {
2810 - .enc = {
2811 - .vecs = tf_enc_tv_template,
2812 - .count = TF_ENC_TEST_VECTORS
2813 - },
2814 - .dec = {
2815 - .vecs = tf_dec_tv_template,
2816 - .count = TF_DEC_TEST_VECTORS
2817 - }
2818 + .enc = __VECS(tf_enc_tv_template),
2819 + .dec = __VECS(tf_dec_tv_template)
2820 }
2821 }
2822 }, {
2823 @@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te
2824 .test = alg_test_skcipher,
2825 .suite = {
2826 .cipher = {
2827 - .enc = {
2828 - .vecs = xeta_enc_tv_template,
2829 - .count = XETA_ENC_TEST_VECTORS
2830 - },
2831 - .dec = {
2832 - .vecs = xeta_dec_tv_template,
2833 - .count = XETA_DEC_TEST_VECTORS
2834 - }
2835 + .enc = __VECS(xeta_enc_tv_template),
2836 + .dec = __VECS(xeta_dec_tv_template)
2837 }
2838 }
2839 }, {
2840 @@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te
2841 .test = alg_test_skcipher,
2842 .suite = {
2843 .cipher = {
2844 - .enc = {
2845 - .vecs = xtea_enc_tv_template,
2846 - .count = XTEA_ENC_TEST_VECTORS
2847 - },
2848 - .dec = {
2849 - .vecs = xtea_dec_tv_template,
2850 - .count = XTEA_DEC_TEST_VECTORS
2851 - }
2852 + .enc = __VECS(xtea_enc_tv_template),
2853 + .dec = __VECS(xtea_dec_tv_template)
2854 }
2855 }
2856 }, {
2857 @@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te
2858 .test = alg_test_kpp,
2859 .fips_allowed = 1,
2860 .suite = {
2861 - .kpp = {
2862 - .vecs = ecdh_tv_template,
2863 - .count = ECDH_TEST_VECTORS
2864 - }
2865 + .kpp = __VECS(ecdh_tv_template)
2866 }
2867 }, {
2868 .alg = "gcm(aes)",
2869 @@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te
2870 .fips_allowed = 1,
2871 .suite = {
2872 .aead = {
2873 - .enc = {
2874 - .vecs = aes_gcm_enc_tv_template,
2875 - .count = AES_GCM_ENC_TEST_VECTORS
2876 - },
2877 - .dec = {
2878 - .vecs = aes_gcm_dec_tv_template,
2879 - .count = AES_GCM_DEC_TEST_VECTORS
2880 - }
2881 + .enc = __VECS(aes_gcm_enc_tv_template),
2882 + .dec = __VECS(aes_gcm_dec_tv_template)
2883 }
2884 }
2885 }, {
2886 @@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te
2887 .test = alg_test_hash,
2888 .fips_allowed = 1,
2889 .suite = {
2890 - .hash = {
2891 - .vecs = ghash_tv_template,
2892 - .count = GHASH_TEST_VECTORS
2893 - }
2894 + .hash = __VECS(ghash_tv_template)
2895 }
2896 }, {
2897 .alg = "hmac(crc32)",
2898 .test = alg_test_hash,
2899 .suite = {
2900 - .hash = {
2901 - .vecs = bfin_crc_tv_template,
2902 - .count = BFIN_CRC_TEST_VECTORS
2903 - }
2904 + .hash = __VECS(bfin_crc_tv_template)
2905 }
2906 }, {
2907 .alg = "hmac(md5)",
2908 .test = alg_test_hash,
2909 .suite = {
2910 - .hash = {
2911 - .vecs = hmac_md5_tv_template,
2912 - .count = HMAC_MD5_TEST_VECTORS
2913 - }
2914 + .hash = __VECS(hmac_md5_tv_template)
2915 }
2916 }, {
2917 .alg = "hmac(rmd128)",
2918 .test = alg_test_hash,
2919 .suite = {
2920 - .hash = {
2921 - .vecs = hmac_rmd128_tv_template,
2922 - .count = HMAC_RMD128_TEST_VECTORS
2923 - }
2924 + .hash = __VECS(hmac_rmd128_tv_template)
2925 }
2926 }, {
2927 .alg = "hmac(rmd160)",
2928 .test = alg_test_hash,
2929 .suite = {
2930 - .hash = {
2931 - .vecs = hmac_rmd160_tv_template,
2932 - .count = HMAC_RMD160_TEST_VECTORS
2933 - }
2934 + .hash = __VECS(hmac_rmd160_tv_template)
2935 }
2936 }, {
2937 .alg = "hmac(sha1)",
2938 .test = alg_test_hash,
2939 .fips_allowed = 1,
2940 .suite = {
2941 - .hash = {
2942 - .vecs = hmac_sha1_tv_template,
2943 - .count = HMAC_SHA1_TEST_VECTORS
2944 - }
2945 + .hash = __VECS(hmac_sha1_tv_template)
2946 }
2947 }, {
2948 .alg = "hmac(sha224)",
2949 .test = alg_test_hash,
2950 .fips_allowed = 1,
2951 .suite = {
2952 - .hash = {
2953 - .vecs = hmac_sha224_tv_template,
2954 - .count = HMAC_SHA224_TEST_VECTORS
2955 - }
2956 + .hash = __VECS(hmac_sha224_tv_template)
2957 }
2958 }, {
2959 .alg = "hmac(sha256)",
2960 .test = alg_test_hash,
2961 .fips_allowed = 1,
2962 .suite = {
2963 - .hash = {
2964 - .vecs = hmac_sha256_tv_template,
2965 - .count = HMAC_SHA256_TEST_VECTORS
2966 - }
2967 + .hash = __VECS(hmac_sha256_tv_template)
2968 }
2969 }, {
2970 .alg = "hmac(sha3-224)",
2971 .test = alg_test_hash,
2972 .fips_allowed = 1,
2973 .suite = {
2974 - .hash = {
2975 - .vecs = hmac_sha3_224_tv_template,
2976 - .count = HMAC_SHA3_224_TEST_VECTORS
2977 - }
2978 + .hash = __VECS(hmac_sha3_224_tv_template)
2979 }
2980 }, {
2981 .alg = "hmac(sha3-256)",
2982 .test = alg_test_hash,
2983 .fips_allowed = 1,
2984 .suite = {
2985 - .hash = {
2986 - .vecs = hmac_sha3_256_tv_template,
2987 - .count = HMAC_SHA3_256_TEST_VECTORS
2988 - }
2989 + .hash = __VECS(hmac_sha3_256_tv_template)
2990 }
2991 }, {
2992 .alg = "hmac(sha3-384)",
2993 .test = alg_test_hash,
2994 .fips_allowed = 1,
2995 .suite = {
2996 - .hash = {
2997 - .vecs = hmac_sha3_384_tv_template,
2998 - .count = HMAC_SHA3_384_TEST_VECTORS
2999 - }
3000 + .hash = __VECS(hmac_sha3_384_tv_template)
3001 }
3002 }, {
3003 .alg = "hmac(sha3-512)",
3004 .test = alg_test_hash,
3005 .fips_allowed = 1,
3006 .suite = {
3007 - .hash = {
3008 - .vecs = hmac_sha3_512_tv_template,
3009 - .count = HMAC_SHA3_512_TEST_VECTORS
3010 - }
3011 + .hash = __VECS(hmac_sha3_512_tv_template)
3012 }
3013 }, {
3014 .alg = "hmac(sha384)",
3015 .test = alg_test_hash,
3016 .fips_allowed = 1,
3017 .suite = {
3018 - .hash = {
3019 - .vecs = hmac_sha384_tv_template,
3020 - .count = HMAC_SHA384_TEST_VECTORS
3021 - }
3022 + .hash = __VECS(hmac_sha384_tv_template)
3023 }
3024 }, {
3025 .alg = "hmac(sha512)",
3026 .test = alg_test_hash,
3027 .fips_allowed = 1,
3028 .suite = {
3029 - .hash = {
3030 - .vecs = hmac_sha512_tv_template,
3031 - .count = HMAC_SHA512_TEST_VECTORS
3032 - }
3033 + .hash = __VECS(hmac_sha512_tv_template)
3034 }
3035 }, {
3036 .alg = "jitterentropy_rng",
3037 @@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te
3038 .fips_allowed = 1,
3039 .suite = {
3040 .cipher = {
3041 - .enc = {
3042 - .vecs = aes_kw_enc_tv_template,
3043 - .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3044 - },
3045 - .dec = {
3046 - .vecs = aes_kw_dec_tv_template,
3047 - .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3048 - }
3049 + .enc = __VECS(aes_kw_enc_tv_template),
3050 + .dec = __VECS(aes_kw_dec_tv_template)
3051 }
3052 }
3053 }, {
3054 @@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te
3055 .test = alg_test_skcipher,
3056 .suite = {
3057 .cipher = {
3058 - .enc = {
3059 - .vecs = aes_lrw_enc_tv_template,
3060 - .count = AES_LRW_ENC_TEST_VECTORS
3061 - },
3062 - .dec = {
3063 - .vecs = aes_lrw_dec_tv_template,
3064 - .count = AES_LRW_DEC_TEST_VECTORS
3065 - }
3066 + .enc = __VECS(aes_lrw_enc_tv_template),
3067 + .dec = __VECS(aes_lrw_dec_tv_template)
3068 }
3069 }
3070 }, {
3071 @@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te
3072 .test = alg_test_skcipher,
3073 .suite = {
3074 .cipher = {
3075 - .enc = {
3076 - .vecs = camellia_lrw_enc_tv_template,
3077 - .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3078 - },
3079 - .dec = {
3080 - .vecs = camellia_lrw_dec_tv_template,
3081 - .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3082 - }
3083 + .enc = __VECS(camellia_lrw_enc_tv_template),
3084 + .dec = __VECS(camellia_lrw_dec_tv_template)
3085 }
3086 }
3087 }, {
3088 @@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te
3089 .test = alg_test_skcipher,
3090 .suite = {
3091 .cipher = {
3092 - .enc = {
3093 - .vecs = cast6_lrw_enc_tv_template,
3094 - .count = CAST6_LRW_ENC_TEST_VECTORS
3095 - },
3096 - .dec = {
3097 - .vecs = cast6_lrw_dec_tv_template,
3098 - .count = CAST6_LRW_DEC_TEST_VECTORS
3099 - }
3100 + .enc = __VECS(cast6_lrw_enc_tv_template),
3101 + .dec = __VECS(cast6_lrw_dec_tv_template)
3102 }
3103 }
3104 }, {
3105 @@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te
3106 .test = alg_test_skcipher,
3107 .suite = {
3108 .cipher = {
3109 - .enc = {
3110 - .vecs = serpent_lrw_enc_tv_template,
3111 - .count = SERPENT_LRW_ENC_TEST_VECTORS
3112 - },
3113 - .dec = {
3114 - .vecs = serpent_lrw_dec_tv_template,
3115 - .count = SERPENT_LRW_DEC_TEST_VECTORS
3116 - }
3117 + .enc = __VECS(serpent_lrw_enc_tv_template),
3118 + .dec = __VECS(serpent_lrw_dec_tv_template)
3119 }
3120 }
3121 }, {
3122 @@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te
3123 .test = alg_test_skcipher,
3124 .suite = {
3125 .cipher = {
3126 - .enc = {
3127 - .vecs = tf_lrw_enc_tv_template,
3128 - .count = TF_LRW_ENC_TEST_VECTORS
3129 - },
3130 - .dec = {
3131 - .vecs = tf_lrw_dec_tv_template,
3132 - .count = TF_LRW_DEC_TEST_VECTORS
3133 - }
3134 + .enc = __VECS(tf_lrw_enc_tv_template),
3135 + .dec = __VECS(tf_lrw_dec_tv_template)
3136 }
3137 }
3138 }, {
3139 @@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te
3140 .fips_allowed = 1,
3141 .suite = {
3142 .comp = {
3143 - .comp = {
3144 - .vecs = lz4_comp_tv_template,
3145 - .count = LZ4_COMP_TEST_VECTORS
3146 - },
3147 - .decomp = {
3148 - .vecs = lz4_decomp_tv_template,
3149 - .count = LZ4_DECOMP_TEST_VECTORS
3150 - }
3151 + .comp = __VECS(lz4_comp_tv_template),
3152 + .decomp = __VECS(lz4_decomp_tv_template)
3153 }
3154 }
3155 }, {
3156 @@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te
3157 .fips_allowed = 1,
3158 .suite = {
3159 .comp = {
3160 - .comp = {
3161 - .vecs = lz4hc_comp_tv_template,
3162 - .count = LZ4HC_COMP_TEST_VECTORS
3163 - },
3164 - .decomp = {
3165 - .vecs = lz4hc_decomp_tv_template,
3166 - .count = LZ4HC_DECOMP_TEST_VECTORS
3167 - }
3168 + .comp = __VECS(lz4hc_comp_tv_template),
3169 + .decomp = __VECS(lz4hc_decomp_tv_template)
3170 }
3171 }
3172 }, {
3173 @@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te
3174 .fips_allowed = 1,
3175 .suite = {
3176 .comp = {
3177 - .comp = {
3178 - .vecs = lzo_comp_tv_template,
3179 - .count = LZO_COMP_TEST_VECTORS
3180 - },
3181 - .decomp = {
3182 - .vecs = lzo_decomp_tv_template,
3183 - .count = LZO_DECOMP_TEST_VECTORS
3184 - }
3185 + .comp = __VECS(lzo_comp_tv_template),
3186 + .decomp = __VECS(lzo_decomp_tv_template)
3187 }
3188 }
3189 }, {
3190 .alg = "md4",
3191 .test = alg_test_hash,
3192 .suite = {
3193 - .hash = {
3194 - .vecs = md4_tv_template,
3195 - .count = MD4_TEST_VECTORS
3196 - }
3197 + .hash = __VECS(md4_tv_template)
3198 }
3199 }, {
3200 .alg = "md5",
3201 .test = alg_test_hash,
3202 .suite = {
3203 - .hash = {
3204 - .vecs = md5_tv_template,
3205 - .count = MD5_TEST_VECTORS
3206 - }
3207 + .hash = __VECS(md5_tv_template)
3208 }
3209 }, {
3210 .alg = "michael_mic",
3211 .test = alg_test_hash,
3212 .suite = {
3213 - .hash = {
3214 - .vecs = michael_mic_tv_template,
3215 - .count = MICHAEL_MIC_TEST_VECTORS
3216 - }
3217 + .hash = __VECS(michael_mic_tv_template)
3218 }
3219 }, {
3220 .alg = "ofb(aes)",
3221 @@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te
3222 .fips_allowed = 1,
3223 .suite = {
3224 .cipher = {
3225 - .enc = {
3226 - .vecs = aes_ofb_enc_tv_template,
3227 - .count = AES_OFB_ENC_TEST_VECTORS
3228 - },
3229 - .dec = {
3230 - .vecs = aes_ofb_dec_tv_template,
3231 - .count = AES_OFB_DEC_TEST_VECTORS
3232 - }
3233 + .enc = __VECS(aes_ofb_enc_tv_template),
3234 + .dec = __VECS(aes_ofb_dec_tv_template)
3235 }
3236 }
3237 }, {
3238 @@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te
3239 .test = alg_test_skcipher,
3240 .suite = {
3241 .cipher = {
3242 - .enc = {
3243 - .vecs = fcrypt_pcbc_enc_tv_template,
3244 - .count = FCRYPT_ENC_TEST_VECTORS
3245 - },
3246 - .dec = {
3247 - .vecs = fcrypt_pcbc_dec_tv_template,
3248 - .count = FCRYPT_DEC_TEST_VECTORS
3249 - }
3250 + .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3251 + .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3252 }
3253 }
3254 }, {
3255 .alg = "poly1305",
3256 .test = alg_test_hash,
3257 .suite = {
3258 - .hash = {
3259 - .vecs = poly1305_tv_template,
3260 - .count = POLY1305_TEST_VECTORS
3261 - }
3262 + .hash = __VECS(poly1305_tv_template)
3263 }
3264 }, {
3265 .alg = "rfc3686(ctr(aes))",
3266 @@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te
3267 .fips_allowed = 1,
3268 .suite = {
3269 .cipher = {
3270 - .enc = {
3271 - .vecs = aes_ctr_rfc3686_enc_tv_template,
3272 - .count = AES_CTR_3686_ENC_TEST_VECTORS
3273 - },
3274 - .dec = {
3275 - .vecs = aes_ctr_rfc3686_dec_tv_template,
3276 - .count = AES_CTR_3686_DEC_TEST_VECTORS
3277 - }
3278 + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3279 + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3280 }
3281 }
3282 }, {
3283 @@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te
3284 .fips_allowed = 1,
3285 .suite = {
3286 .aead = {
3287 - .enc = {
3288 - .vecs = aes_gcm_rfc4106_enc_tv_template,
3289 - .count = AES_GCM_4106_ENC_TEST_VECTORS
3290 - },
3291 - .dec = {
3292 - .vecs = aes_gcm_rfc4106_dec_tv_template,
3293 - .count = AES_GCM_4106_DEC_TEST_VECTORS
3294 - }
3295 + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3296 + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3297 }
3298 }
3299 }, {
3300 @@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te
3301 .fips_allowed = 1,
3302 .suite = {
3303 .aead = {
3304 - .enc = {
3305 - .vecs = aes_ccm_rfc4309_enc_tv_template,
3306 - .count = AES_CCM_4309_ENC_TEST_VECTORS
3307 - },
3308 - .dec = {
3309 - .vecs = aes_ccm_rfc4309_dec_tv_template,
3310 - .count = AES_CCM_4309_DEC_TEST_VECTORS
3311 - }
3312 + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3313 + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3314 }
3315 }
3316 }, {
3317 @@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te
3318 .test = alg_test_aead,
3319 .suite = {
3320 .aead = {
3321 - .enc = {
3322 - .vecs = aes_gcm_rfc4543_enc_tv_template,
3323 - .count = AES_GCM_4543_ENC_TEST_VECTORS
3324 - },
3325 - .dec = {
3326 - .vecs = aes_gcm_rfc4543_dec_tv_template,
3327 - .count = AES_GCM_4543_DEC_TEST_VECTORS
3328 - },
3329 + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3330 + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3331 }
3332 }
3333 }, {
3334 @@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te
3335 .test = alg_test_aead,
3336 .suite = {
3337 .aead = {
3338 - .enc = {
3339 - .vecs = rfc7539_enc_tv_template,
3340 - .count = RFC7539_ENC_TEST_VECTORS
3341 - },
3342 - .dec = {
3343 - .vecs = rfc7539_dec_tv_template,
3344 - .count = RFC7539_DEC_TEST_VECTORS
3345 - },
3346 + .enc = __VECS(rfc7539_enc_tv_template),
3347 + .dec = __VECS(rfc7539_dec_tv_template),
3348 }
3349 }
3350 }, {
3351 @@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te
3352 .test = alg_test_aead,
3353 .suite = {
3354 .aead = {
3355 - .enc = {
3356 - .vecs = rfc7539esp_enc_tv_template,
3357 - .count = RFC7539ESP_ENC_TEST_VECTORS
3358 - },
3359 - .dec = {
3360 - .vecs = rfc7539esp_dec_tv_template,
3361 - .count = RFC7539ESP_DEC_TEST_VECTORS
3362 - },
3363 + .enc = __VECS(rfc7539esp_enc_tv_template),
3364 + .dec = __VECS(rfc7539esp_dec_tv_template),
3365 }
3366 }
3367 }, {
3368 .alg = "rmd128",
3369 .test = alg_test_hash,
3370 .suite = {
3371 - .hash = {
3372 - .vecs = rmd128_tv_template,
3373 - .count = RMD128_TEST_VECTORS
3374 - }
3375 + .hash = __VECS(rmd128_tv_template)
3376 }
3377 }, {
3378 .alg = "rmd160",
3379 .test = alg_test_hash,
3380 .suite = {
3381 - .hash = {
3382 - .vecs = rmd160_tv_template,
3383 - .count = RMD160_TEST_VECTORS
3384 - }
3385 + .hash = __VECS(rmd160_tv_template)
3386 }
3387 }, {
3388 .alg = "rmd256",
3389 .test = alg_test_hash,
3390 .suite = {
3391 - .hash = {
3392 - .vecs = rmd256_tv_template,
3393 - .count = RMD256_TEST_VECTORS
3394 - }
3395 + .hash = __VECS(rmd256_tv_template)
3396 }
3397 }, {
3398 .alg = "rmd320",
3399 .test = alg_test_hash,
3400 .suite = {
3401 - .hash = {
3402 - .vecs = rmd320_tv_template,
3403 - .count = RMD320_TEST_VECTORS
3404 - }
3405 + .hash = __VECS(rmd320_tv_template)
3406 }
3407 }, {
3408 .alg = "rsa",
3409 .test = alg_test_akcipher,
3410 .fips_allowed = 1,
3411 .suite = {
3412 - .akcipher = {
3413 - .vecs = rsa_tv_template,
3414 - .count = RSA_TEST_VECTORS
3415 - }
3416 + .akcipher = __VECS(rsa_tv_template)
3417 }
3418 }, {
3419 .alg = "salsa20",
3420 .test = alg_test_skcipher,
3421 .suite = {
3422 .cipher = {
3423 - .enc = {
3424 - .vecs = salsa20_stream_enc_tv_template,
3425 - .count = SALSA20_STREAM_ENC_TEST_VECTORS
3426 - }
3427 + .enc = __VECS(salsa20_stream_enc_tv_template)
3428 }
3429 }
3430 }, {
3431 @@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te
3432 .test = alg_test_hash,
3433 .fips_allowed = 1,
3434 .suite = {
3435 - .hash = {
3436 - .vecs = sha1_tv_template,
3437 - .count = SHA1_TEST_VECTORS
3438 - }
3439 + .hash = __VECS(sha1_tv_template)
3440 }
3441 }, {
3442 .alg = "sha224",
3443 .test = alg_test_hash,
3444 .fips_allowed = 1,
3445 .suite = {
3446 - .hash = {
3447 - .vecs = sha224_tv_template,
3448 - .count = SHA224_TEST_VECTORS
3449 - }
3450 + .hash = __VECS(sha224_tv_template)
3451 }
3452 }, {
3453 .alg = "sha256",
3454 .test = alg_test_hash,
3455 .fips_allowed = 1,
3456 .suite = {
3457 - .hash = {
3458 - .vecs = sha256_tv_template,
3459 - .count = SHA256_TEST_VECTORS
3460 - }
3461 + .hash = __VECS(sha256_tv_template)
3462 }
3463 }, {
3464 .alg = "sha3-224",
3465 .test = alg_test_hash,
3466 .fips_allowed = 1,
3467 .suite = {
3468 - .hash = {
3469 - .vecs = sha3_224_tv_template,
3470 - .count = SHA3_224_TEST_VECTORS
3471 - }
3472 + .hash = __VECS(sha3_224_tv_template)
3473 }
3474 }, {
3475 .alg = "sha3-256",
3476 .test = alg_test_hash,
3477 .fips_allowed = 1,
3478 .suite = {
3479 - .hash = {
3480 - .vecs = sha3_256_tv_template,
3481 - .count = SHA3_256_TEST_VECTORS
3482 - }
3483 + .hash = __VECS(sha3_256_tv_template)
3484 }
3485 }, {
3486 .alg = "sha3-384",
3487 .test = alg_test_hash,
3488 .fips_allowed = 1,
3489 .suite = {
3490 - .hash = {
3491 - .vecs = sha3_384_tv_template,
3492 - .count = SHA3_384_TEST_VECTORS
3493 - }
3494 + .hash = __VECS(sha3_384_tv_template)
3495 }
3496 }, {
3497 .alg = "sha3-512",
3498 .test = alg_test_hash,
3499 .fips_allowed = 1,
3500 .suite = {
3501 - .hash = {
3502 - .vecs = sha3_512_tv_template,
3503 - .count = SHA3_512_TEST_VECTORS
3504 - }
3505 + .hash = __VECS(sha3_512_tv_template)
3506 }
3507 }, {
3508 .alg = "sha384",
3509 .test = alg_test_hash,
3510 .fips_allowed = 1,
3511 .suite = {
3512 - .hash = {
3513 - .vecs = sha384_tv_template,
3514 - .count = SHA384_TEST_VECTORS
3515 - }
3516 + .hash = __VECS(sha384_tv_template)
3517 }
3518 }, {
3519 .alg = "sha512",
3520 .test = alg_test_hash,
3521 .fips_allowed = 1,
3522 .suite = {
3523 - .hash = {
3524 - .vecs = sha512_tv_template,
3525 - .count = SHA512_TEST_VECTORS
3526 - }
3527 + .hash = __VECS(sha512_tv_template)
3528 }
3529 }, {
3530 .alg = "tgr128",
3531 .test = alg_test_hash,
3532 .suite = {
3533 - .hash = {
3534 - .vecs = tgr128_tv_template,
3535 - .count = TGR128_TEST_VECTORS
3536 - }
3537 + .hash = __VECS(tgr128_tv_template)
3538 }
3539 }, {
3540 .alg = "tgr160",
3541 .test = alg_test_hash,
3542 .suite = {
3543 - .hash = {
3544 - .vecs = tgr160_tv_template,
3545 - .count = TGR160_TEST_VECTORS
3546 - }
3547 + .hash = __VECS(tgr160_tv_template)
3548 }
3549 }, {
3550 .alg = "tgr192",
3551 .test = alg_test_hash,
3552 .suite = {
3553 - .hash = {
3554 - .vecs = tgr192_tv_template,
3555 - .count = TGR192_TEST_VECTORS
3556 + .hash = __VECS(tgr192_tv_template)
3557 + }
3558 + }, {
3559 + .alg = "tls10(hmac(sha1),cbc(aes))",
3560 + .test = alg_test_tls,
3561 + .suite = {
3562 + .tls = {
3563 + .enc = __VECS(tls_enc_tv_template),
3564 + .dec = __VECS(tls_dec_tv_template)
3565 }
3566 }
3567 }, {
3568 .alg = "vmac(aes)",
3569 .test = alg_test_hash,
3570 .suite = {
3571 - .hash = {
3572 - .vecs = aes_vmac128_tv_template,
3573 - .count = VMAC_AES_TEST_VECTORS
3574 - }
3575 + .hash = __VECS(aes_vmac128_tv_template)
3576 }
3577 }, {
3578 .alg = "wp256",
3579 .test = alg_test_hash,
3580 .suite = {
3581 - .hash = {
3582 - .vecs = wp256_tv_template,
3583 - .count = WP256_TEST_VECTORS
3584 - }
3585 + .hash = __VECS(wp256_tv_template)
3586 }
3587 }, {
3588 .alg = "wp384",
3589 .test = alg_test_hash,
3590 .suite = {
3591 - .hash = {
3592 - .vecs = wp384_tv_template,
3593 - .count = WP384_TEST_VECTORS
3594 - }
3595 + .hash = __VECS(wp384_tv_template)
3596 }
3597 }, {
3598 .alg = "wp512",
3599 .test = alg_test_hash,
3600 .suite = {
3601 - .hash = {
3602 - .vecs = wp512_tv_template,
3603 - .count = WP512_TEST_VECTORS
3604 - }
3605 + .hash = __VECS(wp512_tv_template)
3606 }
3607 }, {
3608 .alg = "xcbc(aes)",
3609 .test = alg_test_hash,
3610 .suite = {
3611 - .hash = {
3612 - .vecs = aes_xcbc128_tv_template,
3613 - .count = XCBC_AES_TEST_VECTORS
3614 - }
3615 + .hash = __VECS(aes_xcbc128_tv_template)
3616 }
3617 }, {
3618 .alg = "xts(aes)",
3619 @@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te
3620 .fips_allowed = 1,
3621 .suite = {
3622 .cipher = {
3623 - .enc = {
3624 - .vecs = aes_xts_enc_tv_template,
3625 - .count = AES_XTS_ENC_TEST_VECTORS
3626 - },
3627 - .dec = {
3628 - .vecs = aes_xts_dec_tv_template,
3629 - .count = AES_XTS_DEC_TEST_VECTORS
3630 - }
3631 + .enc = __VECS(aes_xts_enc_tv_template),
3632 + .dec = __VECS(aes_xts_dec_tv_template)
3633 }
3634 }
3635 }, {
3636 @@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te
3637 .test = alg_test_skcipher,
3638 .suite = {
3639 .cipher = {
3640 - .enc = {
3641 - .vecs = camellia_xts_enc_tv_template,
3642 - .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3643 - },
3644 - .dec = {
3645 - .vecs = camellia_xts_dec_tv_template,
3646 - .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3647 - }
3648 + .enc = __VECS(camellia_xts_enc_tv_template),
3649 + .dec = __VECS(camellia_xts_dec_tv_template)
3650 }
3651 }
3652 }, {
3653 @@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te
3654 .test = alg_test_skcipher,
3655 .suite = {
3656 .cipher = {
3657 - .enc = {
3658 - .vecs = cast6_xts_enc_tv_template,
3659 - .count = CAST6_XTS_ENC_TEST_VECTORS
3660 - },
3661 - .dec = {
3662 - .vecs = cast6_xts_dec_tv_template,
3663 - .count = CAST6_XTS_DEC_TEST_VECTORS
3664 - }
3665 + .enc = __VECS(cast6_xts_enc_tv_template),
3666 + .dec = __VECS(cast6_xts_dec_tv_template)
3667 }
3668 }
3669 }, {
3670 @@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te
3671 .test = alg_test_skcipher,
3672 .suite = {
3673 .cipher = {
3674 - .enc = {
3675 - .vecs = serpent_xts_enc_tv_template,
3676 - .count = SERPENT_XTS_ENC_TEST_VECTORS
3677 - },
3678 - .dec = {
3679 - .vecs = serpent_xts_dec_tv_template,
3680 - .count = SERPENT_XTS_DEC_TEST_VECTORS
3681 - }
3682 + .enc = __VECS(serpent_xts_enc_tv_template),
3683 + .dec = __VECS(serpent_xts_dec_tv_template)
3684 }
3685 }
3686 }, {
3687 @@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te
3688 .test = alg_test_skcipher,
3689 .suite = {
3690 .cipher = {
3691 - .enc = {
3692 - .vecs = tf_xts_enc_tv_template,
3693 - .count = TF_XTS_ENC_TEST_VECTORS
3694 - },
3695 - .dec = {
3696 - .vecs = tf_xts_dec_tv_template,
3697 - .count = TF_XTS_DEC_TEST_VECTORS
3698 - }
3699 + .enc = __VECS(tf_xts_enc_tv_template),
3700 + .dec = __VECS(tf_xts_dec_tv_template)
3701 }
3702 }
3703 }
3704 --- a/crypto/testmgr.h
3705 +++ b/crypto/testmgr.h
3706 @@ -34,9 +34,9 @@
3707
3708 struct hash_testvec {
3709 /* only used with keyed hash algorithms */
3710 - char *key;
3711 - char *plaintext;
3712 - char *digest;
3713 + const char *key;
3714 + const char *plaintext;
3715 + const char *digest;
3716 unsigned char tap[MAX_TAP];
3717 unsigned short psize;
3718 unsigned char np;
3719 @@ -63,11 +63,11 @@ struct hash_testvec {
3720 */
3721
3722 struct cipher_testvec {
3723 - char *key;
3724 - char *iv;
3725 - char *iv_out;
3726 - char *input;
3727 - char *result;
3728 + const char *key;
3729 + const char *iv;
3730 + const char *iv_out;
3731 + const char *input;
3732 + const char *result;
3733 unsigned short tap[MAX_TAP];
3734 int np;
3735 unsigned char also_non_np;
3736 @@ -80,11 +80,11 @@ struct cipher_testvec {
3737 };
3738
3739 struct aead_testvec {
3740 - char *key;
3741 - char *iv;
3742 - char *input;
3743 - char *assoc;
3744 - char *result;
3745 + const char *key;
3746 + const char *iv;
3747 + const char *input;
3748 + const char *assoc;
3749 + const char *result;
3750 unsigned char tap[MAX_TAP];
3751 unsigned char atap[MAX_TAP];
3752 int np;
3753 @@ -99,10 +99,10 @@ struct aead_testvec {
3754 };
3755
3756 struct cprng_testvec {
3757 - char *key;
3758 - char *dt;
3759 - char *v;
3760 - char *result;
3761 + const char *key;
3762 + const char *dt;
3763 + const char *v;
3764 + const char *result;
3765 unsigned char klen;
3766 unsigned short dtlen;
3767 unsigned short vlen;
3768 @@ -111,24 +111,38 @@ struct cprng_testvec {
3769 };
3770
3771 struct drbg_testvec {
3772 - unsigned char *entropy;
3773 + const unsigned char *entropy;
3774 size_t entropylen;
3775 - unsigned char *entpra;
3776 - unsigned char *entprb;
3777 + const unsigned char *entpra;
3778 + const unsigned char *entprb;
3779 size_t entprlen;
3780 - unsigned char *addtla;
3781 - unsigned char *addtlb;
3782 + const unsigned char *addtla;
3783 + const unsigned char *addtlb;
3784 size_t addtllen;
3785 - unsigned char *pers;
3786 + const unsigned char *pers;
3787 size_t perslen;
3788 - unsigned char *expected;
3789 + const unsigned char *expected;
3790 size_t expectedlen;
3791 };
3792
3793 +struct tls_testvec {
3794 + char *key; /* wrapped keys for encryption and authentication */
3795 + char *iv; /* initialization vector */
3796 + char *input; /* input data */
3797 + char *assoc; /* associated data: seq num, type, version, input len */
3798 + char *result; /* result data */
3799 + unsigned char fail; /* the test failure is expected */
3800 + unsigned char novrfy; /* dec verification failure expected */
3801 + unsigned char klen; /* key length */
3802 + unsigned short ilen; /* input data length */
3803 + unsigned short alen; /* associated data length */
3804 + unsigned short rlen; /* result length */
3805 +};
3806 +
3807 struct akcipher_testvec {
3808 - unsigned char *key;
3809 - unsigned char *m;
3810 - unsigned char *c;
3811 + const unsigned char *key;
3812 + const unsigned char *m;
3813 + const unsigned char *c;
3814 unsigned int key_len;
3815 unsigned int m_size;
3816 unsigned int c_size;
3817 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3818 };
3819
3820 struct kpp_testvec {
3821 - unsigned char *secret;
3822 - unsigned char *b_public;
3823 - unsigned char *expected_a_public;
3824 - unsigned char *expected_ss;
3825 + const unsigned char *secret;
3826 + const unsigned char *b_public;
3827 + const unsigned char *expected_a_public;
3828 + const unsigned char *expected_ss;
3829 unsigned short secret_size;
3830 unsigned short b_public_size;
3831 unsigned short expected_a_public_size;
3832 unsigned short expected_ss_size;
3833 };
3834
3835 -static char zeroed_string[48];
3836 +static const char zeroed_string[48];
3837
3838 /*
3839 - * RSA test vectors. Borrowed from openSSL.
3840 + * TLS1.0 synthetic test vectors
3841 */
3842 -#ifdef CONFIG_CRYPTO_FIPS
3843 -#define RSA_TEST_VECTORS 2
3844 +static struct tls_testvec tls_enc_tv_template[] = {
3845 + {
3846 +#ifdef __LITTLE_ENDIAN
3847 + .key = "\x08\x00" /* rta length */
3848 + "\x01\x00" /* rta type */
3849 +#else
3850 + .key = "\x00\x08" /* rta length */
3851 + "\x00\x01" /* rta type */
3852 +#endif
3853 + "\x00\x00\x00\x10" /* enc key length */
3854 + "authenticationkey20benckeyis16_bytes",
3855 + .klen = 8 + 20 + 16,
3856 + .iv = "iv0123456789abcd",
3857 + .input = "Single block msg",
3858 + .ilen = 16,
3859 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3860 + "\x00\x03\x01\x00\x10",
3861 + .alen = 13,
3862 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3863 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3864 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3865 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3866 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3867 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3868 + .rlen = 16 + 20 + 12,
3869 + }, {
3870 +#ifdef __LITTLE_ENDIAN
3871 + .key = "\x08\x00" /* rta length */
3872 + "\x01\x00" /* rta type */
3873 +#else
3874 + .key = "\x00\x08" /* rta length */
3875 + "\x00\x01" /* rta type */
3876 +#endif
3877 + "\x00\x00\x00\x10" /* enc key length */
3878 + "authenticationkey20benckeyis16_bytes",
3879 + .klen = 8 + 20 + 16,
3880 + .iv = "iv0123456789abcd",
3881 + .input = "",
3882 + .ilen = 0,
3883 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3884 + "\x00\x03\x01\x00\x00",
3885 + .alen = 13,
3886 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3887 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3888 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3889 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3890 + .rlen = 20 + 12,
3891 + }, {
3892 +#ifdef __LITTLE_ENDIAN
3893 + .key = "\x08\x00" /* rta length */
3894 + "\x01\x00" /* rta type */
3895 +#else
3896 + .key = "\x00\x08" /* rta length */
3897 + "\x00\x01" /* rta type */
3898 +#endif
3899 + "\x00\x00\x00\x10" /* enc key length */
3900 + "authenticationkey20benckeyis16_bytes",
3901 + .klen = 8 + 20 + 16,
3902 + .iv = "iv0123456789abcd",
3903 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
3904 + " plaintext285 bytes plaintext285 bytes plaintext285"
3905 + " bytes plaintext285 bytes plaintext285 bytes"
3906 + " plaintext285 bytes plaintext285 bytes plaintext285"
3907 + " bytes plaintext285 bytes plaintext285 bytes"
3908 + " plaintext285 bytes plaintext285 bytes plaintext285"
3909 + " bytes plaintext285 bytes plaintext",
3910 + .ilen = 285,
3911 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3912 + "\x00\x03\x01\x01\x1d",
3913 + .alen = 13,
3914 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3915 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3916 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3917 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3918 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3919 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3920 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3921 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3922 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3923 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3924 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3925 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3926 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3927 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3928 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3929 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3930 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3931 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3932 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3933 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3934 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3935 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3936 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3937 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3938 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3939 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3940 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3941 + .rlen = 285 + 20 + 15,
3942 + }
3943 +};
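
The .klen and .rlen values in these vectors follow directly from the key-blob layout and the TLS 1.0 record construction: the key is an 8-byte rtattr header (length and type are stored in CPU byte order, hence the __LITTLE_ENDIAN branches) carrying the 32-bit encryption-key length, followed by the 20-byte HMAC-SHA1 key and the 16-byte AES key, and each result is the plaintext plus the 20-byte MAC, padded to the AES block size with at least one padding-length byte. A stand-alone sketch of that arithmetic (all names below are illustrative, not kernel APIs):

#include <assert.h>
#include <stddef.h>

#define TLS_RTA_HDR_LEN  8   /* rta length + rta type + 32-bit enc key length */
#define SHA1_MAC_LEN     20  /* HMAC-SHA1 output */
#define AES_BLOCK_LEN    16

/* Expected .klen for tls10(hmac(sha1),cbc(aes)): header + auth key + enc key. */
static size_t tls_klen(size_t authkeylen, size_t enckeylen)
{
        return TLS_RTA_HDR_LEN + authkeylen + enckeylen;
}

/*
 * Expected .rlen: plaintext + MAC, rounded up to the AES block size with at
 * least one byte of TLS 1.0 CBC padding (the padding-length byte itself).
 */
static size_t tls_rlen(size_t ilen)
{
        size_t unpadded = ilen + SHA1_MAC_LEN + 1;

        return (unpadded + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN * AES_BLOCK_LEN;
}

int main(void)
{
        assert(tls_klen(20, 16) == 8 + 20 + 16);
        assert(tls_rlen(16)  == 16 + 20 + 12);   /* "Single block msg" */
        assert(tls_rlen(0)   == 20 + 12);        /* empty payload      */
        assert(tls_rlen(285) == 285 + 20 + 15);  /* 285-byte plaintext */
        return 0;
}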
3944 +
3945 +static struct tls_testvec tls_dec_tv_template[] = {
3946 + {
3947 +#ifdef __LITTLE_ENDIAN
3948 + .key = "\x08\x00" /* rta length */
3949 + "\x01\x00" /* rta type */
3950 +#else
3951 + .key = "\x00\x08" /* rta length */
3952 + "\x00\x01" /* rta type */
3953 +#endif
3954 + "\x00\x00\x00\x10" /* enc key length */
3955 + "authenticationkey20benckeyis16_bytes",
3956 + .klen = 8 + 20 + 16,
3957 + .iv = "iv0123456789abcd",
3958 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3959 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3960 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3961 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3962 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3963 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3964 + .ilen = 16 + 20 + 12,
3965 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3966 + "\x00\x03\x01\x00\x30",
3967 + .alen = 13,
3968 + .result = "Single block msg",
3969 + .rlen = 16,
3970 + }, {
3971 +#ifdef __LITTLE_ENDIAN
3972 + .key = "\x08\x00" /* rta length */
3973 + "\x01\x00" /* rta type */
3974 #else
3975 -#define RSA_TEST_VECTORS 5
3976 + .key = "\x00\x08" /* rta length */
3977 + "\x00\x01" /* rta type */
3978 #endif
3979 -static struct akcipher_testvec rsa_tv_template[] = {
3980 + "\x00\x00\x00\x10" /* enc key length */
3981 + "authenticationkey20benckeyis16_bytes",
3982 + .klen = 8 + 20 + 16,
3983 + .iv = "iv0123456789abcd",
3984 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3985 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3986 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3987 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3988 + .ilen = 20 + 12,
3989 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3990 + "\x00\x03\x01\x00\x20",
3991 + .alen = 13,
3992 + .result = "",
3993 + .rlen = 0,
3994 + }, {
3995 +#ifdef __LITTLE_ENDIAN
3996 + .key = "\x08\x00" /* rta length */
3997 + "\x01\x00" /* rta type */
3998 +#else
3999 + .key = "\x00\x08" /* rta length */
4000 + "\x00\x01" /* rta type */
4001 +#endif
4002 + "\x00\x00\x00\x10" /* enc key length */
4003 + "authenticationkey20benckeyis16_bytes",
4004 + .klen = 8 + 20 + 16,
4005 + .iv = "iv0123456789abcd",
4006 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4007 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4008 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4009 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4010 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4011 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4012 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4013 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4014 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4015 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4016 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4017 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4018 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4019 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4020 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4021 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4022 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4023 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4024 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4025 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4026 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4027 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4028 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4029 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4030 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4031 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4032 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4033 +
4034 + .ilen = 285 + 20 + 15,
4035 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4036 + "\x00\x03\x01\x01\x40",
4037 + .alen = 13,
4038 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4039 + " plaintext285 bytes plaintext285 bytes plaintext285"
4040 + " bytes plaintext285 bytes plaintext285 bytes"
4041 + " plaintext285 bytes plaintext285 bytes plaintext285"
4042 + " bytes plaintext285 bytes plaintext285 bytes"
4043 + " plaintext285 bytes plaintext285 bytes plaintext",
4044 + .rlen = 285,
4045 + }
4046 +};
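
The 13-byte .assoc field in both templates is the TLS MAC pseudo-header: an 8-byte sequence number, a record-type byte, the 0x03 0x01 (TLS 1.0) version, and a 16-bit big-endian length, which covers only the plaintext in the encryption vectors and the whole ciphertext (payload + MAC + padding) in the decryption vectors. A small sketch of that layout, using the values of the first decryption vector (illustrative helper, not a kernel API):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* seq(8) || type(1) || version(2) || length(2), as used by the tls10 vectors */
static void tls_aad_fill(uint8_t aad[13], uint16_t len)
{
        static const uint8_t seq_type_ver[11] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* sequence */
                0x00,                                           /* type     */
                0x03, 0x01,                                     /* TLS 1.0  */
        };

        memcpy(aad, seq_type_ver, sizeof(seq_type_ver));
        aad[11] = len >> 8;             /* length, big endian */
        aad[12] = len & 0xff;
}

int main(void)
{
        /* .assoc of the first decryption vector: length = 16 + 20 + 12 = 0x30 */
        static const uint8_t expected[13] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x00, 0x03, 0x01, 0x00, 0x30,
        };
        uint8_t aad[13];

        tls_aad_fill(aad, 16 + 20 + 12);
        assert(!memcmp(aad, expected, sizeof(expected)));
        return 0;
}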
4047 +
4048 +/*
4049 + * RSA test vectors. Borrowed from openSSL.
4050 + */
4051 +static const struct akcipher_testvec rsa_tv_template[] = {
4052 {
4053 #ifndef CONFIG_CRYPTO_FIPS
4054 .key =
4055 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4056 .m_size = 8,
4057 .c_size = 256,
4058 .public_key_vec = true,
4059 +#ifndef CONFIG_CRYPTO_FIPS
4060 }, {
4061 .key =
4062 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4063 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4064 .key_len = 2349,
4065 .m_size = 8,
4066 .c_size = 512,
4067 +#endif
4068 }
4069 };
4070
4071 -#define DH_TEST_VECTORS 2
4072 -
4073 -struct kpp_testvec dh_tv_template[] = {
4074 +static const struct kpp_testvec dh_tv_template[] = {
4075 {
4076 .secret =
4077 #ifdef __LITTLE_ENDIAN
4078 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4079 }
4080 };
4081
4082 -#ifdef CONFIG_CRYPTO_FIPS
4083 -#define ECDH_TEST_VECTORS 1
4084 -#else
4085 -#define ECDH_TEST_VECTORS 2
4086 -#endif
4087 -struct kpp_testvec ecdh_tv_template[] = {
4088 +static const struct kpp_testvec ecdh_tv_template[] = {
4089 {
4090 #ifndef CONFIG_CRYPTO_FIPS
4091 .secret =
4092 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4093 /*
4094 * MD4 test vectors from RFC1320
4095 */
4096 -#define MD4_TEST_VECTORS 7
4097 -
4098 -static struct hash_testvec md4_tv_template [] = {
4099 +static const struct hash_testvec md4_tv_template[] = {
4100 {
4101 .plaintext = "",
4102 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4103 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4104 },
4105 };
4106
4107 -#define SHA3_224_TEST_VECTORS 3
4108 -static struct hash_testvec sha3_224_tv_template[] = {
4109 +static const struct hash_testvec sha3_224_tv_template[] = {
4110 {
4111 .plaintext = "",
4112 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4113 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4114 },
4115 };
4116
4117 -#define SHA3_256_TEST_VECTORS 3
4118 -static struct hash_testvec sha3_256_tv_template[] = {
4119 +static const struct hash_testvec sha3_256_tv_template[] = {
4120 {
4121 .plaintext = "",
4122 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4123 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4124 };
4125
4126
4127 -#define SHA3_384_TEST_VECTORS 3
4128 -static struct hash_testvec sha3_384_tv_template[] = {
4129 +static const struct hash_testvec sha3_384_tv_template[] = {
4130 {
4131 .plaintext = "",
4132 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4133 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4134 };
4135
4136
4137 -#define SHA3_512_TEST_VECTORS 3
4138 -static struct hash_testvec sha3_512_tv_template[] = {
4139 +static const struct hash_testvec sha3_512_tv_template[] = {
4140 {
4141 .plaintext = "",
4142 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4143 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4144 /*
4145 * MD5 test vectors from RFC1321
4146 */
4147 -#define MD5_TEST_VECTORS 7
4148 -
4149 -static struct hash_testvec md5_tv_template[] = {
4150 +static const struct hash_testvec md5_tv_template[] = {
4151 {
4152 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4153 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4154 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4155 /*
4156 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4157 */
4158 -#define RMD128_TEST_VECTORS 10
4159 -
4160 -static struct hash_testvec rmd128_tv_template[] = {
4161 +static const struct hash_testvec rmd128_tv_template[] = {
4162 {
4163 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4164 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4165 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4166 /*
4167 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4168 */
4169 -#define RMD160_TEST_VECTORS 10
4170 -
4171 -static struct hash_testvec rmd160_tv_template[] = {
4172 +static const struct hash_testvec rmd160_tv_template[] = {
4173 {
4174 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4175 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4176 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4177 /*
4178 * RIPEMD-256 test vectors
4179 */
4180 -#define RMD256_TEST_VECTORS 8
4181 -
4182 -static struct hash_testvec rmd256_tv_template[] = {
4183 +static const struct hash_testvec rmd256_tv_template[] = {
4184 {
4185 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4186 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4187 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4188 /*
4189 * RIPEMD-320 test vectors
4190 */
4191 -#define RMD320_TEST_VECTORS 8
4192 -
4193 -static struct hash_testvec rmd320_tv_template[] = {
4194 +static const struct hash_testvec rmd320_tv_template[] = {
4195 {
4196 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4197 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4198 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4199 }
4200 };
4201
4202 -#define CRCT10DIF_TEST_VECTORS 3
4203 -static struct hash_testvec crct10dif_tv_template[] = {
4204 +static const struct hash_testvec crct10dif_tv_template[] = {
4205 {
4206 - .plaintext = "abc",
4207 - .psize = 3,
4208 -#ifdef __LITTLE_ENDIAN
4209 - .digest = "\x3b\x44",
4210 -#else
4211 - .digest = "\x44\x3b",
4212 -#endif
4213 - }, {
4214 - .plaintext = "1234567890123456789012345678901234567890"
4215 - "123456789012345678901234567890123456789",
4216 - .psize = 79,
4217 -#ifdef __LITTLE_ENDIAN
4218 - .digest = "\x70\x4b",
4219 -#else
4220 - .digest = "\x4b\x70",
4221 -#endif
4222 - }, {
4223 - .plaintext =
4224 - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4225 - .psize = 56,
4226 -#ifdef __LITTLE_ENDIAN
4227 - .digest = "\xe3\x9c",
4228 -#else
4229 - .digest = "\x9c\xe3",
4230 -#endif
4231 - .np = 2,
4232 - .tap = { 28, 28 }
4233 + .plaintext = "abc",
4234 + .psize = 3,
4235 + .digest = (u8 *)(u16 []){ 0x443b },
4236 + }, {
4237 + .plaintext = "1234567890123456789012345678901234567890"
4238 + "123456789012345678901234567890123456789",
4239 + .psize = 79,
4240 + .digest = (u8 *)(u16 []){ 0x4b70 },
4241 + .np = 2,
4242 + .tap = { 63, 16 },
4243 + }, {
4244 + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
4245 + "ddddddddddddd",
4246 + .psize = 56,
4247 + .digest = (u8 *)(u16 []){ 0x9ce3 },
4248 + .np = 8,
4249 + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
4250 + }, {
4251 + .plaintext = "1234567890123456789012345678901234567890"
4252 + "1234567890123456789012345678901234567890"
4253 + "1234567890123456789012345678901234567890"
4254 + "1234567890123456789012345678901234567890"
4255 + "1234567890123456789012345678901234567890"
4256 + "1234567890123456789012345678901234567890"
4257 + "1234567890123456789012345678901234567890"
4258 + "123456789012345678901234567890123456789",
4259 + .psize = 319,
4260 + .digest = (u8 *)(u16 []){ 0x44c6 },
4261 + }, {
4262 + .plaintext = "1234567890123456789012345678901234567890"
4263 + "1234567890123456789012345678901234567890"
4264 + "1234567890123456789012345678901234567890"
4265 + "1234567890123456789012345678901234567890"
4266 + "1234567890123456789012345678901234567890"
4267 + "1234567890123456789012345678901234567890"
4268 + "1234567890123456789012345678901234567890"
4269 + "123456789012345678901234567890123456789",
4270 + .psize = 319,
4271 + .digest = (u8 *)(u16 []){ 0x44c6 },
4272 + .np = 4,
4273 + .tap = { 1, 255, 57, 6 },
4274 }
4275 };
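
The reworked CRC-T10DIF digests rely on C99 compound literals: the expected 16-bit CRC is stored as a native-endian u16 and compared byte-for-byte against what the transform writes, which is why the per-endianness #ifdef pairs removed above are no longer needed. A stand-alone illustration of the idiom, using the value from the first vector (plain C, outside the kernel):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* Same idiom as .digest = (u8 *)(u16 []){ 0x443b }: the CRC lives in
         * memory in CPU byte order, so reading it back as a u16 gives the
         * original value on either endianness. */
        const uint8_t *digest = (const uint8_t *)(uint16_t []){ 0x443b };
        uint16_t crc;

        memcpy(&crc, digest, sizeof(crc));
        assert(crc == 0x443b);
        return 0;
}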
4276
4277 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4278 * SHA1 test vectors from from FIPS PUB 180-1
4279 * Long vector from CAVS 5.0
4280 */
4281 -#define SHA1_TEST_VECTORS 6
4282 -
4283 -static struct hash_testvec sha1_tv_template[] = {
4284 +static const struct hash_testvec sha1_tv_template[] = {
4285 {
4286 .plaintext = "",
4287 .psize = 0,
4288 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4289 /*
4290 * SHA224 test vectors from from FIPS PUB 180-2
4291 */
4292 -#define SHA224_TEST_VECTORS 5
4293 -
4294 -static struct hash_testvec sha224_tv_template[] = {
4295 +static const struct hash_testvec sha224_tv_template[] = {
4296 {
4297 .plaintext = "",
4298 .psize = 0,
4299 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4300 /*
4301 * SHA256 test vectors from from NIST
4302 */
4303 -#define SHA256_TEST_VECTORS 5
4304 -
4305 -static struct hash_testvec sha256_tv_template[] = {
4306 +static const struct hash_testvec sha256_tv_template[] = {
4307 {
4308 .plaintext = "",
4309 .psize = 0,
4310 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4311 /*
4312 * SHA384 test vectors from from NIST and kerneli
4313 */
4314 -#define SHA384_TEST_VECTORS 6
4315 -
4316 -static struct hash_testvec sha384_tv_template[] = {
4317 +static const struct hash_testvec sha384_tv_template[] = {
4318 {
4319 .plaintext = "",
4320 .psize = 0,
4321 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4322 /*
4323 * SHA512 test vectors from from NIST and kerneli
4324 */
4325 -#define SHA512_TEST_VECTORS 6
4326 -
4327 -static struct hash_testvec sha512_tv_template[] = {
4328 +static const struct hash_testvec sha512_tv_template[] = {
4329 {
4330 .plaintext = "",
4331 .psize = 0,
4332 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4333 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4334 * submission
4335 */
4336 -#define WP512_TEST_VECTORS 8
4337 -
4338 -static struct hash_testvec wp512_tv_template[] = {
4339 +static const struct hash_testvec wp512_tv_template[] = {
4340 {
4341 .plaintext = "",
4342 .psize = 0,
4343 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4344 },
4345 };
4346
4347 -#define WP384_TEST_VECTORS 8
4348 -
4349 -static struct hash_testvec wp384_tv_template[] = {
4350 +static const struct hash_testvec wp384_tv_template[] = {
4351 {
4352 .plaintext = "",
4353 .psize = 0,
4354 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4355 },
4356 };
4357
4358 -#define WP256_TEST_VECTORS 8
4359 -
4360 -static struct hash_testvec wp256_tv_template[] = {
4361 +static const struct hash_testvec wp256_tv_template[] = {
4362 {
4363 .plaintext = "",
4364 .psize = 0,
4365 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4366 /*
4367 * TIGER test vectors from Tiger website
4368 */
4369 -#define TGR192_TEST_VECTORS 6
4370 -
4371 -static struct hash_testvec tgr192_tv_template[] = {
4372 +static const struct hash_testvec tgr192_tv_template[] = {
4373 {
4374 .plaintext = "",
4375 .psize = 0,
4376 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4377 },
4378 };
4379
4380 -#define TGR160_TEST_VECTORS 6
4381 -
4382 -static struct hash_testvec tgr160_tv_template[] = {
4383 +static const struct hash_testvec tgr160_tv_template[] = {
4384 {
4385 .plaintext = "",
4386 .psize = 0,
4387 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4388 },
4389 };
4390
4391 -#define TGR128_TEST_VECTORS 6
4392 -
4393 -static struct hash_testvec tgr128_tv_template[] = {
4394 +static const struct hash_testvec tgr128_tv_template[] = {
4395 {
4396 .plaintext = "",
4397 .psize = 0,
4398 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4399 },
4400 };
4401
4402 -#define GHASH_TEST_VECTORS 6
4403 -
4404 -static struct hash_testvec ghash_tv_template[] =
4405 +static const struct hash_testvec ghash_tv_template[] =
4406 {
4407 {
4408 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4409 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4410 * HMAC-MD5 test vectors from RFC2202
4411 * (These need to be fixed to not use strlen).
4412 */
4413 -#define HMAC_MD5_TEST_VECTORS 7
4414 -
4415 -static struct hash_testvec hmac_md5_tv_template[] =
4416 +static const struct hash_testvec hmac_md5_tv_template[] =
4417 {
4418 {
4419 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4420 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4421 /*
4422 * HMAC-RIPEMD128 test vectors from RFC2286
4423 */
4424 -#define HMAC_RMD128_TEST_VECTORS 7
4425 -
4426 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4427 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4428 {
4429 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4430 .ksize = 16,
4431 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4432 /*
4433 * HMAC-RIPEMD160 test vectors from RFC2286
4434 */
4435 -#define HMAC_RMD160_TEST_VECTORS 7
4436 -
4437 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4438 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4439 {
4440 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4441 .ksize = 20,
4442 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4443 /*
4444 * HMAC-SHA1 test vectors from RFC2202
4445 */
4446 -#define HMAC_SHA1_TEST_VECTORS 7
4447 -
4448 -static struct hash_testvec hmac_sha1_tv_template[] = {
4449 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4450 {
4451 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4452 .ksize = 20,
4453 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4454 /*
4455 * SHA224 HMAC test vectors from RFC4231
4456 */
4457 -#define HMAC_SHA224_TEST_VECTORS 4
4458 -
4459 -static struct hash_testvec hmac_sha224_tv_template[] = {
4460 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4461 {
4462 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4463 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4464 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4465 * HMAC-SHA256 test vectors from
4466 * draft-ietf-ipsec-ciph-sha-256-01.txt
4467 */
4468 -#define HMAC_SHA256_TEST_VECTORS 10
4469 -
4470 -static struct hash_testvec hmac_sha256_tv_template[] = {
4471 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4472 {
4473 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
4474 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4475 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4476 },
4477 };
4478
4479 -#define CMAC_AES_TEST_VECTORS 6
4480 -
4481 -static struct hash_testvec aes_cmac128_tv_template[] = {
4482 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4483 { /* From NIST Special Publication 800-38B, AES-128 */
4484 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4485 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4486 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4487 }
4488 };
4489
4490 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4491 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4492 + {
4493 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4494 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4495 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4496 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4497 + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4498 + "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4499 + .psize = 16,
4500 + .ksize = 16,
4501 + }, {
4502 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4503 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4504 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4505 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4506 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4507 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4508 + "\x30",
4509 + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4510 + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4511 + .psize = 33,
4512 + .ksize = 16,
4513 + .np = 2,
4514 + .tap = { 7, 26 },
4515 + }, {
4516 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4517 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4518 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4519 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4520 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4521 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4522 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4523 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4524 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4525 + "\xad\x2b\x41\x7b\xe6\x6c\x37",
4526 + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4527 + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4528 + .psize = 63,
4529 + .ksize = 16,
4530 + }, {
4531 + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4532 + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4533 + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4534 + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4535 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4536 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4537 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4538 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4539 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4540 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4541 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4542 + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4543 + "\x1c",
4544 + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4545 + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4546 + .psize = 65,
4547 + .ksize = 32,
4548 + }
4549 +};
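
Vectors that set .np are fed to the transform in .np scatterlist chunks whose sizes come from .tap, so each vector's .tap entries must sum to .psize (7 + 26 = 33 in the second vector above). A quick sanity check of that invariant (the struct below only mirrors the fields involved; it is not the kernel's hash_testvec):

#include <assert.h>
#include <stddef.h>

/* Only the fields relevant to chunked hashing, mirrored for illustration. */
struct chunked_vec {
        size_t psize;
        unsigned int np;
        unsigned int tap[8];
};

static size_t tap_total(const struct chunked_vec *v)
{
        size_t total = 0;
        unsigned int i;

        for (i = 0; i < v->np; i++)
                total += v->tap[i];
        return total;
}

int main(void)
{
        /* Second aes_cbcmac vector: 33 bytes split as 7 + 26. */
        const struct chunked_vec v = { .psize = 33, .np = 2, .tap = { 7, 26 } };

        assert(tap_total(&v) == v.psize);
        return 0;
}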
4550
4551 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4552 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4553 /*
4554 * From NIST Special Publication 800-38B, Three Key TDEA
4555 * Corrected test vectors from:
4556 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4557 }
4558 };
4559
4560 -#define XCBC_AES_TEST_VECTORS 6
4561 -
4562 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4563 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4564 {
4565 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4566 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4567 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4568 }
4569 };
4570
4571 -#define VMAC_AES_TEST_VECTORS 11
4572 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4573 - '\x02', '\x03', '\x02', '\x02',
4574 - '\x02', '\x04', '\x01', '\x07',
4575 - '\x04', '\x01', '\x04', '\x03',};
4576 -static char vmac_string2[128] = {'a', 'b', 'c',};
4577 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4578 - 'a', 'b', 'c', 'a', 'b', 'c',
4579 - 'a', 'b', 'c', 'a', 'b', 'c',
4580 - 'a', 'b', 'c', 'a', 'b', 'c',
4581 - 'a', 'b', 'c', 'a', 'b', 'c',
4582 - 'a', 'b', 'c', 'a', 'b', 'c',
4583 - 'a', 'b', 'c', 'a', 'b', 'c',
4584 - 'a', 'b', 'c', 'a', 'b', 'c',
4585 - };
4586 -
4587 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4588 - 'i', 'j', 'l', 'm',
4589 - 'o', 'p', 'r', 's',
4590 - 't', 'u', 'w', 'x', 'z'};
4591 -
4592 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4593 - 'o', 'l', 'k', ']', '%',
4594 - '9', '2', '7', '!', 'A'};
4595 -
4596 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4597 - 'i', '!', '#', 'w', '0',
4598 - 'z', '/', '4', 'A', 'n'};
4599 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4600 + '\x02', '\x03', '\x02', '\x02',
4601 + '\x02', '\x04', '\x01', '\x07',
4602 + '\x04', '\x01', '\x04', '\x03',};
4603 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4604 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4605 + 'a', 'b', 'c', 'a', 'b', 'c',
4606 + 'a', 'b', 'c', 'a', 'b', 'c',
4607 + 'a', 'b', 'c', 'a', 'b', 'c',
4608 + 'a', 'b', 'c', 'a', 'b', 'c',
4609 + 'a', 'b', 'c', 'a', 'b', 'c',
4610 + 'a', 'b', 'c', 'a', 'b', 'c',
4611 + 'a', 'b', 'c', 'a', 'b', 'c',
4612 + };
4613 +
4614 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4615 + 'i', 'j', 'l', 'm',
4616 + 'o', 'p', 'r', 's',
4617 + 't', 'u', 'w', 'x', 'z'};
4618 +
4619 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4620 + 'o', 'l', 'k', ']', '%',
4621 + '9', '2', '7', '!', 'A'};
4622 +
4623 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4624 + 'i', '!', '#', 'w', '0',
4625 + 'z', '/', '4', 'A', 'n'};
4626
4627 -static struct hash_testvec aes_vmac128_tv_template[] = {
4628 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4629 {
4630 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4631 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4632 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4633 * SHA384 HMAC test vectors from RFC4231
4634 */
4635
4636 -#define HMAC_SHA384_TEST_VECTORS 4
4637 -
4638 -static struct hash_testvec hmac_sha384_tv_template[] = {
4639 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4640 {
4641 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4642 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4643 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4644 * SHA512 HMAC test vectors from RFC4231
4645 */
4646
4647 -#define HMAC_SHA512_TEST_VECTORS 4
4648 -
4649 -static struct hash_testvec hmac_sha512_tv_template[] = {
4650 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4651 {
4652 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4653 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4654 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4655 },
4656 };
4657
4658 -#define HMAC_SHA3_224_TEST_VECTORS 4
4659 -
4660 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4661 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4662 {
4663 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4664 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4665 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4666 },
4667 };
4668
4669 -#define HMAC_SHA3_256_TEST_VECTORS 4
4670 -
4671 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4672 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4673 {
4674 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4675 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4676 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4677 },
4678 };
4679
4680 -#define HMAC_SHA3_384_TEST_VECTORS 4
4681 -
4682 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4683 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4684 {
4685 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4686 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4687 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4688 },
4689 };
4690
4691 -#define HMAC_SHA3_512_TEST_VECTORS 4
4692 -
4693 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4694 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4695 {
4696 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4697 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4698 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4699 * Poly1305 test vectors from RFC7539 A.3.
4700 */
4701
4702 -#define POLY1305_TEST_VECTORS 11
4703 -
4704 -static struct hash_testvec poly1305_tv_template[] = {
4705 +static const struct hash_testvec poly1305_tv_template[] = {
4706 { /* Test Vector #1 */
4707 .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
4708 "\x00\x00\x00\x00\x00\x00\x00\x00"
4709 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_t
4710 /*
4711 * DES test vectors.
4712 */
4713 -#define DES_ENC_TEST_VECTORS 11
4714 -#define DES_DEC_TEST_VECTORS 5
4715 -#define DES_CBC_ENC_TEST_VECTORS 6
4716 -#define DES_CBC_DEC_TEST_VECTORS 5
4717 -#define DES_CTR_ENC_TEST_VECTORS 2
4718 -#define DES_CTR_DEC_TEST_VECTORS 2
4719 -#define DES3_EDE_ENC_TEST_VECTORS 4
4720 -#define DES3_EDE_DEC_TEST_VECTORS 4
4721 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
4722 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
4723 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
4724 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
4725 -
4726 -static struct cipher_testvec des_enc_tv_template[] = {
4727 +static const struct cipher_testvec des_enc_tv_template[] = {
4728 { /* From Applied Cryptography */
4729 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4730 .klen = 8,
4731 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_
4732 },
4733 };
4734
4735 -static struct cipher_testvec des_dec_tv_template[] = {
4736 +static const struct cipher_testvec des_dec_tv_template[] = {
4737 { /* From Applied Cryptography */
4738 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4739 .klen = 8,
4740 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_
4741 },
4742 };
4743
4744 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4745 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4746 { /* From OpenSSL */
4747 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4748 .klen = 8,
4749 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc
4750 },
4751 };
4752
4753 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4754 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4755 { /* FIPS Pub 81 */
4756 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4757 .klen = 8,
4758 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec
4759 },
4760 };
4761
4762 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4763 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4764 { /* Generated with Crypto++ */
4765 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4766 .klen = 8,
4767 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc
4768 },
4769 };
4770
4771 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4772 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4773 { /* Generated with Crypto++ */
4774 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4775 .klen = 8,
4776 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec
4777 },
4778 };
4779
4780 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4781 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4782 { /* These are from openssl */
4783 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4784 "\x55\x55\x55\x55\x55\x55\x55\x55"
4785 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_en
4786 },
4787 };
4788
4789 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4790 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4791 { /* These are from openssl */
4792 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4793 "\x55\x55\x55\x55\x55\x55\x55\x55"
4794 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_de
4795 },
4796 };
4797
4798 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4799 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4800 { /* Generated from openssl */
4801 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4802 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4803 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cb
4804 },
4805 };
4806
4807 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4808 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4809 { /* Generated from openssl */
4810 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4811 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4812 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cb
4813 },
4814 };
4815
4816 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4817 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4818 { /* Generated with Crypto++ */
4819 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4820 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4821 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ct
4822 },
4823 };
4824
4825 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4826 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4827 { /* Generated with Crypto++ */
4828 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4829 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4830 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ct
4831 /*
4832 * Blowfish test vectors.
4833 */
4834 -#define BF_ENC_TEST_VECTORS 7
4835 -#define BF_DEC_TEST_VECTORS 7
4836 -#define BF_CBC_ENC_TEST_VECTORS 2
4837 -#define BF_CBC_DEC_TEST_VECTORS 2
4838 -#define BF_CTR_ENC_TEST_VECTORS 2
4839 -#define BF_CTR_DEC_TEST_VECTORS 2
4840 -
4841 -static struct cipher_testvec bf_enc_tv_template[] = {
4842 +static const struct cipher_testvec bf_enc_tv_template[] = {
4843 { /* DES test vectors from OpenSSL */
4844 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4845 .klen = 8,
4846 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_t
4847 },
4848 };
4849
4850 -static struct cipher_testvec bf_dec_tv_template[] = {
4851 +static const struct cipher_testvec bf_dec_tv_template[] = {
4852 { /* DES test vectors from OpenSSL */
4853 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4854 .klen = 8,
4855 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_t
4856 },
4857 };
4858
4859 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4860 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4861 { /* From OpenSSL */
4862 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4863 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4864 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_
4865 },
4866 };
4867
4868 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4869 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4870 { /* From OpenSSL */
4871 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4872 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4873 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_
4874 },
4875 };
4876
4877 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4878 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4879 { /* Generated with Crypto++ */
4880 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4881 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4882 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_
4883 },
4884 };
4885
4886 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4887 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4888 { /* Generated with Crypto++ */
4889 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4890 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4891 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_
4892 /*
4893 * Twofish test vectors.
4894 */
4895 -#define TF_ENC_TEST_VECTORS 4
4896 -#define TF_DEC_TEST_VECTORS 4
4897 -#define TF_CBC_ENC_TEST_VECTORS 5
4898 -#define TF_CBC_DEC_TEST_VECTORS 5
4899 -#define TF_CTR_ENC_TEST_VECTORS 2
4900 -#define TF_CTR_DEC_TEST_VECTORS 2
4901 -#define TF_LRW_ENC_TEST_VECTORS 8
4902 -#define TF_LRW_DEC_TEST_VECTORS 8
4903 -#define TF_XTS_ENC_TEST_VECTORS 5
4904 -#define TF_XTS_DEC_TEST_VECTORS 5
4905 -
4906 -static struct cipher_testvec tf_enc_tv_template[] = {
4907 +static const struct cipher_testvec tf_enc_tv_template[] = {
4908 {
4909 .key = zeroed_string,
4910 .klen = 16,
4911 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_t
4912 },
4913 };
4914
4915 -static struct cipher_testvec tf_dec_tv_template[] = {
4916 +static const struct cipher_testvec tf_dec_tv_template[] = {
4917 {
4918 .key = zeroed_string,
4919 .klen = 16,
4920 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_t
4921 },
4922 };
4923
4924 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4925 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4926 { /* Generated with Nettle */
4927 .key = zeroed_string,
4928 .klen = 16,
4929 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_
4930 },
4931 };
4932
4933 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4934 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4935 { /* Reverse of the first four above */
4936 .key = zeroed_string,
4937 .klen = 16,
4938 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_
4939 },
4940 };
4941
4942 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4943 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4944 { /* Generated with Crypto++ */
4945 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4946 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4947 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_
4948 },
4949 };
4950
4951 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4952 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4953 { /* Generated with Crypto++ */
4954 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4955 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4956 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_
4957 },
4958 };
4959
4960 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4961 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4962 /* Generated from AES-LRW test vectors */
4963 {
4964 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4965 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_
4966 },
4967 };
4968
4969 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4970 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
4971 /* Generated from AES-LRW test vectors */
4972 /* same as enc vectors with input and result reversed */
4973 {
4974 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_
4975 },
4976 };
4977
4978 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
4979 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
4980 /* Generated from AES-XTS test vectors */
4981 {
4982 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
4983 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_
4984 },
4985 };
4986
4987 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
4988 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
4989 /* Generated from AES-XTS test vectors */
4990 /* same as enc vectors with input and result reversed */
4991 {
4992 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_
4993 * Serpent test vectors. These are backwards because Serpent writes
4994 * octet sequences in right-to-left mode.
4995 */
4996 -#define SERPENT_ENC_TEST_VECTORS 5
4997 -#define SERPENT_DEC_TEST_VECTORS 5
4998 -
4999 -#define TNEPRES_ENC_TEST_VECTORS 4
5000 -#define TNEPRES_DEC_TEST_VECTORS 4
5001 -
5002 -#define SERPENT_CBC_ENC_TEST_VECTORS 1
5003 -#define SERPENT_CBC_DEC_TEST_VECTORS 1
5004 -
5005 -#define SERPENT_CTR_ENC_TEST_VECTORS 2
5006 -#define SERPENT_CTR_DEC_TEST_VECTORS 2
5007 -
5008 -#define SERPENT_LRW_ENC_TEST_VECTORS 8
5009 -#define SERPENT_LRW_DEC_TEST_VECTORS 8
5010 -
5011 -#define SERPENT_XTS_ENC_TEST_VECTORS 5
5012 -#define SERPENT_XTS_DEC_TEST_VECTORS 5
5013 -
5014 -static struct cipher_testvec serpent_enc_tv_template[] = {
5015 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5016 {
5017 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
5018 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5019 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc
5020 },
5021 };
5022
5023 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5024 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5025 { /* KeySize=128, PT=0, I=1 */
5026 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5027 "\x00\x00\x00\x00\x00\x00\x00\x00",
5028 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc
5029 };
5030
5031
5032 -static struct cipher_testvec serpent_dec_tv_template[] = {
5033 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5034 {
5035 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5036 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5037 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec
5038 },
5039 };
5040
5041 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5042 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5043 {
5044 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5045 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5046 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec
5047 },
5048 };
5049
5050 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5051 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5052 { /* Generated with Crypto++ */
5053 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5054 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5055 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc
5056 },
5057 };
5058
5059 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5060 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5061 { /* Generated with Crypto++ */
5062 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5063 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5064 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc
5065 },
5066 };
5067
5068 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5069 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5070 { /* Generated with Crypto++ */
5071 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5072 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5073 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr
5074 },
5075 };
5076
5077 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5078 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5079 { /* Generated with Crypto++ */
5080 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5081 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5082 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr
5083 },
5084 };
5085
5086 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5087 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5088 /* Generated from AES-LRW test vectors */
5089 {
5090 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5091 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw
5092 },
5093 };
5094
5095 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5096 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5097 /* Generated from AES-LRW test vectors */
5098 /* same as enc vectors with input and result reversed */
5099 {
5100 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw
5101 },
5102 };
5103
5104 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5105 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5106 /* Generated from AES-XTS test vectors */
5107 {
5108 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5109 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts
5110 },
5111 };
5112
5113 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5114 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5115 /* Generated from AES-XTS test vectors */
5116 /* same as enc vectors with input and result reversed */
5117 {
5118 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts
5119 };
5120
5121 /* Cast6 test vectors from RFC 2612 */
5122 -#define CAST6_ENC_TEST_VECTORS 4
5123 -#define CAST6_DEC_TEST_VECTORS 4
5124 -#define CAST6_CBC_ENC_TEST_VECTORS 1
5125 -#define CAST6_CBC_DEC_TEST_VECTORS 1
5126 -#define CAST6_CTR_ENC_TEST_VECTORS 2
5127 -#define CAST6_CTR_DEC_TEST_VECTORS 2
5128 -#define CAST6_LRW_ENC_TEST_VECTORS 1
5129 -#define CAST6_LRW_DEC_TEST_VECTORS 1
5130 -#define CAST6_XTS_ENC_TEST_VECTORS 1
5131 -#define CAST6_XTS_DEC_TEST_VECTORS 1
5132 -
5133 -static struct cipher_testvec cast6_enc_tv_template[] = {
5134 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5135 {
5136 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5137 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5138 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_t
5139 },
5140 };
5141
5142 -static struct cipher_testvec cast6_dec_tv_template[] = {
5143 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5144 {
5145 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5146 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5147 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_t
5148 },
5149 };
5150
5151 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5152 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5153 { /* Generated from TF test vectors */
5154 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5155 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5156 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_e
5157 },
5158 };
5159
5160 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5161 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5162 { /* Generated from TF test vectors */
5163 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5164 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5165 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_d
5166 },
5167 };
5168
5169 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5170 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5171 { /* Generated from TF test vectors */
5172 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5173 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5174 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_e
5175 },
5176 };
5177
5178 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5179 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5180 { /* Generated from TF test vectors */
5181 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5182 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5183 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_d
5184 },
5185 };
5186
5187 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5188 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5189 { /* Generated from TF test vectors */
5190 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5191 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5192 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_e
5193 },
5194 };
5195
5196 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5197 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5198 { /* Generated from TF test vectors */
5199 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5200 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5201 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_d
5202 },
5203 };
5204
5205 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5206 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5207 { /* Generated from TF test vectors */
5208 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5209 "\x23\x53\x60\x28\x74\x71\x35\x26"
5210 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_e
5211 },
5212 };
5213
5214 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5215 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5216 { /* Generated from TF test vectors */
5217 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5218 "\x23\x53\x60\x28\x74\x71\x35\x26"
5219 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_d
5220 /*
5221 * AES test vectors.
5222 */
5223 -#define AES_ENC_TEST_VECTORS 4
5224 -#define AES_DEC_TEST_VECTORS 4
5225 -#define AES_CBC_ENC_TEST_VECTORS 5
5226 -#define AES_CBC_DEC_TEST_VECTORS 5
5227 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5228 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5229 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5230 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5231 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5232 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5233 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5234 -#define AES_LRW_ENC_TEST_VECTORS 8
5235 -#define AES_LRW_DEC_TEST_VECTORS 8
5236 -#define AES_XTS_ENC_TEST_VECTORS 5
5237 -#define AES_XTS_DEC_TEST_VECTORS 5
5238 -#define AES_CTR_ENC_TEST_VECTORS 5
5239 -#define AES_CTR_DEC_TEST_VECTORS 5
5240 -#define AES_OFB_ENC_TEST_VECTORS 1
5241 -#define AES_OFB_DEC_TEST_VECTORS 1
5242 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5243 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5244 -#define AES_GCM_ENC_TEST_VECTORS 9
5245 -#define AES_GCM_DEC_TEST_VECTORS 8
5246 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5247 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5248 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5249 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5250 -#define AES_CCM_ENC_TEST_VECTORS 8
5251 -#define AES_CCM_DEC_TEST_VECTORS 7
5252 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5253 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5254 -
5255 -static struct cipher_testvec aes_enc_tv_template[] = {
5256 +static const struct cipher_testvec aes_enc_tv_template[] = {
5257 { /* From FIPS-197 */
5258 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5259 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5260 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_
5261 },
5262 };
5263
5264 -static struct cipher_testvec aes_dec_tv_template[] = {
5265 +static const struct cipher_testvec aes_dec_tv_template[] = {
5266 { /* From FIPS-197 */
5267 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5268 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5269 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_
5270 },
5271 };
5272
5273 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5274 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5275 { /* From RFC 3602 */
5276 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5277 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5278 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc
5279 },
5280 };
5281
5282 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5283 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5284 { /* From RFC 3602 */
5285 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5286 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5287 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec
5288 },
5289 };
5290
5291 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5292 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5293 { /* Input data from RFC 2410 Case 1 */
5294 #ifdef __LITTLE_ENDIAN
5295 .key = "\x08\x00" /* rta length */
5296 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_
5297 },
5298 };
5299
5300 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5301 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5302 {
5303 #ifdef __LITTLE_ENDIAN
5304 .key = "\x08\x00" /* rta length */
5305 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5306 },
5307 };
5308
5309 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5310 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5311 { /* RFC 3602 Case 1 */
5312 #ifdef __LITTLE_ENDIAN
5313 .key = "\x08\x00" /* rta length */
5314 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes
5315 },
5316 };
5317
5318 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5319 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5320 { /* Input data from RFC 2410 Case 1 */
5321 #ifdef __LITTLE_ENDIAN
5322 .key = "\x08\x00" /* rta length */
5323 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb
5324 },
5325 };
5326
5327 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5328 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5329 {
5330 #ifdef __LITTLE_ENDIAN
5331 .key = "\x08\x00" /* rta length */
5332 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb
5333 },
5334 };
5335
5336 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5337 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5338 { /* RFC 3602 Case 1 */
5339 #ifdef __LITTLE_ENDIAN
5340 .key = "\x08\x00" /* rta length */
5341 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_a
5342 },
5343 };
5344
5345 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5346 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5347 { /* RFC 3602 Case 1 */
5348 #ifdef __LITTLE_ENDIAN
5349 .key = "\x08\x00" /* rta length */
5350 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_a
5351 },
5352 };
5353
5354 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5355 -
5356 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5357 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5358 { /*Generated with cryptopp*/
5359 #ifdef __LITTLE_ENDIAN
5360 .key = "\x08\x00" /* rta length */
5361 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des
5362 },
5363 };
5364
5365 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
5366 -
5367 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5368 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5369 { /*Generated with cryptopp*/
5370 #ifdef __LITTLE_ENDIAN
5371 .key = "\x08\x00" /* rta length */
5372 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_d
5373 },
5374 };
5375
5376 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
5377 -
5378 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5379 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5380 { /*Generated with cryptopp*/
5381 #ifdef __LITTLE_ENDIAN
5382 .key = "\x08\x00" /* rta length */
5383 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_d
5384 },
5385 };
5386
5387 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
5388 -
5389 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5390 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5391 { /*Generated with cryptopp*/
5392 #ifdef __LITTLE_ENDIAN
5393 .key = "\x08\x00" /* rta length */
5394 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_d
5395 },
5396 };
5397
5398 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
5399 -
5400 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5401 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5402 { /*Generated with cryptopp*/
5403 #ifdef __LITTLE_ENDIAN
5404 .key = "\x08\x00" /* rta length */
5405 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_d
5406 },
5407 };
5408
5409 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
5410 -
5411 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5412 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5413 { /*Generated with cryptopp*/
5414 #ifdef __LITTLE_ENDIAN
5415 .key = "\x08\x00" /* rta length */
5416 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des
5417 },
5418 };
5419
5420 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
5421 -
5422 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5423 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5424 { /*Generated with cryptopp*/
5425 #ifdef __LITTLE_ENDIAN
5426 .key = "\x08\x00" /* rta length */
5427 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_d
5428 },
5429 };
5430
5431 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
5432 -
5433 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5434 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5435 { /*Generated with cryptopp*/
5436 #ifdef __LITTLE_ENDIAN
5437 .key = "\x08\x00" /* rta length */
5438 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_d
5439 },
5440 };
5441
5442 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
5443 -
5444 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5445 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5446 { /*Generated with cryptopp*/
5447 #ifdef __LITTLE_ENDIAN
5448 .key = "\x08\x00" /* rta length */
5449 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_d
5450 },
5451 };
5452
5453 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
5454 -
5455 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5456 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5457 { /*Generated with cryptopp*/
5458 #ifdef __LITTLE_ENDIAN
5459 .key = "\x08\x00" /* rta length */
5460 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_d
5461 },
5462 };
5463
5464 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5465 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5466 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5467 { /* LRW-32-AES 1 */
5468 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5469 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc
5470 }
5471 };
5472
5473 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5474 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5475 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5476 /* same as enc vectors with input and result reversed */
5477 { /* LRW-32-AES 1 */
5478 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec
5479 }
5480 };
5481
5482 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5483 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5484 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5485 { /* XTS-AES 1 */
5486 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5487 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc
5488 }
5489 };
5490
5491 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5492 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5493 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5494 { /* XTS-AES 1 */
5495 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5496 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec
5497 };
5498
5499
5500 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5501 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5502 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5503 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5504 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5505 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc
5506 },
5507 };
5508
5509 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5510 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5511 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5512 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5513 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5514 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec
5515 },
5516 };
5517
5518 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5519 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5520 { /* From RFC 3686 */
5521 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5522 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5523 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc
5524 },
5525 };
5526
5527 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5528 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5529 { /* From RFC 3686 */
5530 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5531 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5532 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc
5533 },
5534 };
5535
5536 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5537 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5538 /* From NIST Special Publication 800-38A, Appendix F.5 */
5539 {
5540 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5541 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc
5542 }
5543 };
5544
5545 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5546 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5547 /* From NIST Special Publication 800-38A, Appendix F.5 */
5548 {
5549 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5550 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec
5551 }
5552 };
5553
5554 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5555 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5556 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5557 .key = zeroed_string,
5558 .klen = 16,
5559 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_t
5560 }
5561 };
5562
5563 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5564 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5565 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5566 .key = zeroed_string,
5567 .klen = 32,
5568 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_t
5569 }
5570 };
5571
5572 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5573 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5574 { /* Generated using Crypto++ */
5575 .key = zeroed_string,
5576 .klen = 20,
5577 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc41
5578 }
5579 };
5580
5581 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5582 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5583 { /* Generated using Crypto++ */
5584 .key = zeroed_string,
5585 .klen = 20,
5586 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc41
5587 }
5588 };
5589
5590 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5591 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5592 { /* From draft-mcgrew-gcm-test-01 */
5593 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5594 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5595 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc45
5596 }
5597 };
5598
5599 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5600 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5601 { /* From draft-mcgrew-gcm-test-01 */
5602 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5603 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5604 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc45
5605 },
5606 };
5607
5608 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5609 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5610 { /* From RFC 3610 */
5611 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5612 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5613 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_t
5614 }
5615 };
5616
5617 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5618 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5619 { /* From RFC 3610 */
5620 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5621 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5622 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_t
5623 * These vectors are copied/generated from the ones for rfc4106 with
5624 * the key truncated by one byte..
5625 */
5626 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5627 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5628 { /* Generated using Crypto++ */
5629 .key = zeroed_string,
5630 .klen = 19,
5631 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc43
5632 }
5633 };
5634
5635 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5636 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5637 { /* Generated using Crypto++ */
5638 .key = zeroed_string,
5639 .klen = 19,
5640 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc43
5641 /*
5642 * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5643 */
5644 -#define RFC7539_ENC_TEST_VECTORS 2
5645 -#define RFC7539_DEC_TEST_VECTORS 2
5646 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5647 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5648 {
5649 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5650 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5651 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_t
5652 },
5653 };
5654
5655 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5656 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5657 {
5658 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5659 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5660 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_t
5661 /*
5662 * draft-irtf-cfrg-chacha20-poly1305
5663 */
5664 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5665 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5666 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5667 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5668 {
5669 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5670 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5671 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_en
5672 },
5673 };
5674
5675 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5676 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5677 {
5678 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5679 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5680 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_de
5681 * semiblock of the ciphertext from the test vector. For decryption, iv is
5682 * the first semiblock of the ciphertext.
5683 */
5684 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5685 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5686 {
5687 .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5688 "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5689 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_
5690 },
5691 };
5692
5693 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5694 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5695 {
5696 .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5697 "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5698 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_
5699 * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5700 * Only AES-128 is supported at this time.
5701 */
5702 -#define ANSI_CPRNG_AES_TEST_VECTORS 6
5703 -
5704 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5705 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5706 {
5707 .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5708 "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5709 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_a
5710 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5711 * w/o personalization string, w/ and w/o additional input string).
5712 */
5713 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5714 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5715 {
5716 .entropy = (unsigned char *)
5717 "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5718 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha25
5719 },
5720 };
5721
5722 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5723 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5724 {
5725 .entropy = (unsigned char *)
5726 "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5727 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_
5728 },
5729 };
5730
5731 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5732 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5733 {
5734 .entropy = (unsigned char *)
5735 "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5736 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5737 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5738 * w/o personalization string, w/ and w/o additional input string).
5739 */
5740 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5741 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5742 {
5743 .entropy = (unsigned char *)
5744 "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5745 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha
5746 },
5747 };
5748
5749 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5750 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5751 {
5752 .entropy = (unsigned char *)
5753 "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5754 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hma
5755 },
5756 };
5757
5758 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5759 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5760 {
5761 .entropy = (unsigned char *)
5762 "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5763 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr
5764 },
5765 };
5766
5767 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5768 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5769 {
5770 .entropy = (unsigned char *)
5771 "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5772 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr
5773 },
5774 };
5775
5776 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5777 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5778 {
5779 .entropy = (unsigned char *)
5780 "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5781 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr
5782 };
5783
5784 /* Cast5 test vectors from RFC 2144 */
5785 -#define CAST5_ENC_TEST_VECTORS 4
5786 -#define CAST5_DEC_TEST_VECTORS 4
5787 -#define CAST5_CBC_ENC_TEST_VECTORS 1
5788 -#define CAST5_CBC_DEC_TEST_VECTORS 1
5789 -#define CAST5_CTR_ENC_TEST_VECTORS 2
5790 -#define CAST5_CTR_DEC_TEST_VECTORS 2
5791 -
5792 -static struct cipher_testvec cast5_enc_tv_template[] = {
5793 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5794 {
5795 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5796 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5797 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_t
5798 },
5799 };
5800
5801 -static struct cipher_testvec cast5_dec_tv_template[] = {
5802 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5803 {
5804 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5805 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5806 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_t
5807 },
5808 };
5809
5810 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5811 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5812 { /* Generated from TF test vectors */
5813 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5814 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5815 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_e
5816 },
5817 };
5818
5819 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5820 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5821 { /* Generated from TF test vectors */
5822 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5823 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5824 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_d
5825 },
5826 };
5827
5828 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5829 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5830 { /* Generated from TF test vectors */
5831 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5832 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5833 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_e
5834 },
5835 };
5836
5837 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5838 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5839 { /* Generated from TF test vectors */
5840 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5841 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5842 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_d
5843 /*
5844 * ARC4 test vectors from OpenSSL
5845 */
5846 -#define ARC4_ENC_TEST_VECTORS 7
5847 -#define ARC4_DEC_TEST_VECTORS 7
5848 -
5849 -static struct cipher_testvec arc4_enc_tv_template[] = {
5850 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5851 {
5852 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5853 .klen = 8,
5854 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv
5855 },
5856 };
5857
5858 -static struct cipher_testvec arc4_dec_tv_template[] = {
5859 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5860 {
5861 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5862 .klen = 8,
5863 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv
5864 /*
5865 * TEA test vectors
5866 */
5867 -#define TEA_ENC_TEST_VECTORS 4
5868 -#define TEA_DEC_TEST_VECTORS 4
5869 -
5870 -static struct cipher_testvec tea_enc_tv_template[] = {
5871 +static const struct cipher_testvec tea_enc_tv_template[] = {
5872 {
5873 .key = zeroed_string,
5874 .klen = 16,
5875 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_
5876 }
5877 };
5878
5879 -static struct cipher_testvec tea_dec_tv_template[] = {
5880 +static const struct cipher_testvec tea_dec_tv_template[] = {
5881 {
5882 .key = zeroed_string,
5883 .klen = 16,
5884 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_
5885 /*
5886 * XTEA test vectors
5887 */
5888 -#define XTEA_ENC_TEST_VECTORS 4
5889 -#define XTEA_DEC_TEST_VECTORS 4
5890 -
5891 -static struct cipher_testvec xtea_enc_tv_template[] = {
5892 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5893 {
5894 .key = zeroed_string,
5895 .klen = 16,
5896 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv
5897 }
5898 };
5899
5900 -static struct cipher_testvec xtea_dec_tv_template[] = {
5901 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5902 {
5903 .key = zeroed_string,
5904 .klen = 16,
5905 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv
5906 /*
5907 * KHAZAD test vectors.
5908 */
5909 -#define KHAZAD_ENC_TEST_VECTORS 5
5910 -#define KHAZAD_DEC_TEST_VECTORS 5
5911 -
5912 -static struct cipher_testvec khazad_enc_tv_template[] = {
5913 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5914 {
5915 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5916 "\x00\x00\x00\x00\x00\x00\x00\x00",
5917 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_
5918 },
5919 };
5920
5921 -static struct cipher_testvec khazad_dec_tv_template[] = {
5922 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5923 {
5924 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5925 "\x00\x00\x00\x00\x00\x00\x00\x00",
5926 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_
5927 * Anubis test vectors.
5928 */
5929
5930 -#define ANUBIS_ENC_TEST_VECTORS 5
5931 -#define ANUBIS_DEC_TEST_VECTORS 5
5932 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2
5933 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2
5934 -
5935 -static struct cipher_testvec anubis_enc_tv_template[] = {
5936 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5937 {
5938 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5939 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5940 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_
5941 },
5942 };
5943
5944 -static struct cipher_testvec anubis_dec_tv_template[] = {
5945 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5946 {
5947 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5948 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5949 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_
5950 },
5951 };
5952
5953 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5954 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5955 {
5956 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5957 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5958 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_
5959 },
5960 };
5961
5962 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5963 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5964 {
5965 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5966 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5967 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_
5968 /*
5969 * XETA test vectors
5970 */
5971 -#define XETA_ENC_TEST_VECTORS 4
5972 -#define XETA_DEC_TEST_VECTORS 4
5973 -
5974 -static struct cipher_testvec xeta_enc_tv_template[] = {
5975 +static const struct cipher_testvec xeta_enc_tv_template[] = {
5976 {
5977 .key = zeroed_string,
5978 .klen = 16,
5979 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv
5980 }
5981 };
5982
5983 -static struct cipher_testvec xeta_dec_tv_template[] = {
5984 +static const struct cipher_testvec xeta_dec_tv_template[] = {
5985 {
5986 .key = zeroed_string,
5987 .klen = 16,
5988 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv
5989 /*
5990 * FCrypt test vectors
5991 */
5992 -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
5993 -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
5994 -
5995 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
5996 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
5997 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
5998 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
5999 .klen = 8,
6000 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc
6001 }
6002 };
6003
6004 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6005 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6006 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6007 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6008 .klen = 8,
6009 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc
6010 /*
6011 * CAMELLIA test vectors.
6012 */
6013 -#define CAMELLIA_ENC_TEST_VECTORS 4
6014 -#define CAMELLIA_DEC_TEST_VECTORS 4
6015 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6016 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6017 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6018 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6019 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6020 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6021 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6022 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6023 -
6024 -static struct cipher_testvec camellia_enc_tv_template[] = {
6025 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6026 {
6027 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6028 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6029 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_en
6030 },
6031 };
6032
6033 -static struct cipher_testvec camellia_dec_tv_template[] = {
6034 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6035 {
6036 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6037 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6038 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_de
6039 },
6040 };
6041
6042 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6043 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6044 {
6045 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6046 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6047 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cb
6048 },
6049 };
6050
6051 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6052 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6053 {
6054 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6055 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6056 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cb
6057 },
6058 };
6059
6060 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6061 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6062 { /* Generated with Crypto++ */
6063 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6064 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6065 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ct
6066 },
6067 };
6068
6069 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6070 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6071 { /* Generated with Crypto++ */
6072 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6073 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6074 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ct
6075 },
6076 };
6077
6078 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6079 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6080 /* Generated from AES-LRW test vectors */
6081 {
6082 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6083 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lr
6084 },
6085 };
6086
6087 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6088 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6089 /* Generated from AES-LRW test vectors */
6090 /* same as enc vectors with input and result reversed */
6091 {
6092 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lr
6093 },
6094 };
6095
6096 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6097 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6098 /* Generated from AES-XTS test vectors */
6099 {
6100 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6101 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xt
6102 },
6103 };
6104
6105 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6106 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6107 /* Generated from AES-XTS test vectors */
6108 /* same as enc vectors with input and result reversed */
6109 {
6110 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xt
6111 /*
6112 * SEED test vectors
6113 */
6114 -#define SEED_ENC_TEST_VECTORS 4
6115 -#define SEED_DEC_TEST_VECTORS 4
6116 -
6117 -static struct cipher_testvec seed_enc_tv_template[] = {
6118 +static const struct cipher_testvec seed_enc_tv_template[] = {
6119 {
6120 .key = zeroed_string,
6121 .klen = 16,
6122 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv
6123 }
6124 };
6125
6126 -static struct cipher_testvec seed_dec_tv_template[] = {
6127 +static const struct cipher_testvec seed_dec_tv_template[] = {
6128 {
6129 .key = zeroed_string,
6130 .klen = 16,
6131 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv
6132 }
6133 };
6134
6135 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6136 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6137 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6138 /*
6139 * Testvectors from verified.test-vectors submitted to ECRYPT.
6140 * They are truncated to size 39, 64, 111, 129 to test a variety
6141 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_str
6142 },
6143 };
6144
6145 -#define CHACHA20_ENC_TEST_VECTORS 4
6146 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6147 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6148 { /* RFC7539 A.2. Test Vector #1 */
6149 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6150 "\x00\x00\x00\x00\x00\x00\x00\x00"
6151 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_en
6152 /*
6153 * CTS (Cipher Text Stealing) mode tests
6154 */
6155 -#define CTS_MODE_ENC_TEST_VECTORS 6
6156 -#define CTS_MODE_DEC_TEST_VECTORS 6
6157 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6158 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6159 { /* from rfc3962 */
6160 .klen = 16,
6161 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6162 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_en
6163 }
6164 };
6165
6166 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6167 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6168 { /* from rfc3962 */
6169 .klen = 16,
6170 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6171 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6172 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6173 */
6174
6175 -#define DEFLATE_COMP_TEST_VECTORS 2
6176 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6177 -
6178 -static struct comp_testvec deflate_comp_tv_template[] = {
6179 +static const struct comp_testvec deflate_comp_tv_template[] = {
6180 {
6181 .inlen = 70,
6182 .outlen = 38,
6183 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_
6184 },
6185 };
6186
6187 -static struct comp_testvec deflate_decomp_tv_template[] = {
6188 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6189 {
6190 .inlen = 122,
6191 .outlen = 191,
6192 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decom
6193 /*
6194 * LZO test vectors (null-terminated strings).
6195 */
6196 -#define LZO_COMP_TEST_VECTORS 2
6197 -#define LZO_DECOMP_TEST_VECTORS 2
6198 -
6199 -static struct comp_testvec lzo_comp_tv_template[] = {
6200 +static const struct comp_testvec lzo_comp_tv_template[] = {
6201 {
6202 .inlen = 70,
6203 .outlen = 57,
6204 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_t
6205 },
6206 };
6207
6208 -static struct comp_testvec lzo_decomp_tv_template[] = {
6209 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6210 {
6211 .inlen = 133,
6212 .outlen = 159,
6213 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv
6214 */
6215 #define MICHAEL_MIC_TEST_VECTORS 6
6216
6217 -static struct hash_testvec michael_mic_tv_template[] = {
6218 +static const struct hash_testvec michael_mic_tv_template[] = {
6219 {
6220 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6221 .ksize = 8,
6222 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_t
6223 /*
6224 * CRC32 test vectors
6225 */
6226 -#define CRC32_TEST_VECTORS 14
6227 -
6228 -static struct hash_testvec crc32_tv_template[] = {
6229 +static const struct hash_testvec crc32_tv_template[] = {
6230 {
6231 .key = "\x87\xa9\xcb\xed",
6232 .ksize = 4,
6233 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_temp
6234 /*
6235 * CRC32C test vectors
6236 */
6237 -#define CRC32C_TEST_VECTORS 15
6238 -
6239 -static struct hash_testvec crc32c_tv_template[] = {
6240 +static const struct hash_testvec crc32c_tv_template[] = {
6241 {
6242 .psize = 0,
6243 .digest = "\x00\x00\x00\x00",
6244 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_tem
6245 /*
6246 * Blakcifn CRC test vectors
6247 */
6248 -#define BFIN_CRC_TEST_VECTORS 6
6249 -
6250 -static struct hash_testvec bfin_crc_tv_template[] = {
6251 +static const struct hash_testvec bfin_crc_tv_template[] = {
6252 {
6253 .psize = 0,
6254 .digest = "\x00\x00\x00\x00",
6255 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_t
6256
6257 };
6258
6259 -#define LZ4_COMP_TEST_VECTORS 1
6260 -#define LZ4_DECOMP_TEST_VECTORS 1
6261 -
6262 static struct comp_testvec lz4_comp_tv_template[] = {
6263 {
6264 .inlen = 70,
6265 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv
6266 },
6267 };
6268
6269 -#define LZ4HC_COMP_TEST_VECTORS 1
6270 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6271 -
6272 static struct comp_testvec lz4hc_comp_tv_template[] = {
6273 {
6274 .inlen = 70,
6275 --- /dev/null
6276 +++ b/crypto/tls.c
6277 @@ -0,0 +1,607 @@
6278 +/*
6279 + * Copyright 2013 Freescale Semiconductor, Inc.
6280 + * Copyright 2017 NXP Semiconductor, Inc.
6281 + *
6282 + * This program is free software; you can redistribute it and/or modify it
6283 + * under the terms of the GNU General Public License as published by the Free
6284 + * Software Foundation; either version 2 of the License, or (at your option)
6285 + * any later version.
6286 + *
6287 + */
6288 +
6289 +#include <crypto/internal/aead.h>
6290 +#include <crypto/internal/hash.h>
6291 +#include <crypto/internal/skcipher.h>
6292 +#include <crypto/authenc.h>
6293 +#include <crypto/null.h>
6294 +#include <crypto/scatterwalk.h>
6295 +#include <linux/err.h>
6296 +#include <linux/init.h>
6297 +#include <linux/module.h>
6298 +#include <linux/rtnetlink.h>
6299 +
6300 +struct tls_instance_ctx {
6301 + struct crypto_ahash_spawn auth;
6302 + struct crypto_skcipher_spawn enc;
6303 +};
6304 +
6305 +struct crypto_tls_ctx {
6306 + unsigned int reqoff;
6307 + struct crypto_ahash *auth;
6308 + struct crypto_skcipher *enc;
6309 + struct crypto_skcipher *null;
6310 +};
6311 +
6312 +struct tls_request_ctx {
6313 + /*
6314 + * cryptlen holds the payload length in the case of encryption or
6315 + * payload_len + icv_len + padding_len in case of decryption
6316 + */
6317 + unsigned int cryptlen;
6318 + /* working space for partial results */
6319 + struct scatterlist tmp[2];
6320 + struct scatterlist cipher[2];
6321 + struct scatterlist dst[2];
6322 + char tail[];
6323 +};
6324 +
6325 +struct async_op {
6326 + struct completion completion;
6327 + int err;
6328 +};
6329 +
6330 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6331 +{
6332 + struct async_op *areq = req->data;
6333 +
6334 + if (err == -EINPROGRESS)
6335 + return;
6336 +
6337 + areq->err = err;
6338 + complete(&areq->completion);
6339 +}
6340 +
6341 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6342 + unsigned int keylen)
6343 +{
6344 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6345 + struct crypto_ahash *auth = ctx->auth;
6346 + struct crypto_skcipher *enc = ctx->enc;
6347 + struct crypto_authenc_keys keys;
6348 + int err = -EINVAL;
6349 +
6350 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6351 + goto badkey;
6352 +
6353 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6354 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6355 + CRYPTO_TFM_REQ_MASK);
6356 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6357 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6358 + CRYPTO_TFM_RES_MASK);
6359 +
6360 + if (err)
6361 + goto out;
6362 +
6363 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6364 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6365 + CRYPTO_TFM_REQ_MASK);
6366 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6367 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6368 + CRYPTO_TFM_RES_MASK);
6369 +
6370 +out:
6371 + return err;
6372 +
6373 +badkey:
6374 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6375 + goto out;
6376 +}
6377 +
6378 +/**
6379 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6380 + * @hash: (output) buffer to save the digest into
6381 + * @src: (input) scatterlist with the assoc and payload data
6382 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
6383 + * @req: (input) aead request
6384 + **/
6385 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6386 + unsigned int srclen, struct aead_request *req)
6387 +{
6388 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6389 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6390 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6391 + struct async_op ahash_op;
6392 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6393 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6394 + int err = -EBADMSG;
6395 +
6396 + /* Bail out if the request assoc len is 0 */
6397 + if (!req->assoclen)
6398 + return err;
6399 +
6400 + init_completion(&ahash_op.completion);
6401 +
6402 + /* the hash transform to be executed comes from the original request */
6403 + ahash_request_set_tfm(ahreq, ctx->auth);
6404 + /* prepare the hash request with input data and result pointer */
6405 + ahash_request_set_crypt(ahreq, src, hash, srclen);
6406 + /* set the notifier for when the async hash function returns */
6407 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6408 + tls_async_op_done, &ahash_op);
6409 +
6410 + /* Calculate the digest on the given data. The result is put in hash */
6411 + err = crypto_ahash_digest(ahreq);
6412 + if (err == -EINPROGRESS) {
6413 + err = wait_for_completion_interruptible(&ahash_op.completion);
6414 + if (!err)
6415 + err = ahash_op.err;
6416 + }
6417 +
6418 + return err;
6419 +}
6420 +
6421 +/**
6422 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6423 + * @hash: (output) buffer to save the digest and padding into
6424 + * @phashlen: (output) the size of digest + padding
6425 + * @req: (input) aead request
6426 + **/
6427 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6428 + struct aead_request *req)
6429 +{
6430 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6431 + unsigned int hash_size = crypto_aead_authsize(tls);
6432 + unsigned int block_size = crypto_aead_blocksize(tls);
6433 + unsigned int srclen = req->cryptlen + hash_size;
6434 + unsigned int icvlen = req->cryptlen + req->assoclen;
6435 + unsigned int padlen;
6436 + int err;
6437 +
6438 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
6439 + if (err)
6440 + goto out;
6441 +
6442 + /* add padding after digest */
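+	/*
+	 * e.g. an 8-byte block and srclen = 14 give padlen = 2, so two 0x01
+	 * bytes are appended; a srclen already at a block boundary gets a
+	 * full block of padding, as TLS 1.0 requires.
+	 */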
6443 + padlen = block_size - (srclen % block_size);
6444 + memset(hash + hash_size, padlen - 1, padlen);
6445 +
6446 + *phashlen = hash_size + padlen;
6447 +out:
6448 + return err;
6449 +}
6450 +
6451 +static int crypto_tls_copy_data(struct aead_request *req,
6452 + struct scatterlist *src,
6453 + struct scatterlist *dst,
6454 + unsigned int len)
6455 +{
6456 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6457 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6458 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6459 +
6460 + skcipher_request_set_tfm(skreq, ctx->null);
6461 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6462 + NULL, NULL);
6463 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6464 +
6465 + return crypto_skcipher_encrypt(skreq);
6466 +}
6467 +
6468 +static int crypto_tls_encrypt(struct aead_request *req)
6469 +{
6470 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6471 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6472 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6473 + struct skcipher_request *skreq;
6474 + struct scatterlist *cipher = treq_ctx->cipher;
6475 + struct scatterlist *tmp = treq_ctx->tmp;
6476 + struct scatterlist *sg, *src, *dst;
6477 + unsigned int cryptlen, phashlen;
6478 + u8 *hash = treq_ctx->tail;
6479 + int err;
6480 +
6481 + /*
6482 + * The hash result is saved at the beginning of the tls request ctx
6483 + * and is aligned as required by the hash transform. Enough space was
6484 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
6485 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6486 + * the result is not overwritten by the second (cipher) request.
6487 + */
6488 + hash = (u8 *)ALIGN((unsigned long)hash +
6489 + crypto_ahash_alignmask(ctx->auth),
6490 + crypto_ahash_alignmask(ctx->auth) + 1);
6491 +
6492 + /*
6493 + * STEP 1: create ICV together with necessary padding
6494 + */
6495 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
6496 + if (err)
6497 + return err;
6498 +
6499 + /*
6500 + * STEP 2: Hash and padding are combined with the payload
6501 +	 * depending on the form in which it arrives. Scatter tables must have at least
6502 + * one page of data before chaining with another table and can't have
6503 + * an empty data page. The following code addresses these requirements.
6504 + *
6505 + * If the payload is empty, only the hash is encrypted, otherwise the
6506 + * payload scatterlist is merged with the hash. A special merging case
6507 + * is when the payload has only one page of data. In that case the
6508 + * payload page is moved to another scatterlist and prepared there for
6509 + * encryption.
6510 + */
6511 + if (req->cryptlen) {
6512 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6513 +
6514 + sg_init_table(cipher, 2);
6515 + sg_set_buf(cipher + 1, hash, phashlen);
6516 +
6517 + if (sg_is_last(src)) {
6518 + sg_set_page(cipher, sg_page(src), req->cryptlen,
6519 + src->offset);
6520 + src = cipher;
6521 + } else {
6522 + unsigned int rem_len = req->cryptlen;
6523 +
6524 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6525 + rem_len -= min(rem_len, sg->length);
6526 +
6527 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6528 + sg_chain(sg, 1, cipher);
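+			/*
+			 * the last payload entry now chains to cipher[]: the
+			 * remaining payload bytes followed by hash + padding
+			 */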
6529 + }
6530 + } else {
6531 + sg_init_one(cipher, hash, phashlen);
6532 + src = cipher;
6533 + }
6534 +
6535 +	/*
6536 +	 * If src != dst, copy the associated data from source to destination.
6537 +	 * In both cases fast-forward past the associated data in the dest.
6538 + */
6539 + if (req->src != req->dst) {
6540 + err = crypto_tls_copy_data(req, req->src, req->dst,
6541 + req->assoclen);
6542 + if (err)
6543 + return err;
6544 + }
6545 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6546 +
6547 + /*
6548 + * STEP 3: encrypt the frame and return the result
6549 + */
6550 + cryptlen = req->cryptlen + phashlen;
6551 +
6552 + /*
6553 + * The hash and the cipher are applied at different times and their
6554 + * requests can use the same memory space without interference
6555 + */
6556 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6557 + skcipher_request_set_tfm(skreq, ctx->enc);
6558 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6559 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6560 + req->base.complete, req->base.data);
6561 + /*
6562 + * Apply the cipher transform. The result will be in req->dst when the
6563 +	 * asynchronous call terminates
6564 + */
6565 + err = crypto_skcipher_encrypt(skreq);
6566 +
6567 + return err;
6568 +}
6569 +
6570 +static int crypto_tls_decrypt(struct aead_request *req)
6571 +{
6572 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6573 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6574 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6575 + unsigned int cryptlen = req->cryptlen;
6576 + unsigned int hash_size = crypto_aead_authsize(tls);
6577 + unsigned int block_size = crypto_aead_blocksize(tls);
6578 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6579 + struct scatterlist *tmp = treq_ctx->tmp;
6580 + struct scatterlist *src, *dst;
6581 +
6582 + u8 padding[255]; /* padding can be 0-255 bytes */
6583 + u8 pad_size;
6584 + u16 *len_field;
6585 + u8 *ihash, *hash = treq_ctx->tail;
6586 +
6587 + int paderr = 0;
6588 + int err = -EINVAL;
6589 + int i;
6590 + struct async_op ciph_op;
6591 +
6592 + /*
6593 + * Rule out bad packets. The input packet length must be at least one
6594 + * byte more than the hash_size
6595 + */
6596 + if (cryptlen <= hash_size || cryptlen % block_size)
6597 + goto out;
6598 +
6599 + /*
6600 + * Step 1 - Decrypt the source. Fast-forward past the associated data
6601 + * to the encrypted data. The result will be overwritten in place so
6602 + * that the decrypted data will be adjacent to the associated data. The
6603 +	 * last step (computing the hash) will have its input data already
6604 + * prepared and ready to be accessed at req->src.
6605 + */
6606 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6607 + dst = src;
6608 +
6609 + init_completion(&ciph_op.completion);
6610 + skcipher_request_set_tfm(skreq, ctx->enc);
6611 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6612 + tls_async_op_done, &ciph_op);
6613 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6614 + err = crypto_skcipher_decrypt(skreq);
6615 + if (err == -EINPROGRESS) {
6616 + err = wait_for_completion_interruptible(&ciph_op.completion);
6617 + if (!err)
6618 + err = ciph_op.err;
6619 + }
6620 + if (err)
6621 + goto out;
6622 +
6623 + /*
6624 + * Step 2 - Verify padding
6625 + * Retrieve the last byte of the payload; this is the padding size.
6626 + */
6627 + cryptlen -= 1;
6628 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6629 +
6630 + /* RFC recommendation for invalid padding size. */
6631 + if (cryptlen < pad_size + hash_size) {
6632 + pad_size = 0;
6633 + paderr = -EBADMSG;
6634 + }
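+	/*
+	 * Keep going with pad_size = 0 so the MAC check below still runs;
+	 * the padding error is only reported at the end.
+	 */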
6635 + cryptlen -= pad_size;
6636 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6637 +
6638 +	/* Every padding byte must be equal to pad_size. We verify them all */
6639 + for (i = 0; i < pad_size; i++)
6640 + if (padding[i] != pad_size)
6641 + paderr = -EBADMSG;
6642 +
6643 + /*
6644 + * Step 3 - Verify hash
6645 + * Align the digest result as required by the hash transform. Enough
6646 + * space was allocated in crypto_tls_init_tfm
6647 + */
6648 + hash = (u8 *)ALIGN((unsigned long)hash +
6649 + crypto_ahash_alignmask(ctx->auth),
6650 + crypto_ahash_alignmask(ctx->auth) + 1);
6651 + /*
6652 + * Two bytes at the end of the associated data make the length field.
6653 + * It must be updated with the length of the cleartext message before
6654 + * the hash is calculated.
6655 + */
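+	/*
+	 * (this assumes the associated data sits contiguously in the first
+	 * src segment, so sg_virt() can reach the length field)
+	 */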
6656 + len_field = sg_virt(req->src) + req->assoclen - 2;
6657 + cryptlen -= hash_size;
6658 + *len_field = htons(cryptlen);
6659 +
6660 + /* This is the hash from the decrypted packet. Save it for later */
6661 + ihash = hash + hash_size;
6662 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6663 +
6664 + /* Now compute and compare our ICV with the one from the packet */
6665 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6666 + if (!err)
6667 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6668 +
6669 + if (req->src != req->dst) {
6670 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6671 + req->assoclen);
6672 + if (err)
6673 + goto out;
6674 + }
6675 +
6676 + /* return the first found error */
6677 + if (paderr)
6678 + err = paderr;
6679 +
6680 +out:
6681 + aead_request_complete(req, err);
6682 + return err;
6683 +}
6684 +
6685 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6686 +{
6687 + struct aead_instance *inst = aead_alg_instance(tfm);
6688 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6689 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6690 + struct crypto_ahash *auth;
6691 + struct crypto_skcipher *enc;
6692 + struct crypto_skcipher *null;
6693 + int err;
6694 +
6695 + auth = crypto_spawn_ahash(&ictx->auth);
6696 + if (IS_ERR(auth))
6697 + return PTR_ERR(auth);
6698 +
6699 + enc = crypto_spawn_skcipher(&ictx->enc);
6700 + err = PTR_ERR(enc);
6701 + if (IS_ERR(enc))
6702 + goto err_free_ahash;
6703 +
6704 + null = crypto_get_default_null_skcipher2();
6705 + err = PTR_ERR(null);
6706 + if (IS_ERR(null))
6707 + goto err_free_skcipher;
6708 +
6709 + ctx->auth = auth;
6710 + ctx->enc = enc;
6711 + ctx->null = null;
6712 +
6713 + /*
6714 + * Allow enough space for two digests. The two digests will be compared
6715 + * during the decryption phase. One will come from the decrypted packet
6716 + * and the other will be calculated. For encryption, one digest is
6717 + * padded (up to a cipher blocksize) and chained with the payload
6718 + */
6719 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6720 + crypto_ahash_alignmask(auth),
6721 + crypto_ahash_alignmask(auth) + 1) +
6722 + max(crypto_ahash_digestsize(auth),
6723 + crypto_skcipher_blocksize(enc));
6724 +
6725 + crypto_aead_set_reqsize(tfm,
6726 + sizeof(struct tls_request_ctx) +
6727 + ctx->reqoff +
6728 + max_t(unsigned int,
6729 + crypto_ahash_reqsize(auth) +
6730 + sizeof(struct ahash_request),
6731 + crypto_skcipher_reqsize(enc) +
6732 + sizeof(struct skcipher_request)));
6733 +
6734 + return 0;
6735 +
6736 +err_free_skcipher:
6737 + crypto_free_skcipher(enc);
6738 +err_free_ahash:
6739 + crypto_free_ahash(auth);
6740 + return err;
6741 +}
6742 +
6743 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6744 +{
6745 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6746 +
6747 + crypto_free_ahash(ctx->auth);
6748 + crypto_free_skcipher(ctx->enc);
6749 + crypto_put_default_null_skcipher2();
6750 +}
6751 +
6752 +static void crypto_tls_free(struct aead_instance *inst)
6753 +{
6754 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6755 +
6756 + crypto_drop_skcipher(&ctx->enc);
6757 + crypto_drop_ahash(&ctx->auth);
6758 + kfree(inst);
6759 +}
6760 +
6761 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6762 +{
6763 + struct crypto_attr_type *algt;
6764 + struct aead_instance *inst;
6765 + struct hash_alg_common *auth;
6766 + struct crypto_alg *auth_base;
6767 + struct skcipher_alg *enc;
6768 + struct tls_instance_ctx *ctx;
6769 + const char *enc_name;
6770 + int err;
6771 +
6772 + algt = crypto_get_attr_type(tb);
6773 + if (IS_ERR(algt))
6774 + return PTR_ERR(algt);
6775 +
6776 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6777 + return -EINVAL;
6778 +
6779 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6780 + CRYPTO_ALG_TYPE_AHASH_MASK |
6781 + crypto_requires_sync(algt->type, algt->mask));
6782 + if (IS_ERR(auth))
6783 + return PTR_ERR(auth);
6784 +
6785 + auth_base = &auth->base;
6786 +
6787 + enc_name = crypto_attr_alg_name(tb[2]);
6788 + err = PTR_ERR(enc_name);
6789 + if (IS_ERR(enc_name))
6790 + goto out_put_auth;
6791 +
6792 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6793 + err = -ENOMEM;
6794 + if (!inst)
6795 + goto out_put_auth;
6796 +
6797 + ctx = aead_instance_ctx(inst);
6798 +
6799 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
6800 + aead_crypto_instance(inst));
6801 + if (err)
6802 + goto err_free_inst;
6803 +
6804 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6805 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6806 + crypto_requires_sync(algt->type,
6807 + algt->mask));
6808 + if (err)
6809 + goto err_drop_auth;
6810 +
6811 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
6812 +
6813 + err = -ENAMETOOLONG;
6814 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6815 + "tls10(%s,%s)", auth_base->cra_name,
6816 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6817 + goto err_drop_enc;
6818 +
6819 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6820 + "tls10(%s,%s)", auth_base->cra_driver_name,
6821 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6822 + goto err_drop_enc;
6823 +
6824 + inst->alg.base.cra_flags = (auth_base->cra_flags |
6825 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6826 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6827 + auth_base->cra_priority;
6828 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6829 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6830 + enc->base.cra_alignmask;
6831 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6832 +
6833 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6834 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6835 + inst->alg.maxauthsize = auth->digestsize;
6836 +
6837 + inst->alg.init = crypto_tls_init_tfm;
6838 + inst->alg.exit = crypto_tls_exit_tfm;
6839 +
6840 + inst->alg.setkey = crypto_tls_setkey;
6841 + inst->alg.encrypt = crypto_tls_encrypt;
6842 + inst->alg.decrypt = crypto_tls_decrypt;
6843 +
6844 + inst->free = crypto_tls_free;
6845 +
6846 + err = aead_register_instance(tmpl, inst);
6847 + if (err)
6848 + goto err_drop_enc;
6849 +
6850 +out:
6851 + crypto_mod_put(auth_base);
6852 + return err;
6853 +
6854 +err_drop_enc:
6855 + crypto_drop_skcipher(&ctx->enc);
6856 +err_drop_auth:
6857 + crypto_drop_ahash(&ctx->auth);
6858 +err_free_inst:
6859 + kfree(inst);
6860 +out_put_auth:
6861 + goto out;
6862 +}
6863 +
6864 +static struct crypto_template crypto_tls_tmpl = {
6865 + .name = "tls10",
6866 + .create = crypto_tls_create,
6867 + .module = THIS_MODULE,
6868 +};
6869 +
6870 +static int __init crypto_tls_module_init(void)
6871 +{
6872 + return crypto_register_template(&crypto_tls_tmpl);
6873 +}
6874 +
6875 +static void __exit crypto_tls_module_exit(void)
6876 +{
6877 + crypto_unregister_template(&crypto_tls_tmpl);
6878 +}
6879 +
6880 +module_init(crypto_tls_module_init);
6881 +module_exit(crypto_tls_module_exit);
6882 +
6883 +MODULE_LICENSE("GPL");
6884 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
6885 --- a/drivers/crypto/caam/Kconfig
6886 +++ b/drivers/crypto/caam/Kconfig
6887 @@ -1,6 +1,11 @@
6888 +config CRYPTO_DEV_FSL_CAAM_COMMON
6889 + tristate
6890 +
6891 config CRYPTO_DEV_FSL_CAAM
6892 - tristate "Freescale CAAM-Multicore driver backend"
6893 + tristate "Freescale CAAM-Multicore platform driver backend"
6894 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6895 + select CRYPTO_DEV_FSL_CAAM_COMMON
6896 + select SOC_BUS
6897 help
6898 Enables the driver module for Freescale's Cryptographic Accelerator
6899 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6900 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6901 To compile this driver as a module, choose M here: the module
6902 will be called caam.
6903
6904 +if CRYPTO_DEV_FSL_CAAM
6905 +
6906 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6907 + bool "Enable debug output in CAAM driver"
6908 + help
6909 + Selecting this will enable printing of various debug
6910 + information in the CAAM driver.
6911 +
6912 config CRYPTO_DEV_FSL_CAAM_JR
6913 tristate "Freescale CAAM Job Ring driver backend"
6914 - depends on CRYPTO_DEV_FSL_CAAM
6915 default y
6916 help
6917 Enables the driver module for Job Rings which are part of
6918 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6919 To compile this driver as a module, choose M here: the module
6920 will be called caam_jr.
6921
6922 +if CRYPTO_DEV_FSL_CAAM_JR
6923 +
6924 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6925 int "Job Ring size"
6926 - depends on CRYPTO_DEV_FSL_CAAM_JR
6927 range 2 9
6928 default "9"
6929 help
6930 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6931
6932 config CRYPTO_DEV_FSL_CAAM_INTC
6933 bool "Job Ring interrupt coalescing"
6934 - depends on CRYPTO_DEV_FSL_CAAM_JR
6935 help
6936 Enable the Job Ring's interrupt coalescing feature.
6937
6938 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6939
6940 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6941 tristate "Register algorithm implementations with the Crypto API"
6942 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6943 default y
6944 select CRYPTO_AEAD
6945 select CRYPTO_AUTHENC
6946 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6947 To compile this as a module, choose M here: the module
6948 will be called caamalg.
6949
6950 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6951 + tristate "Queue Interface as Crypto API backend"
6952 + depends on FSL_SDK_DPA && NET
6953 + default y
6954 + select CRYPTO_AUTHENC
6955 + select CRYPTO_BLKCIPHER
6956 + help
6957 + Selecting this will use CAAM Queue Interface (QI) for sending
6958 + & receiving crypto jobs to/from CAAM. This gives better performance
6959 +	  than the job ring interface when the number of cores is greater than
6960 +	  the number of job rings assigned to the kernel. The number of portals
6961 +	  assigned to the kernel should also be greater than the number of
6962 + job rings.
6963 +
6964 + To compile this as a module, choose M here: the module
6965 + will be called caamalg_qi.
6966 +
6967 config CRYPTO_DEV_FSL_CAAM_AHASH_API
6968 tristate "Register hash algorithm implementations with Crypto API"
6969 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6970 default y
6971 select CRYPTO_HASH
6972 help
6973 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
6974
6975 config CRYPTO_DEV_FSL_CAAM_PKC_API
6976 tristate "Register public key cryptography implementations with Crypto API"
6977 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6978 default y
6979 select CRYPTO_RSA
6980 help
6981 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
6982
6983 config CRYPTO_DEV_FSL_CAAM_RNG_API
6984 tristate "Register caam device for hwrng API"
6985 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6986 default y
6987 select CRYPTO_RNG
6988 select HW_RANDOM
6989 @@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
6990 To compile this as a module, choose M here: the module
6991 will be called caamrng.
6992
6993 -config CRYPTO_DEV_FSL_CAAM_IMX
6994 - def_bool SOC_IMX6 || SOC_IMX7D
6995 - depends on CRYPTO_DEV_FSL_CAAM
6996 +endif # CRYPTO_DEV_FSL_CAAM_JR
6997
6998 -config CRYPTO_DEV_FSL_CAAM_DEBUG
6999 - bool "Enable debug output in CAAM driver"
7000 - depends on CRYPTO_DEV_FSL_CAAM
7001 - help
7002 - Selecting this will enable printing of various debug
7003 - information in the CAAM driver.
7004 +endif # CRYPTO_DEV_FSL_CAAM
7005 +
7006 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7007 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7008 + depends on FSL_MC_DPIO
7009 + select CRYPTO_DEV_FSL_CAAM_COMMON
7010 + select CRYPTO_BLKCIPHER
7011 + select CRYPTO_AUTHENC
7012 + select CRYPTO_AEAD
7013 + ---help---
7014 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7015 + It handles DPSECI DPAA2 objects that sit on the Management Complex
7016 + (MC) fsl-mc bus.
7017 +
7018 + To compile this as a module, choose M here: the module
7019 + will be called dpaa2_caam.
7020 +
7021 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7022 + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7023 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7024 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7025 --- a/drivers/crypto/caam/Makefile
7026 +++ b/drivers/crypto/caam/Makefile
7027 @@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7028 ccflags-y := -DDEBUG
7029 endif
7030
7031 +ccflags-y += -DVERSION=\"\"
7032 +
7033 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7034 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7035 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7036 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7037 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7038 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7039 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7040 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7041 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7042
7043 caam-objs := ctrl.o
7044 -caam_jr-objs := jr.o key_gen.o error.o
7045 +caam_jr-objs := jr.o key_gen.o
7046 caam_pkc-y := caampkc.o pkc_desc.o
7047 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7048 + ccflags-y += -DCONFIG_CAAM_QI
7049 + caam-objs += qi.o
7050 +endif
7051 +
7052 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7053 +
7054 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
7055 --- a/drivers/crypto/caam/caamalg.c
7056 +++ b/drivers/crypto/caam/caamalg.c
7057 @@ -2,6 +2,7 @@
7058 * caam - Freescale FSL CAAM support for crypto API
7059 *
7060 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7061 + * Copyright 2016 NXP
7062 *
7063 * Based on talitos crypto API driver.
7064 *
7065 @@ -53,6 +54,7 @@
7066 #include "error.h"
7067 #include "sg_sw_sec4.h"
7068 #include "key_gen.h"
7069 +#include "caamalg_desc.h"
7070
7071 /*
7072 * crypto alg
7073 @@ -62,8 +64,6 @@
7074 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
7075 CTR_RFC3686_NONCE_SIZE + \
7076 SHA512_DIGEST_SIZE * 2)
7077 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7078 -#define CAAM_MAX_IV_LENGTH 16
7079
7080 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7081 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7082 @@ -71,37 +71,6 @@
7083 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7084 CAAM_CMD_SZ * 5)
7085
7086 -/* length of descriptors text */
7087 -#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
7088 -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7089 -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7090 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7091 -
7092 -/* Note: Nonce is counted in enckeylen */
7093 -#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
7094 -
7095 -#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
7096 -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7097 -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7098 -
7099 -#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
7100 -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7101 -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7102 -
7103 -#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
7104 -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7105 -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7106 -
7107 -#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
7108 -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7109 -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7110 -
7111 -#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
7112 -#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
7113 - 20 * CAAM_CMD_SZ)
7114 -#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
7115 - 15 * CAAM_CMD_SZ)
7116 -
7117 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7118 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7119
7120 @@ -112,47 +81,11 @@
7121 #define debug(format, arg...)
7122 #endif
7123
7124 -#ifdef DEBUG
7125 -#include <linux/highmem.h>
7126 -
7127 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7128 - int prefix_type, int rowsize, int groupsize,
7129 - struct scatterlist *sg, size_t tlen, bool ascii,
7130 - bool may_sleep)
7131 -{
7132 - struct scatterlist *it;
7133 - void *it_page;
7134 - size_t len;
7135 - void *buf;
7136 -
7137 - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7138 - /*
7139 - * make sure the scatterlist's page
7140 - * has a valid virtual memory mapping
7141 - */
7142 - it_page = kmap_atomic(sg_page(it));
7143 - if (unlikely(!it_page)) {
7144 - printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7145 - return;
7146 - }
7147 -
7148 - buf = it_page + it->offset;
7149 - len = min_t(size_t, tlen, it->length);
7150 - print_hex_dump(level, prefix_str, prefix_type, rowsize,
7151 - groupsize, buf, len, ascii);
7152 - tlen -= len;
7153 -
7154 - kunmap_atomic(it_page);
7155 - }
7156 -}
7157 -#endif
7158 -
7159 static struct list_head alg_list;
7160
7161 struct caam_alg_entry {
7162 int class1_alg_type;
7163 int class2_alg_type;
7164 - int alg_op;
7165 bool rfc3686;
7166 bool geniv;
7167 };
7168 @@ -163,302 +96,67 @@ struct caam_aead_alg {
7169 bool registered;
7170 };
7171
7172 -/* Set DK bit in class 1 operation if shared */
7173 -static inline void append_dec_op1(u32 *desc, u32 type)
7174 -{
7175 - u32 *jump_cmd, *uncond_jump_cmd;
7176 -
7177 - /* DK bit is valid only for AES */
7178 - if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7179 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7180 - OP_ALG_DECRYPT);
7181 - return;
7182 - }
7183 -
7184 - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7185 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7186 - OP_ALG_DECRYPT);
7187 - uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7188 - set_jump_tgt_here(desc, jump_cmd);
7189 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7190 - OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7191 - set_jump_tgt_here(desc, uncond_jump_cmd);
7192 -}
7193 -
7194 -/*
7195 - * For aead functions, read payload and write payload,
7196 - * both of which are specified in req->src and req->dst
7197 - */
7198 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7199 -{
7200 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7201 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7202 - KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7203 -}
7204 -
7205 -/*
7206 - * For ablkcipher encrypt and decrypt, read from req->src and
7207 - * write to req->dst
7208 - */
7209 -static inline void ablkcipher_append_src_dst(u32 *desc)
7210 -{
7211 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7212 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7213 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7214 - KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7215 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7216 -}
7217 -
7218 /*
7219 * per-session context
7220 */
7221 struct caam_ctx {
7222 - struct device *jrdev;
7223 u32 sh_desc_enc[DESC_MAX_USED_LEN];
7224 u32 sh_desc_dec[DESC_MAX_USED_LEN];
7225 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7226 + u8 key[CAAM_MAX_KEY_SIZE];
7227 dma_addr_t sh_desc_enc_dma;
7228 dma_addr_t sh_desc_dec_dma;
7229 dma_addr_t sh_desc_givenc_dma;
7230 - u32 class1_alg_type;
7231 - u32 class2_alg_type;
7232 - u32 alg_op;
7233 - u8 key[CAAM_MAX_KEY_SIZE];
7234 dma_addr_t key_dma;
7235 - unsigned int enckeylen;
7236 - unsigned int split_key_len;
7237 - unsigned int split_key_pad_len;
7238 + struct device *jrdev;
7239 + struct alginfo adata;
7240 + struct alginfo cdata;
7241 unsigned int authsize;
7242 };
7243
7244 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7245 - int keys_fit_inline, bool is_rfc3686)
7246 -{
7247 - u32 *nonce;
7248 - unsigned int enckeylen = ctx->enckeylen;
7249 -
7250 - /*
7251 - * RFC3686 specific:
7252 - * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7253 - * | enckeylen = encryption key size + nonce size
7254 - */
7255 - if (is_rfc3686)
7256 - enckeylen -= CTR_RFC3686_NONCE_SIZE;
7257 -
7258 - if (keys_fit_inline) {
7259 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7260 - ctx->split_key_len, CLASS_2 |
7261 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7262 - append_key_as_imm(desc, (void *)ctx->key +
7263 - ctx->split_key_pad_len, enckeylen,
7264 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7265 - } else {
7266 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7267 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7268 - append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7269 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7270 - }
7271 -
7272 - /* Load Counter into CONTEXT1 reg */
7273 - if (is_rfc3686) {
7274 - nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7275 - enckeylen);
7276 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7277 - LDST_CLASS_IND_CCB |
7278 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7279 - append_move(desc,
7280 - MOVE_SRC_OUTFIFO |
7281 - MOVE_DEST_CLASS1CTX |
7282 - (16 << MOVE_OFFSET_SHIFT) |
7283 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7284 - }
7285 -}
7286 -
7287 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7288 - int keys_fit_inline, bool is_rfc3686)
7289 -{
7290 - u32 *key_jump_cmd;
7291 -
7292 - /* Note: Context registers are saved. */
7293 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7294 -
7295 - /* Skip if already shared */
7296 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7297 - JUMP_COND_SHRD);
7298 -
7299 - append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7300 -
7301 - set_jump_tgt_here(desc, key_jump_cmd);
7302 -}
7303 -
7304 static int aead_null_set_sh_desc(struct crypto_aead *aead)
7305 {
7306 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7307 struct device *jrdev = ctx->jrdev;
7308 - bool keys_fit_inline = false;
7309 - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7310 u32 *desc;
7311 + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7312 + ctx->adata.keylen_pad;
7313
7314 /*
7315 * Job Descriptor and Shared Descriptors
7316 * must all fit into the 64-word Descriptor h/w Buffer
7317 */
7318 - if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7319 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7320 - keys_fit_inline = true;
7321 + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7322 + ctx->adata.key_inline = true;
7323 + ctx->adata.key_virt = ctx->key;
7324 + } else {
7325 + ctx->adata.key_inline = false;
7326 + ctx->adata.key_dma = ctx->key_dma;
7327 + }
7328
7329 /* aead_encrypt shared descriptor */
7330 desc = ctx->sh_desc_enc;
7331 -
7332 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7333 -
7334 - /* Skip if already shared */
7335 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7336 - JUMP_COND_SHRD);
7337 - if (keys_fit_inline)
7338 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7339 - ctx->split_key_len, CLASS_2 |
7340 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7341 - else
7342 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7343 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7344 - set_jump_tgt_here(desc, key_jump_cmd);
7345 -
7346 - /* assoclen + cryptlen = seqinlen */
7347 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7348 -
7349 - /* Prepare to read and write cryptlen + assoclen bytes */
7350 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7351 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7352 -
7353 - /*
7354 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7355 - * thus need to do some magic, i.e. self-patch the descriptor
7356 - * buffer.
7357 - */
7358 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7359 - MOVE_DEST_MATH3 |
7360 - (0x6 << MOVE_LEN_SHIFT));
7361 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7362 - MOVE_DEST_DESCBUF |
7363 - MOVE_WAITCOMP |
7364 - (0x8 << MOVE_LEN_SHIFT));
7365 -
7366 - /* Class 2 operation */
7367 - append_operation(desc, ctx->class2_alg_type |
7368 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7369 -
7370 - /* Read and write cryptlen bytes */
7371 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7372 -
7373 - set_move_tgt_here(desc, read_move_cmd);
7374 - set_move_tgt_here(desc, write_move_cmd);
7375 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7376 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7377 - MOVE_AUX_LS);
7378 -
7379 - /* Write ICV */
7380 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7381 - LDST_SRCDST_BYTE_CONTEXT);
7382 -
7383 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7384 - desc_bytes(desc),
7385 - DMA_TO_DEVICE);
7386 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7387 - dev_err(jrdev, "unable to map shared descriptor\n");
7388 - return -ENOMEM;
7389 - }
7390 -#ifdef DEBUG
7391 - print_hex_dump(KERN_ERR,
7392 - "aead null enc shdesc@"__stringify(__LINE__)": ",
7393 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7394 - desc_bytes(desc), 1);
7395 -#endif
7396 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
7397 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7398 + desc_bytes(desc), DMA_TO_DEVICE);
7399
7400 /*
7401 * Job Descriptor and Shared Descriptors
7402 * must all fit into the 64-word Descriptor h/w Buffer
7403 */
7404 - keys_fit_inline = false;
7405 - if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7406 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7407 - keys_fit_inline = true;
7408 -
7409 - desc = ctx->sh_desc_dec;
7410 + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7411 + ctx->adata.key_inline = true;
7412 + ctx->adata.key_virt = ctx->key;
7413 + } else {
7414 + ctx->adata.key_inline = false;
7415 + ctx->adata.key_dma = ctx->key_dma;
7416 + }
7417
7418 /* aead_decrypt shared descriptor */
7419 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7420 -
7421 - /* Skip if already shared */
7422 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7423 - JUMP_COND_SHRD);
7424 - if (keys_fit_inline)
7425 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7426 - ctx->split_key_len, CLASS_2 |
7427 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7428 - else
7429 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7430 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7431 - set_jump_tgt_here(desc, key_jump_cmd);
7432 -
7433 - /* Class 2 operation */
7434 - append_operation(desc, ctx->class2_alg_type |
7435 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7436 -
7437 - /* assoclen + cryptlen = seqoutlen */
7438 - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7439 -
7440 - /* Prepare to read and write cryptlen + assoclen bytes */
7441 - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7442 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7443 -
7444 - /*
7445 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7446 - * thus need to do some magic, i.e. self-patch the descriptor
7447 - * buffer.
7448 - */
7449 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7450 - MOVE_DEST_MATH2 |
7451 - (0x6 << MOVE_LEN_SHIFT));
7452 - write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7453 - MOVE_DEST_DESCBUF |
7454 - MOVE_WAITCOMP |
7455 - (0x8 << MOVE_LEN_SHIFT));
7456 -
7457 - /* Read and write cryptlen bytes */
7458 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7459 -
7460 - /*
7461 - * Insert a NOP here, since we need at least 4 instructions between
7462 - * code patching the descriptor buffer and the location being patched.
7463 - */
7464 - jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7465 - set_jump_tgt_here(desc, jump_cmd);
7466 -
7467 - set_move_tgt_here(desc, read_move_cmd);
7468 - set_move_tgt_here(desc, write_move_cmd);
7469 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7470 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7471 - MOVE_AUX_LS);
7472 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7473 -
7474 - /* Load ICV */
7475 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7476 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7477 -
7478 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7479 - desc_bytes(desc),
7480 - DMA_TO_DEVICE);
7481 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7482 - dev_err(jrdev, "unable to map shared descriptor\n");
7483 - return -ENOMEM;
7484 - }
7485 -#ifdef DEBUG
7486 - print_hex_dump(KERN_ERR,
7487 - "aead null dec shdesc@"__stringify(__LINE__)": ",
7488 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7489 - desc_bytes(desc), 1);
7490 -#endif
7491 + desc = ctx->sh_desc_dec;
7492 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
7493 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7494 + desc_bytes(desc), DMA_TO_DEVICE);
7495
7496 return 0;
7497 }
7498 @@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt
7499 unsigned int ivsize = crypto_aead_ivsize(aead);
7500 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7501 struct device *jrdev = ctx->jrdev;
7502 - bool keys_fit_inline;
7503 - u32 geniv, moveiv;
7504 u32 ctx1_iv_off = 0;
7505 - u32 *desc;
7506 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7507 + u32 *desc, *nonce = NULL;
7508 + u32 inl_mask;
7509 + unsigned int data_len[2];
7510 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7511 OP_ALG_AAI_CTR_MOD128);
7512 const bool is_rfc3686 = alg->caam.rfc3686;
7513
7514 @@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt
7515 return 0;
7516
7517 /* NULL encryption / decryption */
7518 - if (!ctx->enckeylen)
7519 + if (!ctx->cdata.keylen)
7520 return aead_null_set_sh_desc(aead);
7521
7522 /*
7523 @@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt
7524 * RFC3686 specific:
7525 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7526 */
7527 - if (is_rfc3686)
7528 + if (is_rfc3686) {
7529 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7530 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7531 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7532 + }
7533 +
7534 + data_len[0] = ctx->adata.keylen_pad;
7535 + data_len[1] = ctx->cdata.keylen;
7536
7537 if (alg->caam.geniv)
7538 goto skip_enc;
7539 @@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt
7540 * Job Descriptor and Shared Descriptors
7541 * must all fit into the 64-word Descriptor h/w Buffer
7542 */
7543 - keys_fit_inline = false;
7544 - if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7545 - ctx->split_key_pad_len + ctx->enckeylen +
7546 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7547 - CAAM_DESC_BYTES_MAX)
7548 - keys_fit_inline = true;
7549 -
7550 - /* aead_encrypt shared descriptor */
7551 - desc = ctx->sh_desc_enc;
7552 -
7553 - /* Note: Context registers are saved. */
7554 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7555 -
7556 - /* Class 2 operation */
7557 - append_operation(desc, ctx->class2_alg_type |
7558 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7559 + if (desc_inline_query(DESC_AEAD_ENC_LEN +
7560 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7561 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7562 + ARRAY_SIZE(data_len)) < 0)
7563 + return -EINVAL;
7564
7565 - /* Read and write assoclen bytes */
7566 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7567 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7568 + if (inl_mask & 1)
7569 + ctx->adata.key_virt = ctx->key;
7570 + else
7571 + ctx->adata.key_dma = ctx->key_dma;
7572
7573 - /* Skip assoc data */
7574 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7575 + if (inl_mask & 2)
7576 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7577 + else
7578 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7579
7580 - /* read assoc before reading payload */
7581 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7582 - FIFOLDST_VLF);
7583 + ctx->adata.key_inline = !!(inl_mask & 1);
7584 + ctx->cdata.key_inline = !!(inl_mask & 2);
7585
7586 - /* Load Counter into CONTEXT1 reg */
7587 - if (is_rfc3686)
7588 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7589 - LDST_SRCDST_BYTE_CONTEXT |
7590 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7591 - LDST_OFFSET_SHIFT));
7592 -
7593 - /* Class 1 operation */
7594 - append_operation(desc, ctx->class1_alg_type |
7595 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7596 -
7597 - /* Read and write cryptlen bytes */
7598 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7599 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7600 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7601 -
7602 - /* Write ICV */
7603 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7604 - LDST_SRCDST_BYTE_CONTEXT);
7605 -
7606 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7607 - desc_bytes(desc),
7608 - DMA_TO_DEVICE);
7609 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7610 - dev_err(jrdev, "unable to map shared descriptor\n");
7611 - return -ENOMEM;
7612 - }
7613 -#ifdef DEBUG
7614 - print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7615 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7616 - desc_bytes(desc), 1);
7617 -#endif
7618 + /* aead_encrypt shared descriptor */
7619 + desc = ctx->sh_desc_enc;
7620 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7621 + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7622 + false);
7623 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7624 + desc_bytes(desc), DMA_TO_DEVICE);
7625
7626 skip_enc:
7627 /*
7628 * Job Descriptor and Shared Descriptors
7629 * must all fit into the 64-word Descriptor h/w Buffer
7630 */
7631 - keys_fit_inline = false;
7632 - if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7633 - ctx->split_key_pad_len + ctx->enckeylen +
7634 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7635 - CAAM_DESC_BYTES_MAX)
7636 - keys_fit_inline = true;
7637 -
7638 - /* aead_decrypt shared descriptor */
7639 - desc = ctx->sh_desc_dec;
7640 -
7641 - /* Note: Context registers are saved. */
7642 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7643 -
7644 - /* Class 2 operation */
7645 - append_operation(desc, ctx->class2_alg_type |
7646 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7647 + if (desc_inline_query(DESC_AEAD_DEC_LEN +
7648 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7649 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7650 + ARRAY_SIZE(data_len)) < 0)
7651 + return -EINVAL;
7652
7653 - /* Read and write assoclen bytes */
7654 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7655 - if (alg->caam.geniv)
7656 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7657 + if (inl_mask & 1)
7658 + ctx->adata.key_virt = ctx->key;
7659 else
7660 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7661 -
7662 - /* Skip assoc data */
7663 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7664 -
7665 - /* read assoc before reading payload */
7666 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7667 - KEY_VLF);
7668 -
7669 - if (alg->caam.geniv) {
7670 - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7671 - LDST_SRCDST_BYTE_CONTEXT |
7672 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
7673 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7674 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7675 - }
7676 -
7677 - /* Load Counter into CONTEXT1 reg */
7678 - if (is_rfc3686)
7679 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7680 - LDST_SRCDST_BYTE_CONTEXT |
7681 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7682 - LDST_OFFSET_SHIFT));
7683 + ctx->adata.key_dma = ctx->key_dma;
7684
7685 - /* Choose operation */
7686 - if (ctr_mode)
7687 - append_operation(desc, ctx->class1_alg_type |
7688 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7689 + if (inl_mask & 2)
7690 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7691 else
7692 - append_dec_op1(desc, ctx->class1_alg_type);
7693 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7694
7695 - /* Read and write cryptlen bytes */
7696 - append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7697 - append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7698 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7699 -
7700 - /* Load ICV */
7701 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7702 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7703 + ctx->adata.key_inline = !!(inl_mask & 1);
7704 + ctx->cdata.key_inline = !!(inl_mask & 2);
7705
7706 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7707 - desc_bytes(desc),
7708 - DMA_TO_DEVICE);
7709 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7710 - dev_err(jrdev, "unable to map shared descriptor\n");
7711 - return -ENOMEM;
7712 - }
7713 -#ifdef DEBUG
7714 - print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7715 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7716 - desc_bytes(desc), 1);
7717 -#endif
7718 + /* aead_decrypt shared descriptor */
7719 + desc = ctx->sh_desc_dec;
7720 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7721 + ctx->authsize, alg->caam.geniv, is_rfc3686,
7722 + nonce, ctx1_iv_off, false);
7723 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7724 + desc_bytes(desc), DMA_TO_DEVICE);
7725
7726 if (!alg->caam.geniv)
7727 goto skip_givenc;
7728 @@ -655,107 +277,32 @@ skip_enc:
7729 * Job Descriptor and Shared Descriptors
7730 * must all fit into the 64-word Descriptor h/w Buffer
7731 */
7732 - keys_fit_inline = false;
7733 - if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7734 - ctx->split_key_pad_len + ctx->enckeylen +
7735 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7736 - CAAM_DESC_BYTES_MAX)
7737 - keys_fit_inline = true;
7738 -
7739 - /* aead_givencrypt shared descriptor */
7740 - desc = ctx->sh_desc_enc;
7741 -
7742 - /* Note: Context registers are saved. */
7743 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7744 + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7745 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7746 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7747 + ARRAY_SIZE(data_len)) < 0)
7748 + return -EINVAL;
7749
7750 - if (is_rfc3686)
7751 - goto copy_iv;
7752 + if (inl_mask & 1)
7753 + ctx->adata.key_virt = ctx->key;
7754 + else
7755 + ctx->adata.key_dma = ctx->key_dma;
7756
7757 - /* Generate IV */
7758 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7759 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7760 - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7761 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7762 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7763 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7764 - append_move(desc, MOVE_WAITCOMP |
7765 - MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7766 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7767 - (ivsize << MOVE_LEN_SHIFT));
7768 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7769 -
7770 -copy_iv:
7771 - /* Copy IV to class 1 context */
7772 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7773 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7774 - (ivsize << MOVE_LEN_SHIFT));
7775 -
7776 - /* Return to encryption */
7777 - append_operation(desc, ctx->class2_alg_type |
7778 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7779 -
7780 - /* Read and write assoclen bytes */
7781 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7782 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7783 -
7784 - /* ivsize + cryptlen = seqoutlen - authsize */
7785 - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7786 -
7787 - /* Skip assoc data */
7788 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7789 -
7790 - /* read assoc before reading payload */
7791 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7792 - KEY_VLF);
7793 -
7794 - /* Copy iv from outfifo to class 2 fifo */
7795 - moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7796 - NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7797 - append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7798 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7799 - append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7800 - LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7801 + if (inl_mask & 2)
7802 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7803 + else
7804 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7805
7806 - /* Load Counter into CONTEXT1 reg */
7807 - if (is_rfc3686)
7808 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7809 - LDST_SRCDST_BYTE_CONTEXT |
7810 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7811 - LDST_OFFSET_SHIFT));
7812 -
7813 - /* Class 1 operation */
7814 - append_operation(desc, ctx->class1_alg_type |
7815 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7816 -
7817 - /* Will write ivsize + cryptlen */
7818 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7819 -
7820 - /* Not need to reload iv */
7821 - append_seq_fifo_load(desc, ivsize,
7822 - FIFOLD_CLASS_SKIP);
7823 -
7824 - /* Will read cryptlen */
7825 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7826 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7827 - FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7828 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7829 -
7830 - /* Write ICV */
7831 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7832 - LDST_SRCDST_BYTE_CONTEXT);
7833 + ctx->adata.key_inline = !!(inl_mask & 1);
7834 + ctx->cdata.key_inline = !!(inl_mask & 2);
7835
7836 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7837 - desc_bytes(desc),
7838 - DMA_TO_DEVICE);
7839 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7840 - dev_err(jrdev, "unable to map shared descriptor\n");
7841 - return -ENOMEM;
7842 - }
7843 -#ifdef DEBUG
7844 - print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7845 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7846 - desc_bytes(desc), 1);
7847 -#endif
7848 + /* aead_givencrypt shared descriptor */
7849 + desc = ctx->sh_desc_enc;
7850 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7851 + ctx->authsize, is_rfc3686, nonce,
7852 + ctx1_iv_off, false);
7853 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7854 + desc_bytes(desc), DMA_TO_DEVICE);
7855
7856 skip_givenc:
7857 return 0;
7858 @@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto
7859 {
7860 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7861 struct device *jrdev = ctx->jrdev;
7862 - bool keys_fit_inline = false;
7863 - u32 *key_jump_cmd, *zero_payload_jump_cmd,
7864 - *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7865 + unsigned int ivsize = crypto_aead_ivsize(aead);
7866 u32 *desc;
7867 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7868 + ctx->cdata.keylen;
7869
7870 - if (!ctx->enckeylen || !ctx->authsize)
7871 + if (!ctx->cdata.keylen || !ctx->authsize)
7872 return 0;
7873
7874 /*
7875 @@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto
7876 * Job Descriptor and Shared Descriptor
7877 * must fit into the 64-word Descriptor h/w Buffer
7878 */
7879 - if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7880 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7881 - keys_fit_inline = true;
7882 + if (rem_bytes >= DESC_GCM_ENC_LEN) {
7883 + ctx->cdata.key_inline = true;
7884 + ctx->cdata.key_virt = ctx->key;
7885 + } else {
7886 + ctx->cdata.key_inline = false;
7887 + ctx->cdata.key_dma = ctx->key_dma;
7888 + }
7889
7890 desc = ctx->sh_desc_enc;
7891 -
7892 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7893 -
7894 - /* skip key loading if they are loaded due to sharing */
7895 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7896 - JUMP_COND_SHRD | JUMP_COND_SELF);
7897 - if (keys_fit_inline)
7898 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7899 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7900 - else
7901 - append_key(desc, ctx->key_dma, ctx->enckeylen,
7902 - CLASS_1 | KEY_DEST_CLASS_REG);
7903 - set_jump_tgt_here(desc, key_jump_cmd);
7904 -
7905 - /* class 1 operation */
7906 - append_operation(desc, ctx->class1_alg_type |
7907 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7908 -
7909 - /* if assoclen + cryptlen is ZERO, skip to ICV write */
7910 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7911 - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7912 - JUMP_COND_MATH_Z);
7913 -
7914 - /* if assoclen is ZERO, skip reading the assoc data */
7915 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7916 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7917 - JUMP_COND_MATH_Z);
7918 -
7919 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7920 -
7921 - /* skip assoc data */
7922 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7923 -
7924 - /* cryptlen = seqinlen - assoclen */
7925 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7926 -
7927 - /* if cryptlen is ZERO jump to zero-payload commands */
7928 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7929 - JUMP_COND_MATH_Z);
7930 -
7931 - /* read assoc data */
7932 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7933 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7934 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7935 -
7936 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7937 -
7938 - /* write encrypted data */
7939 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7940 -
7941 - /* read payload data */
7942 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7943 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7944 -
7945 - /* jump the zero-payload commands */
7946 - append_jump(desc, JUMP_TEST_ALL | 2);
7947 -
7948 - /* zero-payload commands */
7949 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
7950 -
7951 - /* read assoc data */
7952 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7953 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7954 -
7955 - /* There is no input data */
7956 - set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7957 -
7958 - /* write ICV */
7959 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
7960 - LDST_SRCDST_BYTE_CONTEXT);
7961 -
7962 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7963 - desc_bytes(desc),
7964 - DMA_TO_DEVICE);
7965 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7966 - dev_err(jrdev, "unable to map shared descriptor\n");
7967 - return -ENOMEM;
7968 - }
7969 -#ifdef DEBUG
7970 - print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
7971 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7972 - desc_bytes(desc), 1);
7973 -#endif
7974 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
7975 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7976 + desc_bytes(desc), DMA_TO_DEVICE);
7977
7978 /*
7979 * Job Descriptor and Shared Descriptors
7980 * must all fit into the 64-word Descriptor h/w Buffer
7981 */
7982 - keys_fit_inline = false;
7983 - if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
7984 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7985 - keys_fit_inline = true;
7986 + if (rem_bytes >= DESC_GCM_DEC_LEN) {
7987 + ctx->cdata.key_inline = true;
7988 + ctx->cdata.key_virt = ctx->key;
7989 + } else {
7990 + ctx->cdata.key_inline = false;
7991 + ctx->cdata.key_dma = ctx->key_dma;
7992 + }
7993
7994 desc = ctx->sh_desc_dec;
7995 -
7996 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7997 -
7998 - /* skip key loading if they are loaded due to sharing */
7999 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8000 - JUMP_TEST_ALL | JUMP_COND_SHRD |
8001 - JUMP_COND_SELF);
8002 - if (keys_fit_inline)
8003 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8004 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8005 - else
8006 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8007 - CLASS_1 | KEY_DEST_CLASS_REG);
8008 - set_jump_tgt_here(desc, key_jump_cmd);
8009 -
8010 - /* class 1 operation */
8011 - append_operation(desc, ctx->class1_alg_type |
8012 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8013 -
8014 - /* if assoclen is ZERO, skip reading the assoc data */
8015 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8016 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8017 - JUMP_COND_MATH_Z);
8018 -
8019 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8020 -
8021 - /* skip assoc data */
8022 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8023 -
8024 - /* read assoc data */
8025 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8026 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8027 -
8028 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8029 -
8030 - /* cryptlen = seqoutlen - assoclen */
8031 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8032 -
8033 - /* jump to zero-payload command if cryptlen is zero */
8034 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8035 - JUMP_COND_MATH_Z);
8036 -
8037 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8038 -
8039 - /* store encrypted data */
8040 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8041 -
8042 - /* read payload data */
8043 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8044 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8045 -
8046 - /* zero-payload command */
8047 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
8048 -
8049 - /* read ICV */
8050 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8051 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8052 -
8053 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8054 - desc_bytes(desc),
8055 - DMA_TO_DEVICE);
8056 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8057 - dev_err(jrdev, "unable to map shared descriptor\n");
8058 - return -ENOMEM;
8059 - }
8060 -#ifdef DEBUG
8061 - print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8062 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8063 - desc_bytes(desc), 1);
8064 -#endif
8065 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8066 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8067 + desc_bytes(desc), DMA_TO_DEVICE);
8068
8069 return 0;
8070 }
8071 @@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr
8072 {
8073 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8074 struct device *jrdev = ctx->jrdev;
8075 - bool keys_fit_inline = false;
8076 - u32 *key_jump_cmd;
8077 + unsigned int ivsize = crypto_aead_ivsize(aead);
8078 u32 *desc;
8079 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8080 + ctx->cdata.keylen;
8081
8082 - if (!ctx->enckeylen || !ctx->authsize)
8083 + if (!ctx->cdata.keylen || !ctx->authsize)
8084 return 0;
8085
8086 /*
8087 @@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr
8088 * Job Descriptor and Shared Descriptor
8089 * must fit into the 64-word Descriptor h/w Buffer
8090 */
8091 - if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8092 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8093 - keys_fit_inline = true;
8094 + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8095 + ctx->cdata.key_inline = true;
8096 + ctx->cdata.key_virt = ctx->key;
8097 + } else {
8098 + ctx->cdata.key_inline = false;
8099 + ctx->cdata.key_dma = ctx->key_dma;
8100 + }
8101
8102 desc = ctx->sh_desc_enc;
8103 -
8104 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8105 -
8106 - /* Skip key loading if it is loaded due to sharing */
8107 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8108 - JUMP_COND_SHRD);
8109 - if (keys_fit_inline)
8110 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8111 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8112 - else
8113 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8114 - CLASS_1 | KEY_DEST_CLASS_REG);
8115 - set_jump_tgt_here(desc, key_jump_cmd);
8116 -
8117 - /* Class 1 operation */
8118 - append_operation(desc, ctx->class1_alg_type |
8119 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8120 -
8121 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8122 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8123 -
8124 - /* Read assoc data */
8125 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8126 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8127 -
8128 - /* Skip IV */
8129 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8130 -
8131 - /* Will read cryptlen bytes */
8132 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8133 -
8134 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8135 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8136 -
8137 - /* Skip assoc data */
8138 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8139 -
8140 - /* cryptlen = seqoutlen - assoclen */
8141 - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8142 -
8143 - /* Write encrypted data */
8144 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8145 -
8146 - /* Read payload data */
8147 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8148 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8149 -
8150 - /* Write ICV */
8151 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8152 - LDST_SRCDST_BYTE_CONTEXT);
8153 -
8154 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8155 - desc_bytes(desc),
8156 - DMA_TO_DEVICE);
8157 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8158 - dev_err(jrdev, "unable to map shared descriptor\n");
8159 - return -ENOMEM;
8160 - }
8161 -#ifdef DEBUG
8162 - print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8163 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8164 - desc_bytes(desc), 1);
8165 -#endif
8166 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8167 + false);
8168 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8169 + desc_bytes(desc), DMA_TO_DEVICE);
8170
8171 /*
8172 * Job Descriptor and Shared Descriptors
8173 * must all fit into the 64-word Descriptor h/w Buffer
8174 */
8175 - keys_fit_inline = false;
8176 - if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8177 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8178 - keys_fit_inline = true;
8179 + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8180 + ctx->cdata.key_inline = true;
8181 + ctx->cdata.key_virt = ctx->key;
8182 + } else {
8183 + ctx->cdata.key_inline = false;
8184 + ctx->cdata.key_dma = ctx->key_dma;
8185 + }
8186
8187 desc = ctx->sh_desc_dec;
8188 -
8189 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8190 -
8191 - /* Skip key loading if it is loaded due to sharing */
8192 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8193 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8194 - if (keys_fit_inline)
8195 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8196 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8197 - else
8198 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8199 - CLASS_1 | KEY_DEST_CLASS_REG);
8200 - set_jump_tgt_here(desc, key_jump_cmd);
8201 -
8202 - /* Class 1 operation */
8203 - append_operation(desc, ctx->class1_alg_type |
8204 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8205 -
8206 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8207 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8208 -
8209 - /* Read assoc data */
8210 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8211 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8212 -
8213 - /* Skip IV */
8214 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8215 -
8216 - /* Will read cryptlen bytes */
8217 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8218 -
8219 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8220 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8221 -
8222 - /* Skip assoc data */
8223 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8224 -
8225 - /* Will write cryptlen bytes */
8226 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8227 -
8228 - /* Store payload data */
8229 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8230 -
8231 - /* Read encrypted data */
8232 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8233 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8234 -
8235 - /* Read ICV */
8236 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8237 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8238 -
8239 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8240 - desc_bytes(desc),
8241 - DMA_TO_DEVICE);
8242 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8243 - dev_err(jrdev, "unable to map shared descriptor\n");
8244 - return -ENOMEM;
8245 - }
8246 -#ifdef DEBUG
8247 - print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8248 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8249 - desc_bytes(desc), 1);
8250 -#endif
8251 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8252 + false);
8253 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8254 + desc_bytes(desc), DMA_TO_DEVICE);
8255
8256 return 0;
8257 }
8258 @@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr
8259 {
8260 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8261 struct device *jrdev = ctx->jrdev;
8262 - bool keys_fit_inline = false;
8263 - u32 *key_jump_cmd;
8264 - u32 *read_move_cmd, *write_move_cmd;
8265 + unsigned int ivsize = crypto_aead_ivsize(aead);
8266 u32 *desc;
8267 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8268 + ctx->cdata.keylen;
8269
8270 - if (!ctx->enckeylen || !ctx->authsize)
8271 + if (!ctx->cdata.keylen || !ctx->authsize)
8272 return 0;
8273
8274 /*
8275 @@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr
8276 * Job Descriptor and Shared Descriptor
8277 * must fit into the 64-word Descriptor h/w Buffer
8278 */
8279 - if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8280 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8281 - keys_fit_inline = true;
8282 + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8283 + ctx->cdata.key_inline = true;
8284 + ctx->cdata.key_virt = ctx->key;
8285 + } else {
8286 + ctx->cdata.key_inline = false;
8287 + ctx->cdata.key_dma = ctx->key_dma;
8288 + }
8289
8290 desc = ctx->sh_desc_enc;
8291 -
8292 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8293 -
8294 - /* Skip key loading if it is loaded due to sharing */
8295 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8296 - JUMP_COND_SHRD);
8297 - if (keys_fit_inline)
8298 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8299 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8300 - else
8301 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8302 - CLASS_1 | KEY_DEST_CLASS_REG);
8303 - set_jump_tgt_here(desc, key_jump_cmd);
8304 -
8305 - /* Class 1 operation */
8306 - append_operation(desc, ctx->class1_alg_type |
8307 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8308 -
8309 - /* assoclen + cryptlen = seqinlen */
8310 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8311 -
8312 - /*
8313 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8314 - * thus need to do some magic, i.e. self-patch the descriptor
8315 - * buffer.
8316 - */
8317 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8318 - (0x6 << MOVE_LEN_SHIFT));
8319 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8320 - (0x8 << MOVE_LEN_SHIFT));
8321 -
8322 - /* Will read assoclen + cryptlen bytes */
8323 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8324 -
8325 - /* Will write assoclen + cryptlen bytes */
8326 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8327 -
8328 - /* Read and write assoclen + cryptlen bytes */
8329 - aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8330 -
8331 - set_move_tgt_here(desc, read_move_cmd);
8332 - set_move_tgt_here(desc, write_move_cmd);
8333 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8334 - /* Move payload data to OFIFO */
8335 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8336 -
8337 - /* Write ICV */
8338 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8339 - LDST_SRCDST_BYTE_CONTEXT);
8340 -
8341 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8342 - desc_bytes(desc),
8343 - DMA_TO_DEVICE);
8344 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8345 - dev_err(jrdev, "unable to map shared descriptor\n");
8346 - return -ENOMEM;
8347 - }
8348 -#ifdef DEBUG
8349 - print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8350 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8351 - desc_bytes(desc), 1);
8352 -#endif
8353 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8354 + false);
8355 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8356 + desc_bytes(desc), DMA_TO_DEVICE);
8357
8358 /*
8359 * Job Descriptor and Shared Descriptors
8360 * must all fit into the 64-word Descriptor h/w Buffer
8361 */
8362 - keys_fit_inline = false;
8363 - if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8364 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8365 - keys_fit_inline = true;
8366 + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8367 + ctx->cdata.key_inline = true;
8368 + ctx->cdata.key_virt = ctx->key;
8369 + } else {
8370 + ctx->cdata.key_inline = false;
8371 + ctx->cdata.key_dma = ctx->key_dma;
8372 + }
8373
8374 desc = ctx->sh_desc_dec;
8375 -
8376 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8377 -
8378 - /* Skip key loading if it is loaded due to sharing */
8379 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8380 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8381 - if (keys_fit_inline)
8382 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8383 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8384 - else
8385 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8386 - CLASS_1 | KEY_DEST_CLASS_REG);
8387 - set_jump_tgt_here(desc, key_jump_cmd);
8388 -
8389 - /* Class 1 operation */
8390 - append_operation(desc, ctx->class1_alg_type |
8391 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8392 -
8393 - /* assoclen + cryptlen = seqoutlen */
8394 - append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8395 -
8396 - /*
8397 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8398 - * thus need to do some magic, i.e. self-patch the descriptor
8399 - * buffer.
8400 - */
8401 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8402 - (0x6 << MOVE_LEN_SHIFT));
8403 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8404 - (0x8 << MOVE_LEN_SHIFT));
8405 -
8406 - /* Will read assoclen + cryptlen bytes */
8407 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8408 -
8409 - /* Will write assoclen + cryptlen bytes */
8410 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8411 -
8412 - /* Store payload data */
8413 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8414 -
8415 - /* In-snoop assoclen + cryptlen data */
8416 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8417 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8418 -
8419 - set_move_tgt_here(desc, read_move_cmd);
8420 - set_move_tgt_here(desc, write_move_cmd);
8421 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8422 - /* Move payload data to OFIFO */
8423 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8424 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8425 -
8426 - /* Read ICV */
8427 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8428 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8429 -
8430 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8431 - desc_bytes(desc),
8432 - DMA_TO_DEVICE);
8433 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8434 - dev_err(jrdev, "unable to map shared descriptor\n");
8435 - return -ENOMEM;
8436 - }
8437 -#ifdef DEBUG
8438 - print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8439 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8440 - desc_bytes(desc), 1);
8441 -#endif
8442 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8443 + false);
8444 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8445 + desc_bytes(desc), DMA_TO_DEVICE);
8446
8447 return 0;
8448 }
8449 @@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr
8450 return 0;
8451 }
8452
8453 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8454 - u32 authkeylen)
8455 -{
8456 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8457 - ctx->split_key_pad_len, key_in, authkeylen,
8458 - ctx->alg_op);
8459 -}
8460 -
8461 static int aead_setkey(struct crypto_aead *aead,
8462 const u8 *key, unsigned int keylen)
8463 {
8464 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8465 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8466 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8467 struct device *jrdev = ctx->jrdev;
8468 struct crypto_authenc_keys keys;
8469 @@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea
8470 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8471 goto badkey;
8472
8473 - /* Pick class 2 key length from algorithm submask */
8474 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8475 - OP_ALG_ALGSEL_SHIFT] * 2;
8476 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8477 -
8478 - if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8479 - goto badkey;
8480 -
8481 #ifdef DEBUG
8482 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8483 keys.authkeylen + keys.enckeylen, keys.enckeylen,
8484 keys.authkeylen);
8485 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8486 - ctx->split_key_len, ctx->split_key_pad_len);
8487 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8488 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8489 #endif
8490
8491 - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8492 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8493 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
8494 + keys.enckeylen);
8495 if (ret) {
8496 goto badkey;
8497 }
8498
8499 /* postpend encryption key to auth split key */
8500 - memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8501 -
8502 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8503 - keys.enckeylen, DMA_TO_DEVICE);
8504 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8505 - dev_err(jrdev, "unable to map key i/o memory\n");
8506 - return -ENOMEM;
8507 - }
8508 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8509 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8510 + keys.enckeylen, DMA_TO_DEVICE);
8511 #ifdef DEBUG
8512 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8513 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8514 - ctx->split_key_pad_len + keys.enckeylen, 1);
8515 + ctx->adata.keylen_pad + keys.enckeylen, 1);
8516 #endif
8517 -
8518 - ctx->enckeylen = keys.enckeylen;
8519 -
8520 - ret = aead_set_sh_desc(aead);
8521 - if (ret) {
8522 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8523 - keys.enckeylen, DMA_TO_DEVICE);
8524 - }
8525 -
8526 - return ret;
8527 + ctx->cdata.keylen = keys.enckeylen;
8528 + return aead_set_sh_desc(aead);
8529 badkey:
8530 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8531 return -EINVAL;
8532 @@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead
8533 {
8534 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8535 struct device *jrdev = ctx->jrdev;
8536 - int ret = 0;
8537
8538 #ifdef DEBUG
8539 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8540 @@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead
8541 #endif
8542
8543 memcpy(ctx->key, key, keylen);
8544 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8545 - DMA_TO_DEVICE);
8546 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8547 - dev_err(jrdev, "unable to map key i/o memory\n");
8548 - return -ENOMEM;
8549 - }
8550 - ctx->enckeylen = keylen;
8551 -
8552 - ret = gcm_set_sh_desc(aead);
8553 - if (ret) {
8554 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8555 - DMA_TO_DEVICE);
8556 - }
8557 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8558 + ctx->cdata.keylen = keylen;
8559
8560 - return ret;
8561 + return gcm_set_sh_desc(aead);
8562 }
8563
8564 static int rfc4106_setkey(struct crypto_aead *aead,
8565 @@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_
8566 {
8567 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8568 struct device *jrdev = ctx->jrdev;
8569 - int ret = 0;
8570
8571 if (keylen < 4)
8572 return -EINVAL;
8573 @@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_
8574 * The last four bytes of the key material are used as the salt value
8575 * in the nonce. Update the AES key length.
8576 */
8577 - ctx->enckeylen = keylen - 4;
8578 -
8579 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8580 - DMA_TO_DEVICE);
8581 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8582 - dev_err(jrdev, "unable to map key i/o memory\n");
8583 - return -ENOMEM;
8584 - }
8585 -
8586 - ret = rfc4106_set_sh_desc(aead);
8587 - if (ret) {
8588 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8589 - DMA_TO_DEVICE);
8590 - }
8591 -
8592 - return ret;
8593 + ctx->cdata.keylen = keylen - 4;
8594 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8595 + DMA_TO_DEVICE);
8596 + return rfc4106_set_sh_desc(aead);
8597 }
8598
8599 static int rfc4543_setkey(struct crypto_aead *aead,
8600 @@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_
8601 {
8602 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8603 struct device *jrdev = ctx->jrdev;
8604 - int ret = 0;
8605
8606 if (keylen < 4)
8607 return -EINVAL;
8608 @@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_
8609 * The last four bytes of the key material are used as the salt value
8610 * in the nonce. Update the AES key length.
8611 */
8612 - ctx->enckeylen = keylen - 4;
8613 -
8614 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8615 - DMA_TO_DEVICE);
8616 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8617 - dev_err(jrdev, "unable to map key i/o memory\n");
8618 - return -ENOMEM;
8619 - }
8620 -
8621 - ret = rfc4543_set_sh_desc(aead);
8622 - if (ret) {
8623 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8624 - DMA_TO_DEVICE);
8625 - }
8626 -
8627 - return ret;
8628 + ctx->cdata.keylen = keylen - 4;
8629 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8630 + DMA_TO_DEVICE);
8631 + return rfc4543_set_sh_desc(aead);
8632 }
8633
8634 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8635 const u8 *key, unsigned int keylen)
8636 {
8637 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8638 - struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8639 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8640 const char *alg_name = crypto_tfm_alg_name(tfm);
8641 struct device *jrdev = ctx->jrdev;
8642 - int ret = 0;
8643 - u32 *key_jump_cmd;
8644 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8645 u32 *desc;
8646 - u8 *nonce;
8647 - u32 geniv;
8648 u32 ctx1_iv_off = 0;
8649 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8650 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8651 OP_ALG_AAI_CTR_MOD128);
8652 const bool is_rfc3686 = (ctr_mode &&
8653 (strstr(alg_name, "rfc3686") != NULL));
8654
8655 + memcpy(ctx->key, key, keylen);
8656 #ifdef DEBUG
8657 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8658 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8659 @@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp
8660 keylen -= CTR_RFC3686_NONCE_SIZE;
8661 }
8662
8663 - memcpy(ctx->key, key, keylen);
8664 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8665 - DMA_TO_DEVICE);
8666 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8667 - dev_err(jrdev, "unable to map key i/o memory\n");
8668 - return -ENOMEM;
8669 - }
8670 - ctx->enckeylen = keylen;
8671 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8672 + ctx->cdata.keylen = keylen;
8673 + ctx->cdata.key_virt = ctx->key;
8674 + ctx->cdata.key_inline = true;
8675
8676 /* ablkcipher_encrypt shared descriptor */
8677 desc = ctx->sh_desc_enc;
8678 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8679 - /* Skip if already shared */
8680 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8681 - JUMP_COND_SHRD);
8682 -
8683 - /* Load class1 key only */
8684 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8685 - ctx->enckeylen, CLASS_1 |
8686 - KEY_DEST_CLASS_REG);
8687 -
8688 - /* Load nonce into CONTEXT1 reg */
8689 - if (is_rfc3686) {
8690 - nonce = (u8 *)key + keylen;
8691 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8692 - LDST_CLASS_IND_CCB |
8693 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8694 - append_move(desc, MOVE_WAITCOMP |
8695 - MOVE_SRC_OUTFIFO |
8696 - MOVE_DEST_CLASS1CTX |
8697 - (16 << MOVE_OFFSET_SHIFT) |
8698 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8699 - }
8700 -
8701 - set_jump_tgt_here(desc, key_jump_cmd);
8702 -
8703 - /* Load iv */
8704 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8705 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8706 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8707 + ctx1_iv_off);
8708 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8709 + desc_bytes(desc), DMA_TO_DEVICE);
8710
8711 - /* Load counter into CONTEXT1 reg */
8712 - if (is_rfc3686)
8713 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8714 - LDST_SRCDST_BYTE_CONTEXT |
8715 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8716 - LDST_OFFSET_SHIFT));
8717 -
8718 - /* Load operation */
8719 - append_operation(desc, ctx->class1_alg_type |
8720 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8721 -
8722 - /* Perform operation */
8723 - ablkcipher_append_src_dst(desc);
8724 -
8725 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8726 - desc_bytes(desc),
8727 - DMA_TO_DEVICE);
8728 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8729 - dev_err(jrdev, "unable to map shared descriptor\n");
8730 - return -ENOMEM;
8731 - }
8732 -#ifdef DEBUG
8733 - print_hex_dump(KERN_ERR,
8734 - "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8735 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8736 - desc_bytes(desc), 1);
8737 -#endif
8738 /* ablkcipher_decrypt shared descriptor */
8739 desc = ctx->sh_desc_dec;
8740 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8741 + ctx1_iv_off);
8742 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8743 + desc_bytes(desc), DMA_TO_DEVICE);
8744
8745 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8746 - /* Skip if already shared */
8747 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8748 - JUMP_COND_SHRD);
8749 -
8750 - /* Load class1 key only */
8751 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8752 - ctx->enckeylen, CLASS_1 |
8753 - KEY_DEST_CLASS_REG);
8754 -
8755 - /* Load nonce into CONTEXT1 reg */
8756 - if (is_rfc3686) {
8757 - nonce = (u8 *)key + keylen;
8758 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8759 - LDST_CLASS_IND_CCB |
8760 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8761 - append_move(desc, MOVE_WAITCOMP |
8762 - MOVE_SRC_OUTFIFO |
8763 - MOVE_DEST_CLASS1CTX |
8764 - (16 << MOVE_OFFSET_SHIFT) |
8765 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8766 - }
8767 -
8768 - set_jump_tgt_here(desc, key_jump_cmd);
8769 -
8770 - /* load IV */
8771 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8772 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8773 -
8774 - /* Load counter into CONTEXT1 reg */
8775 - if (is_rfc3686)
8776 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8777 - LDST_SRCDST_BYTE_CONTEXT |
8778 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8779 - LDST_OFFSET_SHIFT));
8780 -
8781 - /* Choose operation */
8782 - if (ctr_mode)
8783 - append_operation(desc, ctx->class1_alg_type |
8784 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8785 - else
8786 - append_dec_op1(desc, ctx->class1_alg_type);
8787 -
8788 - /* Perform operation */
8789 - ablkcipher_append_src_dst(desc);
8790 -
8791 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8792 - desc_bytes(desc),
8793 - DMA_TO_DEVICE);
8794 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8795 - dev_err(jrdev, "unable to map shared descriptor\n");
8796 - return -ENOMEM;
8797 - }
8798 -
8799 -#ifdef DEBUG
8800 - print_hex_dump(KERN_ERR,
8801 - "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8802 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8803 - desc_bytes(desc), 1);
8804 -#endif
8805 /* ablkcipher_givencrypt shared descriptor */
8806 desc = ctx->sh_desc_givenc;
8807 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8808 + ctx1_iv_off);
8809 + dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8810 + desc_bytes(desc), DMA_TO_DEVICE);
8811
8812 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8813 - /* Skip if already shared */
8814 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8815 - JUMP_COND_SHRD);
8816 -
8817 - /* Load class1 key only */
8818 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8819 - ctx->enckeylen, CLASS_1 |
8820 - KEY_DEST_CLASS_REG);
8821 -
8822 - /* Load Nonce into CONTEXT1 reg */
8823 - if (is_rfc3686) {
8824 - nonce = (u8 *)key + keylen;
8825 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8826 - LDST_CLASS_IND_CCB |
8827 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8828 - append_move(desc, MOVE_WAITCOMP |
8829 - MOVE_SRC_OUTFIFO |
8830 - MOVE_DEST_CLASS1CTX |
8831 - (16 << MOVE_OFFSET_SHIFT) |
8832 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8833 - }
8834 - set_jump_tgt_here(desc, key_jump_cmd);
8835 -
8836 - /* Generate IV */
8837 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8838 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8839 - NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8840 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8841 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8842 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8843 - append_move(desc, MOVE_WAITCOMP |
8844 - MOVE_SRC_INFIFO |
8845 - MOVE_DEST_CLASS1CTX |
8846 - (crt->ivsize << MOVE_LEN_SHIFT) |
8847 - (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8848 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8849 -
8850 - /* Copy generated IV to memory */
8851 - append_seq_store(desc, crt->ivsize,
8852 - LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8853 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
8854 -
8855 - /* Load Counter into CONTEXT1 reg */
8856 - if (is_rfc3686)
8857 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8858 - LDST_SRCDST_BYTE_CONTEXT |
8859 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8860 - LDST_OFFSET_SHIFT));
8861 -
8862 - if (ctx1_iv_off)
8863 - append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8864 - (1 << JUMP_OFFSET_SHIFT));
8865 -
8866 - /* Load operation */
8867 - append_operation(desc, ctx->class1_alg_type |
8868 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8869 -
8870 - /* Perform operation */
8871 - ablkcipher_append_src_dst(desc);
8872 -
8873 - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8874 - desc_bytes(desc),
8875 - DMA_TO_DEVICE);
8876 - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8877 - dev_err(jrdev, "unable to map shared descriptor\n");
8878 - return -ENOMEM;
8879 - }
8880 -#ifdef DEBUG
8881 - print_hex_dump(KERN_ERR,
8882 - "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8883 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8884 - desc_bytes(desc), 1);
8885 -#endif
8886 -
8887 - return ret;
8888 + return 0;
8889 }
8890
8891 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8892 @@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct
8893 {
8894 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8895 struct device *jrdev = ctx->jrdev;
8896 - u32 *key_jump_cmd, *desc;
8897 - __be64 sector_size = cpu_to_be64(512);
8898 + u32 *desc;
8899
8900 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
8901 crypto_ablkcipher_set_flags(ablkcipher,
8902 @@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct
8903 }
8904
8905 memcpy(ctx->key, key, keylen);
8906 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8907 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8908 - dev_err(jrdev, "unable to map key i/o memory\n");
8909 - return -ENOMEM;
8910 - }
8911 - ctx->enckeylen = keylen;
8912 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8913 + ctx->cdata.keylen = keylen;
8914 + ctx->cdata.key_virt = ctx->key;
8915 + ctx->cdata.key_inline = true;
8916
8917 /* xts_ablkcipher_encrypt shared descriptor */
8918 desc = ctx->sh_desc_enc;
8919 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8920 - /* Skip if already shared */
8921 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8922 - JUMP_COND_SHRD);
8923 -
8924 - /* Load class1 keys only */
8925 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8926 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8927 -
8928 - /* Load sector size with index 40 bytes (0x28) */
8929 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8930 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8931 - append_data(desc, (void *)&sector_size, 8);
8932 -
8933 - set_jump_tgt_here(desc, key_jump_cmd);
8934 -
8935 - /*
8936 - * create sequence for loading the sector index
8937 - * Upper 8B of IV - will be used as sector index
8938 - * Lower 8B of IV - will be discarded
8939 - */
8940 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8941 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8942 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8943 -
8944 - /* Load operation */
8945 - append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
8946 - OP_ALG_ENCRYPT);
8947 -
8948 - /* Perform operation */
8949 - ablkcipher_append_src_dst(desc);
8950 -
8951 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
8952 - DMA_TO_DEVICE);
8953 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8954 - dev_err(jrdev, "unable to map shared descriptor\n");
8955 - return -ENOMEM;
8956 - }
8957 -#ifdef DEBUG
8958 - print_hex_dump(KERN_ERR,
8959 - "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
8960 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8961 -#endif
8962 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
8963 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8964 + desc_bytes(desc), DMA_TO_DEVICE);
8965
8966 /* xts_ablkcipher_decrypt shared descriptor */
8967 desc = ctx->sh_desc_dec;
8968 -
8969 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8970 - /* Skip if already shared */
8971 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8972 - JUMP_COND_SHRD);
8973 -
8974 - /* Load class1 key only */
8975 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8976 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8977 -
8978 - /* Load sector size with index 40 bytes (0x28) */
8979 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8980 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8981 - append_data(desc, (void *)&sector_size, 8);
8982 -
8983 - set_jump_tgt_here(desc, key_jump_cmd);
8984 -
8985 - /*
8986 - * create sequence for loading the sector index
8987 - * Upper 8B of IV - will be used as sector index
8988 - * Lower 8B of IV - will be discarded
8989 - */
8990 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8991 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8992 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8993 -
8994 - /* Load operation */
8995 - append_dec_op1(desc, ctx->class1_alg_type);
8996 -
8997 - /* Perform operation */
8998 - ablkcipher_append_src_dst(desc);
8999 -
9000 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9001 - DMA_TO_DEVICE);
9002 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9003 - dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9004 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9005 - dev_err(jrdev, "unable to map shared descriptor\n");
9006 - return -ENOMEM;
9007 - }
9008 -#ifdef DEBUG
9009 - print_hex_dump(KERN_ERR,
9010 - "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9011 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9012 -#endif
9013 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9014 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9015 + desc_bytes(desc), DMA_TO_DEVICE);
9016
9017 return 0;
9018 }
9019
9020 /*
9021 * aead_edesc - s/w-extended aead descriptor
9022 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9023 - * @src_nents: number of segments in input scatterlist
9024 - * @dst_nents: number of segments in output scatterlist
9025 - * @iv_dma: dma address of iv for checking continuity and link table
9026 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9027 + * @src_nents: number of segments in input s/w scatterlist
9028 + * @dst_nents: number of segments in output s/w scatterlist
9029 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9030 * @sec4_sg_dma: bus physical mapped address of h/w link table
9031 + * @sec4_sg: pointer to h/w link table
9032 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9033 */
9034 struct aead_edesc {
9035 - int assoc_nents;
9036 int src_nents;
9037 int dst_nents;
9038 - dma_addr_t iv_dma;
9039 int sec4_sg_bytes;
9040 dma_addr_t sec4_sg_dma;
9041 struct sec4_sg_entry *sec4_sg;
9042 @@ -1899,12 +739,12 @@ struct aead_edesc {
9043
9044 /*
9045 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9046 - * @src_nents: number of segments in input scatterlist
9047 - * @dst_nents: number of segments in output scatterlist
9048 + * @src_nents: number of segments in input s/w scatterlist
9049 + * @dst_nents: number of segments in output s/w scatterlist
9050 * @iv_dma: dma address of iv for checking continuity and link table
9051 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9052 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9053 * @sec4_sg_dma: bus physical mapped address of h/w link table
9054 + * @sec4_sg: pointer to h/w link table
9055 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9056 */
9057 struct ablkcipher_edesc {
9058 @@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de
9059 int sec4_sg_bytes)
9060 {
9061 if (dst != src) {
9062 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9063 - dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9064 + if (src_nents)
9065 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9066 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9067 } else {
9068 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9069 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9070 }
9071
9072 if (iv_dma)
9073 @@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru
9074 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9075 #endif
9076
9077 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9078 - offsetof(struct ablkcipher_edesc, hw_desc));
9079 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9080
9081 if (err)
9082 caam_jr_strstatus(jrdev, err);
9083 @@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru
9084 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9085 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9086 edesc->src_nents > 1 ? 100 : ivsize, 1);
9087 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9088 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9089 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9090 #endif
9091 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9092 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9093 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9094
9095 ablkcipher_unmap(jrdev, edesc, req);
9096
9097 @@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru
9098 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9099 #endif
9100
9101 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9102 - offsetof(struct ablkcipher_edesc, hw_desc));
9103 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9104 if (err)
9105 caam_jr_strstatus(jrdev, err);
9106
9107 @@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru
9108 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9109 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9110 ivsize, 1);
9111 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9112 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9113 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9114 #endif
9115 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9116 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9117 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9118
9119 ablkcipher_unmap(jrdev, edesc, req);
9120
9121 @@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re
9122 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9123
9124 if (all_contig) {
9125 - src_dma = sg_dma_address(req->src);
9126 + src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9127 in_options = 0;
9128 } else {
9129 src_dma = edesc->sec4_sg_dma;
9130 @@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re
9131 out_options = in_options;
9132
9133 if (unlikely(req->src != req->dst)) {
9134 - if (!edesc->dst_nents) {
9135 + if (edesc->dst_nents == 1) {
9136 dst_dma = sg_dma_address(req->dst);
9137 } else {
9138 dst_dma = edesc->sec4_sg_dma +
9139 @@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req
9140 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9141 /* Append Salt */
9142 if (!generic_gcm)
9143 - append_data(desc, ctx->key + ctx->enckeylen, 4);
9144 + append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9145 /* Append IV */
9146 append_data(desc, req->iv, ivsize);
9147 /* End of blank commands */
9148 @@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead
9149 struct caam_aead_alg, aead);
9150 unsigned int ivsize = crypto_aead_ivsize(aead);
9151 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9152 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9153 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9154 OP_ALG_AAI_CTR_MOD128);
9155 const bool is_rfc3686 = alg->caam.rfc3686;
9156 u32 *desc = edesc->hw_desc;
9157 @@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_
9158 int len, sec4_sg_index = 0;
9159
9160 #ifdef DEBUG
9161 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9162 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9163 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9164 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9165 ivsize, 1);
9166 - printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9167 - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
9168 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9169 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9170 + pr_err("asked=%d, nbytes%d\n",
9171 + (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9172 #endif
9173 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
9174 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9175 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9176
9177 len = desc_len(sh_desc);
9178 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9179 @@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_
9180 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9181
9182 if (likely(req->src == req->dst)) {
9183 - if (!edesc->src_nents && iv_contig) {
9184 + if (edesc->src_nents == 1 && iv_contig) {
9185 dst_dma = sg_dma_address(req->src);
9186 } else {
9187 dst_dma = edesc->sec4_sg_dma +
9188 @@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_
9189 out_options = LDST_SGF;
9190 }
9191 } else {
9192 - if (!edesc->dst_nents) {
9193 + if (edesc->dst_nents == 1) {
9194 dst_dma = sg_dma_address(req->dst);
9195 } else {
9196 dst_dma = edesc->sec4_sg_dma +
9197 @@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32
9198 int len, sec4_sg_index = 0;
9199
9200 #ifdef DEBUG
9201 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9202 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9203 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9204 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9205 ivsize, 1);
9206 - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9207 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9208 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9209 #endif
9210 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9211 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9212 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9213
9214 len = desc_len(sh_desc);
9215 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9216
9217 - if (!edesc->src_nents) {
9218 + if (edesc->src_nents == 1) {
9219 src_dma = sg_dma_address(req->src);
9220 in_options = 0;
9221 } else {
9222 @@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all
9223 struct crypto_aead *aead = crypto_aead_reqtfm(req);
9224 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9225 struct device *jrdev = ctx->jrdev;
9226 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9227 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9228 - int src_nents, dst_nents = 0;
9229 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9230 + GFP_KERNEL : GFP_ATOMIC;
9231 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9232 struct aead_edesc *edesc;
9233 - int sgc;
9234 - bool all_contig = true;
9235 - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9236 + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9237 unsigned int authsize = ctx->authsize;
9238
9239 if (unlikely(req->dst != req->src)) {
9240 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9241 - dst_nents = sg_count(req->dst,
9242 - req->assoclen + req->cryptlen +
9243 - (encrypt ? authsize : (-authsize)));
9244 - } else {
9245 - src_nents = sg_count(req->src,
9246 - req->assoclen + req->cryptlen +
9247 - (encrypt ? authsize : 0));
9248 - }
9249 -
9250 - /* Check if data are contiguous. */
9251 - all_contig = !src_nents;
9252 - if (!all_contig) {
9253 - src_nents = src_nents ? : 1;
9254 - sec4_sg_len = src_nents;
9255 - }
9256 -
9257 - sec4_sg_len += dst_nents;
9258 -
9259 - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9260 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9261 + req->cryptlen);
9262 + if (unlikely(src_nents < 0)) {
9263 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9264 + req->assoclen + req->cryptlen);
9265 + return ERR_PTR(src_nents);
9266 + }
9267
9268 - /* allocate space for base edesc and hw desc commands, link tables */
9269 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9270 - GFP_DMA | flags);
9271 - if (!edesc) {
9272 - dev_err(jrdev, "could not allocate extended descriptor\n");
9273 - return ERR_PTR(-ENOMEM);
9274 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9275 + req->cryptlen +
9276 + (encrypt ? authsize :
9277 + (-authsize)));
9278 + if (unlikely(dst_nents < 0)) {
9279 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9280 + req->assoclen + req->cryptlen +
9281 + (encrypt ? authsize : (-authsize)));
9282 + return ERR_PTR(dst_nents);
9283 + }
9284 + } else {
9285 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9286 + req->cryptlen +
9287 + (encrypt ? authsize : 0));
9288 + if (unlikely(src_nents < 0)) {
9289 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9290 + req->assoclen + req->cryptlen +
9291 + (encrypt ? authsize : 0));
9292 + return ERR_PTR(src_nents);
9293 + }
9294 }
9295
9296 if (likely(req->src == req->dst)) {
9297 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9298 - DMA_BIDIRECTIONAL);
9299 - if (unlikely(!sgc)) {
9300 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9301 + DMA_BIDIRECTIONAL);
9302 + if (unlikely(!mapped_src_nents)) {
9303 dev_err(jrdev, "unable to map source\n");
9304 - kfree(edesc);
9305 return ERR_PTR(-ENOMEM);
9306 }
9307 } else {
9308 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9309 - DMA_TO_DEVICE);
9310 - if (unlikely(!sgc)) {
9311 - dev_err(jrdev, "unable to map source\n");
9312 - kfree(edesc);
9313 - return ERR_PTR(-ENOMEM);
9314 + /* Cover also the case of null (zero length) input data */
9315 + if (src_nents) {
9316 + mapped_src_nents = dma_map_sg(jrdev, req->src,
9317 + src_nents, DMA_TO_DEVICE);
9318 + if (unlikely(!mapped_src_nents)) {
9319 + dev_err(jrdev, "unable to map source\n");
9320 + return ERR_PTR(-ENOMEM);
9321 + }
9322 + } else {
9323 + mapped_src_nents = 0;
9324 }
9325
9326 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9327 - DMA_FROM_DEVICE);
9328 - if (unlikely(!sgc)) {
9329 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9330 + DMA_FROM_DEVICE);
9331 + if (unlikely(!mapped_dst_nents)) {
9332 dev_err(jrdev, "unable to map destination\n");
9333 - dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9334 - DMA_TO_DEVICE);
9335 - kfree(edesc);
9336 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9337 return ERR_PTR(-ENOMEM);
9338 }
9339 }
9340
9341 + sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9342 + sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9343 + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9344 +
9345 + /* allocate space for base edesc and hw desc commands, link tables */
9346 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9347 + GFP_DMA | flags);
9348 + if (!edesc) {
9349 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9350 + 0, 0, 0);
9351 + return ERR_PTR(-ENOMEM);
9352 + }
9353 +
9354 edesc->src_nents = src_nents;
9355 edesc->dst_nents = dst_nents;
9356 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9357 desc_bytes;
9358 - *all_contig_ptr = all_contig;
9359 + *all_contig_ptr = !(mapped_src_nents > 1);
9360
9361 sec4_sg_index = 0;
9362 - if (!all_contig) {
9363 - sg_to_sec4_sg_last(req->src, src_nents,
9364 - edesc->sec4_sg + sec4_sg_index, 0);
9365 - sec4_sg_index += src_nents;
9366 + if (mapped_src_nents > 1) {
9367 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9368 + edesc->sec4_sg + sec4_sg_index, 0);
9369 + sec4_sg_index += mapped_src_nents;
9370 }
9371 - if (dst_nents) {
9372 - sg_to_sec4_sg_last(req->dst, dst_nents,
9373 + if (mapped_dst_nents > 1) {
9374 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9375 edesc->sec4_sg + sec4_sg_index, 0);
9376 }
9377
9378 @@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ
9379 u32 *desc;
9380 int ret = 0;
9381
9382 -#ifdef DEBUG
9383 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9384 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9385 - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9386 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9387 - req->assoclen + req->cryptlen, 1, may_sleep);
9388 -#endif
9389 + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9390 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9391 + req->assoclen + req->cryptlen, 1);
9392
9393 /* allocate extended descriptor */
9394 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9395 @@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph
9396 struct device *jrdev = ctx->jrdev;
9397 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9398 GFP_KERNEL : GFP_ATOMIC;
9399 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9400 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9401 struct ablkcipher_edesc *edesc;
9402 dma_addr_t iv_dma = 0;
9403 - bool iv_contig = false;
9404 - int sgc;
9405 + bool in_contig;
9406 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9407 - int sec4_sg_index;
9408 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9409
9410 - src_nents = sg_count(req->src, req->nbytes);
9411 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9412 + if (unlikely(src_nents < 0)) {
9413 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9414 + req->nbytes);
9415 + return ERR_PTR(src_nents);
9416 + }
9417
9418 - if (req->dst != req->src)
9419 - dst_nents = sg_count(req->dst, req->nbytes);
9420 + if (req->dst != req->src) {
9421 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9422 + if (unlikely(dst_nents < 0)) {
9423 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9424 + req->nbytes);
9425 + return ERR_PTR(dst_nents);
9426 + }
9427 + }
9428
9429 if (likely(req->src == req->dst)) {
9430 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9431 - DMA_BIDIRECTIONAL);
9432 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9433 + DMA_BIDIRECTIONAL);
9434 + if (unlikely(!mapped_src_nents)) {
9435 + dev_err(jrdev, "unable to map source\n");
9436 + return ERR_PTR(-ENOMEM);
9437 + }
9438 } else {
9439 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9440 - DMA_TO_DEVICE);
9441 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9442 - DMA_FROM_DEVICE);
9443 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9444 + DMA_TO_DEVICE);
9445 + if (unlikely(!mapped_src_nents)) {
9446 + dev_err(jrdev, "unable to map source\n");
9447 + return ERR_PTR(-ENOMEM);
9448 + }
9449 +
9450 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9451 + DMA_FROM_DEVICE);
9452 + if (unlikely(!mapped_dst_nents)) {
9453 + dev_err(jrdev, "unable to map destination\n");
9454 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9455 + return ERR_PTR(-ENOMEM);
9456 + }
9457 }
9458
9459 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9460 if (dma_mapping_error(jrdev, iv_dma)) {
9461 dev_err(jrdev, "unable to map IV\n");
9462 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9463 + 0, 0, 0);
9464 return ERR_PTR(-ENOMEM);
9465 }
9466
9467 - /*
9468 - * Check if iv can be contiguous with source and destination.
9469 - * If so, include it. If not, create scatterlist.
9470 - */
9471 - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9472 - iv_contig = true;
9473 - else
9474 - src_nents = src_nents ? : 1;
9475 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9476 - sizeof(struct sec4_sg_entry);
9477 + if (mapped_src_nents == 1 &&
9478 + iv_dma + ivsize == sg_dma_address(req->src)) {
9479 + in_contig = true;
9480 + sec4_sg_ents = 0;
9481 + } else {
9482 + in_contig = false;
9483 + sec4_sg_ents = 1 + mapped_src_nents;
9484 + }
9485 + dst_sg_idx = sec4_sg_ents;
9486 + sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9487 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9488
9489 /* allocate space for base edesc and hw desc commands, link tables */
9490 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9491 GFP_DMA | flags);
9492 if (!edesc) {
9493 dev_err(jrdev, "could not allocate extended descriptor\n");
9494 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9495 + iv_dma, ivsize, 0, 0);
9496 return ERR_PTR(-ENOMEM);
9497 }
9498
9499 @@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph
9500 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9501 desc_bytes;
9502
9503 - sec4_sg_index = 0;
9504 - if (!iv_contig) {
9505 + if (!in_contig) {
9506 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9507 - sg_to_sec4_sg_last(req->src, src_nents,
9508 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9509 edesc->sec4_sg + 1, 0);
9510 - sec4_sg_index += 1 + src_nents;
9511 }
9512
9513 - if (dst_nents) {
9514 - sg_to_sec4_sg_last(req->dst, dst_nents,
9515 - edesc->sec4_sg + sec4_sg_index, 0);
9516 + if (mapped_dst_nents > 1) {
9517 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9518 + edesc->sec4_sg + dst_sg_idx, 0);
9519 }
9520
9521 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9522 sec4_sg_bytes, DMA_TO_DEVICE);
9523 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9524 dev_err(jrdev, "unable to map S/G table\n");
9525 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9526 + iv_dma, ivsize, 0, 0);
9527 + kfree(edesc);
9528 return ERR_PTR(-ENOMEM);
9529 }
9530
9531 @@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph
9532 sec4_sg_bytes, 1);
9533 #endif
9534
9535 - *iv_contig_out = iv_contig;
9536 + *iv_contig_out = in_contig;
9537 return edesc;
9538 }
9539
9540 @@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph
9541 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9542 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9543 struct device *jrdev = ctx->jrdev;
9544 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9545 - CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9546 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9547 GFP_KERNEL : GFP_ATOMIC;
9548 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9549 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9550 struct ablkcipher_edesc *edesc;
9551 dma_addr_t iv_dma = 0;
9552 - bool iv_contig = false;
9553 - int sgc;
9554 + bool out_contig;
9555 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9556 - int sec4_sg_index;
9557 -
9558 - src_nents = sg_count(req->src, req->nbytes);
9559 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9560
9561 - if (unlikely(req->dst != req->src))
9562 - dst_nents = sg_count(req->dst, req->nbytes);
9563 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9564 + if (unlikely(src_nents < 0)) {
9565 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9566 + req->nbytes);
9567 + return ERR_PTR(src_nents);
9568 + }
9569
9570 if (likely(req->src == req->dst)) {
9571 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9572 - DMA_BIDIRECTIONAL);
9573 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9574 + DMA_BIDIRECTIONAL);
9575 + if (unlikely(!mapped_src_nents)) {
9576 + dev_err(jrdev, "unable to map source\n");
9577 + return ERR_PTR(-ENOMEM);
9578 + }
9579 +
9580 + dst_nents = src_nents;
9581 + mapped_dst_nents = src_nents;
9582 } else {
9583 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9584 - DMA_TO_DEVICE);
9585 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9586 - DMA_FROM_DEVICE);
9587 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9588 + DMA_TO_DEVICE);
9589 + if (unlikely(!mapped_src_nents)) {
9590 + dev_err(jrdev, "unable to map source\n");
9591 + return ERR_PTR(-ENOMEM);
9592 + }
9593 +
9594 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9595 + if (unlikely(dst_nents < 0)) {
9596 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9597 + req->nbytes);
9598 + return ERR_PTR(dst_nents);
9599 + }
9600 +
9601 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9602 + DMA_FROM_DEVICE);
9603 + if (unlikely(!mapped_dst_nents)) {
9604 + dev_err(jrdev, "unable to map destination\n");
9605 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9606 + return ERR_PTR(-ENOMEM);
9607 + }
9608 }
9609
9610 /*
9611 @@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph
9612 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9613 if (dma_mapping_error(jrdev, iv_dma)) {
9614 dev_err(jrdev, "unable to map IV\n");
9615 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9616 + 0, 0, 0);
9617 return ERR_PTR(-ENOMEM);
9618 }
9619
9620 - if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9621 - iv_contig = true;
9622 - else
9623 - dst_nents = dst_nents ? : 1;
9624 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9625 - sizeof(struct sec4_sg_entry);
9626 + sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9627 + dst_sg_idx = sec4_sg_ents;
9628 + if (mapped_dst_nents == 1 &&
9629 + iv_dma + ivsize == sg_dma_address(req->dst)) {
9630 + out_contig = true;
9631 + } else {
9632 + out_contig = false;
9633 + sec4_sg_ents += 1 + mapped_dst_nents;
9634 + }
9635
9636 /* allocate space for base edesc and hw desc commands, link tables */
9637 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9638 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9639 GFP_DMA | flags);
9640 if (!edesc) {
9641 dev_err(jrdev, "could not allocate extended descriptor\n");
9642 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9643 + iv_dma, ivsize, 0, 0);
9644 return ERR_PTR(-ENOMEM);
9645 }
9646
9647 @@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph
9648 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9649 desc_bytes;
9650
9651 - sec4_sg_index = 0;
9652 - if (src_nents) {
9653 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9654 - sec4_sg_index += src_nents;
9655 - }
9656 + if (mapped_src_nents > 1)
9657 + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9658 + 0);
9659
9660 - if (!iv_contig) {
9661 - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9662 + if (!out_contig) {
9663 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9664 iv_dma, ivsize, 0);
9665 - sec4_sg_index += 1;
9666 - sg_to_sec4_sg_last(req->dst, dst_nents,
9667 - edesc->sec4_sg + sec4_sg_index, 0);
9668 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9669 + edesc->sec4_sg + dst_sg_idx + 1, 0);
9670 }
9671
9672 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9673 sec4_sg_bytes, DMA_TO_DEVICE);
9674 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9675 dev_err(jrdev, "unable to map S/G table\n");
9676 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9677 + iv_dma, ivsize, 0, 0);
9678 + kfree(edesc);
9679 return ERR_PTR(-ENOMEM);
9680 }
9681 edesc->iv_dma = iv_dma;
9682 @@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
9683 sec4_sg_bytes, 1);
9684 #endif
9685
9686 - *iv_contig_out = iv_contig;
9687 + *iv_contig_out = out_contig;
9688 return edesc;
9689 }
9690
9691 @@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct
9692 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9693 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9694 struct device *jrdev = ctx->jrdev;
9695 - bool iv_contig;
9696 + bool iv_contig = false;
9697 u32 *desc;
9698 int ret = 0;
9699
9700 @@ -2933,7 +1840,6 @@ struct caam_alg_template {
9701 } template_u;
9702 u32 class1_alg_type;
9703 u32 class2_alg_type;
9704 - u32 alg_op;
9705 };
9706
9707 static struct caam_alg_template driver_algs[] = {
9708 @@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads
9709 .caam = {
9710 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9711 OP_ALG_AAI_HMAC_PRECOMP,
9712 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9713 },
9714 },
9715 {
9716 @@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads
9717 .caam = {
9718 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9719 OP_ALG_AAI_HMAC_PRECOMP,
9720 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9721 },
9722 },
9723 {
9724 @@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads
9725 .caam = {
9726 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9727 OP_ALG_AAI_HMAC_PRECOMP,
9728 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9729 },
9730 },
9731 {
9732 @@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads
9733 .caam = {
9734 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9735 OP_ALG_AAI_HMAC_PRECOMP,
9736 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9737 },
9738 },
9739 {
9740 @@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads
9741 .caam = {
9742 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9743 OP_ALG_AAI_HMAC_PRECOMP,
9744 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9745 },
9746 },
9747 {
9748 @@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads
9749 .caam = {
9750 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9751 OP_ALG_AAI_HMAC_PRECOMP,
9752 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9753 },
9754 },
9755 {
9756 @@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads
9757 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9758 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9759 OP_ALG_AAI_HMAC_PRECOMP,
9760 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9761 },
9762 },
9763 {
9764 @@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads
9765 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9766 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9767 OP_ALG_AAI_HMAC_PRECOMP,
9768 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9769 .geniv = true,
9770 },
9771 },
9772 @@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads
9773 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9774 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9775 OP_ALG_AAI_HMAC_PRECOMP,
9776 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9777 },
9778 },
9779 {
9780 @@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads
9781 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9782 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9783 OP_ALG_AAI_HMAC_PRECOMP,
9784 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9785 .geniv = true,
9786 },
9787 },
9788 @@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads
9789 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9790 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9791 OP_ALG_AAI_HMAC_PRECOMP,
9792 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9793 },
9794 },
9795 {
9796 @@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads
9797 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9798 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9799 OP_ALG_AAI_HMAC_PRECOMP,
9800 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9801 .geniv = true,
9802 },
9803 },
9804 @@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads
9805 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9806 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9807 OP_ALG_AAI_HMAC_PRECOMP,
9808 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9809 },
9810 },
9811 {
9812 @@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads
9813 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9814 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9815 OP_ALG_AAI_HMAC_PRECOMP,
9816 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9817 .geniv = true,
9818 },
9819 },
9820 @@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads
9821 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9822 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9823 OP_ALG_AAI_HMAC_PRECOMP,
9824 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9825 },
9826 },
9827 {
9828 @@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads
9829 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9830 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9831 OP_ALG_AAI_HMAC_PRECOMP,
9832 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9833 .geniv = true,
9834 },
9835 },
9836 @@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads
9837 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9838 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9839 OP_ALG_AAI_HMAC_PRECOMP,
9840 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9841 },
9842 },
9843 {
9844 @@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads
9845 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9846 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9847 OP_ALG_AAI_HMAC_PRECOMP,
9848 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9849 .geniv = true,
9850 },
9851 },
9852 @@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads
9853 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9854 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9855 OP_ALG_AAI_HMAC_PRECOMP,
9856 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9857 }
9858 },
9859 {
9860 @@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads
9861 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9862 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9863 OP_ALG_AAI_HMAC_PRECOMP,
9864 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9865 .geniv = true,
9866 }
9867 },
9868 @@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads
9869 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9870 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9871 OP_ALG_AAI_HMAC_PRECOMP,
9872 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9873 },
9874 },
9875 {
9876 @@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads
9877 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9878 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9879 OP_ALG_AAI_HMAC_PRECOMP,
9880 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9881 .geniv = true,
9882 },
9883 },
9884 @@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads
9885 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9886 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9887 OP_ALG_AAI_HMAC_PRECOMP,
9888 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9889 },
9890 },
9891 {
9892 @@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads
9893 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9894 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9895 OP_ALG_AAI_HMAC_PRECOMP,
9896 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9897 .geniv = true,
9898 },
9899 },
9900 @@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads
9901 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9902 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9903 OP_ALG_AAI_HMAC_PRECOMP,
9904 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9905 },
9906 },
9907 {
9908 @@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads
9909 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9910 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9911 OP_ALG_AAI_HMAC_PRECOMP,
9912 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9913 .geniv = true,
9914 },
9915 },
9916 @@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads
9917 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9918 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9919 OP_ALG_AAI_HMAC_PRECOMP,
9920 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9921 },
9922 },
9923 {
9924 @@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads
9925 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9926 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9927 OP_ALG_AAI_HMAC_PRECOMP,
9928 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9929 .geniv = true,
9930 },
9931 },
9932 @@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads
9933 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9934 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9935 OP_ALG_AAI_HMAC_PRECOMP,
9936 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9937 },
9938 },
9939 {
9940 @@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads
9941 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9942 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9943 OP_ALG_AAI_HMAC_PRECOMP,
9944 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9945 .geniv = true,
9946 },
9947 },
9948 @@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads
9949 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9950 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9951 OP_ALG_AAI_HMAC_PRECOMP,
9952 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9953 },
9954 },
9955 {
9956 @@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads
9957 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9958 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9959 OP_ALG_AAI_HMAC_PRECOMP,
9960 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9961 .geniv = true,
9962 },
9963 },
9964 @@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads
9965 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9966 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9967 OP_ALG_AAI_HMAC_PRECOMP,
9968 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9969 },
9970 },
9971 {
9972 @@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads
9973 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9974 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9975 OP_ALG_AAI_HMAC_PRECOMP,
9976 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9977 .geniv = true,
9978 },
9979 },
9980 @@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads
9981 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9982 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9983 OP_ALG_AAI_HMAC_PRECOMP,
9984 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9985 },
9986 },
9987 {
9988 @@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads
9989 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9990 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9991 OP_ALG_AAI_HMAC_PRECOMP,
9992 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9993 .geniv = true,
9994 },
9995 },
9996 @@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads
9997 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9998 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9999 OP_ALG_AAI_HMAC_PRECOMP,
10000 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10001 },
10002 },
10003 {
10004 @@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads
10005 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10006 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10007 OP_ALG_AAI_HMAC_PRECOMP,
10008 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10009 .geniv = true,
10010 },
10011 },
10012 @@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads
10013 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10014 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10015 OP_ALG_AAI_HMAC_PRECOMP,
10016 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10017 },
10018 },
10019 {
10020 @@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads
10021 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10022 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10023 OP_ALG_AAI_HMAC_PRECOMP,
10024 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10025 .geniv = true,
10026 },
10027 },
10028 @@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads
10029 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10030 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10031 OP_ALG_AAI_HMAC_PRECOMP,
10032 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10033 },
10034 },
10035 {
10036 @@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads
10037 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10038 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10039 OP_ALG_AAI_HMAC_PRECOMP,
10040 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10041 .geniv = true,
10042 },
10043 },
10044 @@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads
10045 OP_ALG_AAI_CTR_MOD128,
10046 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10047 OP_ALG_AAI_HMAC_PRECOMP,
10048 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10049 .rfc3686 = true,
10050 },
10051 },
10052 @@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads
10053 OP_ALG_AAI_CTR_MOD128,
10054 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10055 OP_ALG_AAI_HMAC_PRECOMP,
10056 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10057 .rfc3686 = true,
10058 .geniv = true,
10059 },
10060 @@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads
10061 OP_ALG_AAI_CTR_MOD128,
10062 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10063 OP_ALG_AAI_HMAC_PRECOMP,
10064 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10065 .rfc3686 = true,
10066 },
10067 },
10068 @@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads
10069 OP_ALG_AAI_CTR_MOD128,
10070 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10071 OP_ALG_AAI_HMAC_PRECOMP,
10072 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10073 .rfc3686 = true,
10074 .geniv = true,
10075 },
10076 @@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads
10077 OP_ALG_AAI_CTR_MOD128,
10078 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10079 OP_ALG_AAI_HMAC_PRECOMP,
10080 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10081 .rfc3686 = true,
10082 },
10083 },
10084 @@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads
10085 OP_ALG_AAI_CTR_MOD128,
10086 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10087 OP_ALG_AAI_HMAC_PRECOMP,
10088 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10089 .rfc3686 = true,
10090 .geniv = true,
10091 },
10092 @@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads
10093 OP_ALG_AAI_CTR_MOD128,
10094 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10095 OP_ALG_AAI_HMAC_PRECOMP,
10096 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10097 .rfc3686 = true,
10098 },
10099 },
10100 @@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads
10101 OP_ALG_AAI_CTR_MOD128,
10102 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10103 OP_ALG_AAI_HMAC_PRECOMP,
10104 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10105 .rfc3686 = true,
10106 .geniv = true,
10107 },
10108 @@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads
10109 OP_ALG_AAI_CTR_MOD128,
10110 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10111 OP_ALG_AAI_HMAC_PRECOMP,
10112 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10113 .rfc3686 = true,
10114 },
10115 },
10116 @@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads
10117 OP_ALG_AAI_CTR_MOD128,
10118 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10119 OP_ALG_AAI_HMAC_PRECOMP,
10120 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10121 .rfc3686 = true,
10122 .geniv = true,
10123 },
10124 @@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads
10125 OP_ALG_AAI_CTR_MOD128,
10126 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10127 OP_ALG_AAI_HMAC_PRECOMP,
10128 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10129 .rfc3686 = true,
10130 },
10131 },
10132 @@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads
10133 OP_ALG_AAI_CTR_MOD128,
10134 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10135 OP_ALG_AAI_HMAC_PRECOMP,
10136 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10137 .rfc3686 = true,
10138 .geniv = true,
10139 },
10140 @@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
10141
10142 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10143 {
10144 + dma_addr_t dma_addr;
10145 +
10146 ctx->jrdev = caam_jr_alloc();
10147 if (IS_ERR(ctx->jrdev)) {
10148 pr_err("Job Ring Device allocation for transform failed\n");
10149 return PTR_ERR(ctx->jrdev);
10150 }
10151
10152 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10153 + offsetof(struct caam_ctx,
10154 + sh_desc_enc_dma),
10155 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10156 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10157 + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10158 + caam_jr_free(ctx->jrdev);
10159 + return -ENOMEM;
10160 + }
10161 +
10162 + ctx->sh_desc_enc_dma = dma_addr;
10163 + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10164 + sh_desc_dec);
10165 + ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10166 + sh_desc_givenc);
10167 + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10168 +
10169 /* copy descriptor header template value */
10170 - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10171 - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10172 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10173 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10174 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10175
10176 return 0;
10177 }
10178 @@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_
10179
10180 static void caam_exit_common(struct caam_ctx *ctx)
10181 {
10182 - if (ctx->sh_desc_enc_dma &&
10183 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10184 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10185 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10186 - if (ctx->sh_desc_dec_dma &&
10187 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10188 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10189 - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10190 - if (ctx->sh_desc_givenc_dma &&
10191 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10192 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10193 - desc_bytes(ctx->sh_desc_givenc),
10194 - DMA_TO_DEVICE);
10195 - if (ctx->key_dma &&
10196 - !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10197 - dma_unmap_single(ctx->jrdev, ctx->key_dma,
10198 - ctx->enckeylen + ctx->split_key_pad_len,
10199 - DMA_TO_DEVICE);
10200 -
10201 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10202 + offsetof(struct caam_ctx, sh_desc_enc_dma),
10203 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10204 caam_jr_free(ctx->jrdev);
10205 }
10206
10207 @@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_
10208
10209 t_alg->caam.class1_alg_type = template->class1_alg_type;
10210 t_alg->caam.class2_alg_type = template->class2_alg_type;
10211 - t_alg->caam.alg_op = template->alg_op;
10212
10213 return t_alg;
10214 }
10215 --- /dev/null
10216 +++ b/drivers/crypto/caam/caamalg_desc.c
10217 @@ -0,0 +1,1913 @@
10218 +/*
10219 + * Shared descriptors for aead, ablkcipher algorithms
10220 + *
10221 + * Copyright 2016 NXP
10222 + */
10223 +
10224 +#include "compat.h"
10225 +#include "desc_constr.h"
10226 +#include "caamalg_desc.h"
10227 +
10228 +/*
10229 + * For aead functions, read payload and write payload,
10230 + * both of which are specified in req->src and req->dst
10231 + */
10232 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10233 +{
10234 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10235 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10236 + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10237 +}
10238 +
10239 +/* Set DK bit in class 1 operation if shared */
10240 +static inline void append_dec_op1(u32 *desc, u32 type)
10241 +{
10242 + u32 *jump_cmd, *uncond_jump_cmd;
10243 +
10244 + /* DK bit is valid only for AES */
10245 + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10246 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10247 + OP_ALG_DECRYPT);
10248 + return;
10249 + }
10250 +
10251 + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10252 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10253 + OP_ALG_DECRYPT);
10254 + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10255 + set_jump_tgt_here(desc, jump_cmd);
10256 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10257 + OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10258 + set_jump_tgt_here(desc, uncond_jump_cmd);
10259 +}
10260 +
10261 +/**
10262 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10263 + * (non-protocol) with no (null) encryption.
10264 + * @desc: pointer to buffer used for descriptor construction
10265 + * @adata: pointer to authentication transform definitions. Note that since a
10266 + * split key is to be used, the size of the split key itself is
10267 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10268 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10269 + * @icvsize: integrity check value (ICV) size (truncated or full)
10270 + *
10271 + * Note: Requires an MDHA split key.
10272 + */
10273 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10274 + unsigned int icvsize)
10275 +{
10276 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10277 +
10278 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10279 +
10280 + /* Skip if already shared */
10281 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10282 + JUMP_COND_SHRD);
10283 + if (adata->key_inline)
10284 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10285 + adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10286 + KEY_ENC);
10287 + else
10288 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10289 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10290 + set_jump_tgt_here(desc, key_jump_cmd);
10291 +
10292 + /* assoclen + cryptlen = seqinlen */
10293 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10294 +
10295 + /* Prepare to read and write cryptlen + assoclen bytes */
10296 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10297 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10298 +
10299 + /*
10300 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10301 + * thus need to do some magic, i.e. self-patch the descriptor
10302 + * buffer.
10303 + */
10304 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10305 + MOVE_DEST_MATH3 |
10306 + (0x6 << MOVE_LEN_SHIFT));
10307 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10308 + MOVE_DEST_DESCBUF |
10309 + MOVE_WAITCOMP |
10310 + (0x8 << MOVE_LEN_SHIFT));
10311 +
10312 + /* Class 2 operation */
10313 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10314 + OP_ALG_ENCRYPT);
10315 +
10316 + /* Read and write cryptlen bytes */
10317 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10318 +
10319 + set_move_tgt_here(desc, read_move_cmd);
10320 + set_move_tgt_here(desc, write_move_cmd);
10321 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10322 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10323 + MOVE_AUX_LS);
10324 +
10325 + /* Write ICV */
10326 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10327 + LDST_SRCDST_BYTE_CONTEXT);
10328 +
10329 +#ifdef DEBUG
10330 + print_hex_dump(KERN_ERR,
10331 + "aead null enc shdesc@" __stringify(__LINE__)": ",
10332 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10333 +#endif
10334 +}
10335 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
10336 +
10337 +/**
10338 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10339 + * (non-protocol) with no (null) decryption.
10340 + * @desc: pointer to buffer used for descriptor construction
10341 + * @adata: pointer to authentication transform definitions. Note that since a
10342 + * split key is to be used, the size of the split key itself is
10343 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10344 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10345 + * @icvsize: integrity check value (ICV) size (truncated or full)
10346 + *
10347 + * Note: Requires an MDHA split key.
10348 + */
10349 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10350 + unsigned int icvsize)
10351 +{
10352 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10353 +
10354 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10355 +
10356 + /* Skip if already shared */
10357 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10358 + JUMP_COND_SHRD);
10359 + if (adata->key_inline)
10360 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10361 + adata->keylen, CLASS_2 |
10362 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10363 + else
10364 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10365 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10366 + set_jump_tgt_here(desc, key_jump_cmd);
10367 +
10368 + /* Class 2 operation */
10369 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10370 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10371 +
10372 + /* assoclen + cryptlen = seqoutlen */
10373 + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10374 +
10375 + /* Prepare to read and write cryptlen + assoclen bytes */
10376 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10377 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10378 +
10379 + /*
10380 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10381 + * thus need to do some magic, i.e. self-patch the descriptor
10382 + * buffer.
10383 + */
10384 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10385 + MOVE_DEST_MATH2 |
10386 + (0x6 << MOVE_LEN_SHIFT));
10387 + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10388 + MOVE_DEST_DESCBUF |
10389 + MOVE_WAITCOMP |
10390 + (0x8 << MOVE_LEN_SHIFT));
10391 +
10392 + /* Read and write cryptlen bytes */
10393 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10394 +
10395 + /*
10396 + * Insert a NOP here, since we need at least 4 instructions between
10397 + * code patching the descriptor buffer and the location being patched.
10398 + */
10399 + jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10400 + set_jump_tgt_here(desc, jump_cmd);
10401 +
10402 + set_move_tgt_here(desc, read_move_cmd);
10403 + set_move_tgt_here(desc, write_move_cmd);
10404 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10405 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10406 + MOVE_AUX_LS);
10407 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10408 +
10409 + /* Load ICV */
10410 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10411 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10412 +
10413 +#ifdef DEBUG
10414 + print_hex_dump(KERN_ERR,
10415 + "aead null dec shdesc@" __stringify(__LINE__)": ",
10416 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10417 +#endif
10418 +}
10419 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10420 +
10421 +static void init_sh_desc_key_aead(u32 * const desc,
10422 + struct alginfo * const cdata,
10423 + struct alginfo * const adata,
10424 + const bool is_rfc3686, u32 *nonce)
10425 +{
10426 + u32 *key_jump_cmd;
10427 + unsigned int enckeylen = cdata->keylen;
10428 +
10429 + /* Note: Context registers are saved. */
10430 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10431 +
10432 + /* Skip if already shared */
10433 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10434 + JUMP_COND_SHRD);
10435 +
10436 + /*
10437 + * RFC3686 specific:
10438 + * | key = {AUTH_KEY, ENC_KEY, NONCE}
10439 + * | enckeylen = encryption key size + nonce size
10440 + */
10441 + if (is_rfc3686)
10442 + enckeylen -= CTR_RFC3686_NONCE_SIZE;
10443 +
10444 + if (adata->key_inline)
10445 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10446 + adata->keylen, CLASS_2 |
10447 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10448 + else
10449 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10450 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10451 +
10452 + if (cdata->key_inline)
10453 + append_key_as_imm(desc, cdata->key_virt, enckeylen,
10454 + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10455 + else
10456 + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10457 + KEY_DEST_CLASS_REG);
10458 +
10459 + /* Load Counter into CONTEXT1 reg */
10460 + if (is_rfc3686) {
10461 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10462 + LDST_CLASS_IND_CCB |
10463 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10464 + append_move(desc,
10465 + MOVE_SRC_OUTFIFO |
10466 + MOVE_DEST_CLASS1CTX |
10467 + (16 << MOVE_OFFSET_SHIFT) |
10468 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10469 + }
10470 +
10471 + set_jump_tgt_here(desc, key_jump_cmd);
10472 +}
10473 +
10474 +/**
10475 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10476 + * (non-protocol).
10477 + * @desc: pointer to buffer used for descriptor construction
10478 + * @cdata: pointer to block cipher transform definitions
10479 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10480 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10481 + * @adata: pointer to authentication transform definitions. Note that since a
10482 + * split key is to be used, the size of the split key itself is
10483 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10484 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10485 + * @ivsize: initialization vector size
10486 + * @icvsize: integrity check value (ICV) size (truncated or full)
10487 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10488 + * @nonce: pointer to rfc3686 nonce
10489 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10490 + * @is_qi: true when called from caam/qi
10491 + *
10492 + * Note: Requires an MDHA split key.
10493 + */
10494 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10495 + struct alginfo *adata, unsigned int ivsize,
10496 + unsigned int icvsize, const bool is_rfc3686,
10497 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
10498 +{
10499 + /* Note: Context registers are saved. */
10500 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10501 +
10502 + /* Class 2 operation */
10503 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10504 + OP_ALG_ENCRYPT);
10505 +
10506 + if (is_qi) {
10507 + u32 *wait_load_cmd;
10508 +
10509 + /* REG3 = assoclen */
10510 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10511 + LDST_SRCDST_WORD_DECO_MATH3 |
10512 + (4 << LDST_OFFSET_SHIFT));
10513 +
10514 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10515 + JUMP_COND_CALM | JUMP_COND_NCP |
10516 + JUMP_COND_NOP | JUMP_COND_NIP |
10517 + JUMP_COND_NIFP);
10518 + set_jump_tgt_here(desc, wait_load_cmd);
10519 +
10520 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10521 + LDST_SRCDST_BYTE_CONTEXT |
10522 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10523 + }
10524 +
10525 + /* Read and write assoclen bytes */
10526 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10527 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10528 +
10529 + /* Skip assoc data */
10530 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10531 +
10532 + /* read assoc before reading payload */
10533 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10534 + FIFOLDST_VLF);
10535 +
10536 + /* Load Counter into CONTEXT1 reg */
10537 + if (is_rfc3686)
10538 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10539 + LDST_SRCDST_BYTE_CONTEXT |
10540 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10541 + LDST_OFFSET_SHIFT));
10542 +
10543 + /* Class 1 operation */
10544 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10545 + OP_ALG_ENCRYPT);
10546 +
10547 + /* Read and write cryptlen bytes */
10548 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10549 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10550 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10551 +
10552 + /* Write ICV */
10553 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10554 + LDST_SRCDST_BYTE_CONTEXT);
10555 +
10556 +#ifdef DEBUG
10557 + print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10558 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10559 +#endif
10560 +}
10561 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
10562 +
10563 +/**
10564 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10565 + * (non-protocol).
10566 + * @desc: pointer to buffer used for descriptor construction
10567 + * @cdata: pointer to block cipher transform definitions
10568 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10569 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10570 + * @adata: pointer to authentication transform definitions. Note that since a
10571 + * split key is to be used, the size of the split key itself is
10572 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10573 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10574 + * @ivsize: initialization vector size
10575 + * @icvsize: integrity check value (ICV) size (truncated or full)
10576 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10577 + * @nonce: pointer to rfc3686 nonce
10578 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10579 + * @is_qi: true when called from caam/qi
10580 + *
10581 + * Note: Requires an MDHA split key.
10582 + */
10583 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10584 + struct alginfo *adata, unsigned int ivsize,
10585 + unsigned int icvsize, const bool geniv,
10586 + const bool is_rfc3686, u32 *nonce,
10587 + const u32 ctx1_iv_off, const bool is_qi)
10588 +{
10589 + /* Note: Context registers are saved. */
10590 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10591 +
10592 + /* Class 2 operation */
10593 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10594 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10595 +
10596 + if (is_qi) {
10597 + u32 *wait_load_cmd;
10598 +
10599 + /* REG3 = assoclen */
10600 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10601 + LDST_SRCDST_WORD_DECO_MATH3 |
10602 + (4 << LDST_OFFSET_SHIFT));
10603 +
10604 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10605 + JUMP_COND_CALM | JUMP_COND_NCP |
10606 + JUMP_COND_NOP | JUMP_COND_NIP |
10607 + JUMP_COND_NIFP);
10608 + set_jump_tgt_here(desc, wait_load_cmd);
10609 +
10610 + if (!geniv)
10611 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10612 + LDST_SRCDST_BYTE_CONTEXT |
10613 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10614 + }
10615 +
10616 + /* Read and write assoclen bytes */
10617 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10618 + if (geniv)
10619 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
10620 + else
10621 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10622 +
10623 + /* Skip assoc data */
10624 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10625 +
10626 + /* read assoc before reading payload */
10627 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10628 + KEY_VLF);
10629 +
10630 + if (geniv) {
10631 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10632 + LDST_SRCDST_BYTE_CONTEXT |
10633 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10634 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10635 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10636 + }
10637 +
10638 + /* Load Counter into CONTEXT1 reg */
10639 + if (is_rfc3686)
10640 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10641 + LDST_SRCDST_BYTE_CONTEXT |
10642 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10643 + LDST_OFFSET_SHIFT));
10644 +
10645 + /* Choose operation */
10646 + if (ctx1_iv_off)
10647 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10648 + OP_ALG_DECRYPT);
10649 + else
10650 + append_dec_op1(desc, cdata->algtype);
10651 +
10652 + /* Read and write cryptlen bytes */
10653 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10654 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10655 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10656 +
10657 + /* Load ICV */
10658 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10659 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10660 +
10661 +#ifdef DEBUG
10662 + print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10663 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10664 +#endif
10665 +}
10666 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10667 +
10668 +/**
10669 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10670 + * (non-protocol) with HW-generated initialization
10671 + * vector.
10672 + * @desc: pointer to buffer used for descriptor construction
10673 + * @cdata: pointer to block cipher transform definitions
10674 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10675 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10676 + * @adata: pointer to authentication transform definitions. Note that since a
10677 + * split key is to be used, the size of the split key itself is
10678 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10679 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10680 + * @ivsize: initialization vector size
10681 + * @icvsize: integrity check value (ICV) size (truncated or full)
10682 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10683 + * @nonce: pointer to rfc3686 nonce
10684 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10685 + * @is_qi: true when called from caam/qi
10686 + *
10687 + * Note: Requires an MDHA split key.
10688 + */
10689 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10690 + struct alginfo *adata, unsigned int ivsize,
10691 + unsigned int icvsize, const bool is_rfc3686,
10692 + u32 *nonce, const u32 ctx1_iv_off,
10693 + const bool is_qi)
10694 +{
10695 + u32 geniv, moveiv;
10696 +
10697 + /* Note: Context registers are saved. */
10698 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10699 +
10700 + if (is_qi) {
10701 + u32 *wait_load_cmd;
10702 +
10703 + /* REG3 = assoclen */
10704 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10705 + LDST_SRCDST_WORD_DECO_MATH3 |
10706 + (4 << LDST_OFFSET_SHIFT));
10707 +
10708 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10709 + JUMP_COND_CALM | JUMP_COND_NCP |
10710 + JUMP_COND_NOP | JUMP_COND_NIP |
10711 + JUMP_COND_NIFP);
10712 + set_jump_tgt_here(desc, wait_load_cmd);
10713 + }
10714 +
10715 + if (is_rfc3686) {
10716 + if (is_qi)
10717 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10718 + LDST_SRCDST_BYTE_CONTEXT |
10719 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10720 +
10721 + goto copy_iv;
10722 + }
10723 +
10724 + /* Generate IV */
10725 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10726 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10727 + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10728 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10729 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10730 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10731 + append_move(desc, MOVE_WAITCOMP |
10732 + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10733 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10734 + (ivsize << MOVE_LEN_SHIFT));
10735 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10736 +
10737 +copy_iv:
10738 + /* Copy IV to class 1 context */
10739 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10740 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10741 + (ivsize << MOVE_LEN_SHIFT));
10742 +
10743 + /* Return to encryption */
10744 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10745 + OP_ALG_ENCRYPT);
10746 +
10747 + /* Read and write assoclen bytes */
10748 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10749 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10750 +
10751 + /* Skip assoc data */
10752 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10753 +
10754 + /* read assoc before reading payload */
10755 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10756 + KEY_VLF);
10757 +
10758 + /* Copy iv from outfifo to class 2 fifo */
10759 + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10760 + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10761 + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10762 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10763 + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10764 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10765 +
10766 + /* Load Counter into CONTEXT1 reg */
10767 + if (is_rfc3686)
10768 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10769 + LDST_SRCDST_BYTE_CONTEXT |
10770 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10771 + LDST_OFFSET_SHIFT));
10772 +
10773 + /* Class 1 operation */
10774 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10775 + OP_ALG_ENCRYPT);
10776 +
10777 + /* Will write ivsize + cryptlen */
10778 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10779 +
10780 +	/* No need to reload iv */
10781 + append_seq_fifo_load(desc, ivsize,
10782 + FIFOLD_CLASS_SKIP);
10783 +
10784 + /* Will read cryptlen */
10785 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10786 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10787 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10788 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10789 +
10790 + /* Write ICV */
10791 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10792 + LDST_SRCDST_BYTE_CONTEXT);
10793 +
10794 +#ifdef DEBUG
10795 + print_hex_dump(KERN_ERR,
10796 + "aead givenc shdesc@" __stringify(__LINE__)": ",
10797 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10798 +#endif
10799 +}
10800 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10801 +
10802 +/**
10803 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10804 + * @desc: pointer to buffer used for descriptor construction
10805 + * @cdata: pointer to block cipher transform definitions
10806 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10807 + * with OP_ALG_AAI_CBC
10808 + * @adata: pointer to authentication transform definitions. Note that since a
10809 + * split key is to be used, the size of the split key itself is
10810 + * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
10811 + * OP_ALG_AAI_HMAC_PRECOMP.
10812 + * @assoclen: associated data length
10813 + * @ivsize: initialization vector size
10814 + * @authsize: authentication data size
10815 + * @blocksize: block cipher size
10816 + */
10817 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10818 + struct alginfo *adata, unsigned int assoclen,
10819 + unsigned int ivsize, unsigned int authsize,
10820 + unsigned int blocksize)
10821 +{
10822 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
10823 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10824 +
10825 + /*
10826 + * Compute the index (in bytes) for the LOAD with destination of
10827 + * Class 1 Data Size Register and for the LOAD that generates padding
10828 + */
10829 + if (adata->key_inline) {
10830 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10831 + cdata->keylen - 4 * CAAM_CMD_SZ;
10832 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10833 + cdata->keylen - 2 * CAAM_CMD_SZ;
10834 + } else {
10835 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10836 + 4 * CAAM_CMD_SZ;
10837 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10838 + 2 * CAAM_CMD_SZ;
10839 + }
10840 +
10841 + stidx = 1 << HDR_START_IDX_SHIFT;
10842 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10843 +
10844 + /* skip key loading if they are loaded due to sharing */
10845 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10846 + JUMP_COND_SHRD);
10847 +
10848 + if (adata->key_inline)
10849 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10850 + adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10851 + KEY_ENC);
10852 + else
10853 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10854 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10855 +
10856 + if (cdata->key_inline)
10857 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
10858 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
10859 + else
10860 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10861 + KEY_DEST_CLASS_REG);
10862 +
10863 + set_jump_tgt_here(desc, key_jump_cmd);
10864 +
10865 + /* class 2 operation */
10866 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10867 + OP_ALG_ENCRYPT);
10868 + /* class 1 operation */
10869 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10870 + OP_ALG_ENCRYPT);
10871 +
10872 + /* payloadlen = input data length - (assoclen + ivlen) */
10873 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
10874 +
10875 + /* math1 = payloadlen + icvlen */
10876 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
10877 +
10878 + /* padlen = block_size - math1 % block_size */
10879 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
10880 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
10881 +
10882 + /* cryptlen = payloadlen + icvlen + padlen */
10883 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
10884 +
10885 + /*
10886 + * update immediate data with the padding length value
10887 + * for the LOAD in the class 1 data size register.
10888 + */
10889 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10890 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
10891 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10892 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
10893 +
10894 +	/* overwrite PL field for the padding INFO FIFO entry */
10895 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10896 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
10897 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10898 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
10899 +
10900 + /* store encrypted payload, icv and padding */
10901 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
10902 +
10903 + /* if payload length is zero, jump to zero-payload commands */
10904 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
10905 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
10906 + JUMP_COND_MATH_Z);
10907 +
10908 + /* load iv in context1 */
10909 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10910 + LDST_CLASS_1_CCB | ivsize);
10911 +
10912 + /* read assoc for authentication */
10913 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10914 + FIFOLD_TYPE_MSG);
10915 + /* insnoop payload */
10916 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
10917 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
10918 +
10919 + /* jump the zero-payload commands */
10920 + append_jump(desc, JUMP_TEST_ALL | 3);
10921 +
10922 + /* zero-payload commands */
10923 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
10924 +
10925 + /* load iv in context1 */
10926 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10927 + LDST_CLASS_1_CCB | ivsize);
10928 +
10929 + /* assoc data is the only data for authentication */
10930 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10931 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
10932 +
10933 + /* send icv to encryption */
10934 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
10935 + authsize);
10936 +
10937 + /* update class 1 data size register with padding length */
10938 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
10939 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10940 +
10941 + /* generate padding and send it to encryption */
10942 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
10943 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
10944 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
10945 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10946 +
10947 +#ifdef DEBUG
10948 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
10949 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
10950 + desc_bytes(desc), 1);
10951 +#endif
10952 +}
10953 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
10954 +
10955 +/**
10956 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
10957 + * @desc: pointer to buffer used for descriptor construction
10958 + * @cdata: pointer to block cipher transform definitions
10959 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10960 + * with OP_ALG_AAI_CBC
10961 + * @adata: pointer to authentication transform definitions. Note that since a
10962 + * split key is to be used, the size of the split key itself is
10963 + *         specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
10964 + * OP_ALG_AAI_HMAC_PRECOMP.
10965 + * @assoclen: associated data length
10966 + * @ivsize: initialization vector size
10967 + * @authsize: authentication data size
10968 + * @blocksize: block cipher size
10969 + */
10970 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
10971 + struct alginfo *adata, unsigned int assoclen,
10972 + unsigned int ivsize, unsigned int authsize,
10973 + unsigned int blocksize)
10974 +{
10975 + u32 stidx, jumpback;
10976 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
10977 + /*
10978 + * Pointer Size bool determines the size of address pointers.
10979 + * false - Pointers fit in one 32-bit word.
10980 + * true - Pointers fit in two 32-bit words.
10981 + */
10982 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
10983 +
10984 + stidx = 1 << HDR_START_IDX_SHIFT;
10985 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10986 +
10987 + /* skip key loading if they are loaded due to sharing */
10988 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10989 + JUMP_COND_SHRD);
10990 +
10991 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10992 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10993 +
10994 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10995 + KEY_DEST_CLASS_REG);
10996 +
10997 + set_jump_tgt_here(desc, key_jump_cmd);
10998 +
10999 + /* class 2 operation */
11000 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11001 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11002 + /* class 1 operation */
11003 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11004 + OP_ALG_DECRYPT);
11005 +
11006 + /* VSIL = input data length - 2 * block_size */
11007 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11008 + blocksize);
11009 +
11010 + /*
11011 + * payloadlen + icvlen + padlen = input data length - (assoclen +
11012 + * ivsize)
11013 + */
11014 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11015 +
11016 + /* skip data to the last but one cipher block */
11017 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11018 +
11019 + /* load iv for the last cipher block */
11020 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11021 + LDST_CLASS_1_CCB | ivsize);
11022 +
11023 + /* read last cipher block */
11024 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11025 + FIFOLD_TYPE_LAST1 | blocksize);
11026 +
11027 + /* move decrypted block into math0 and math1 */
11028 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11029 + blocksize);
11030 +
11031 + /* reset AES CHA */
11032 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11033 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11034 +
11035 + /* rewind input sequence */
11036 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11037 +
11038 + /* key1 is in decryption form */
11039 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11040 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11041 +
11042 + /* load iv in context1 */
11043 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11044 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11045 +
11046 + /* read sequence number */
11047 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11048 + /* load Type, Version and Len fields in math0 */
11049 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11050 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11051 +
11052 + /* compute (padlen - 1) */
11053 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11054 +
11055 + /* math2 = icvlen + (padlen - 1) + 1 */
11056 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11057 +
11058 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11059 +
11060 + /* VSOL = payloadlen + icvlen + padlen */
11061 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11062 +
11063 +#ifdef __LITTLE_ENDIAN
11064 + append_moveb(desc, MOVE_WAITCOMP |
11065 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11066 +#endif
11067 + /* update Len field */
11068 + append_math_sub(desc, REG0, REG0, REG2, 8);
11069 +
11070 + /* store decrypted payload, icv and padding */
11071 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11072 +
11073 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
11074 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11075 +
11076 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11077 + JUMP_COND_MATH_Z);
11078 +
11079 + /* send Type, Version and Len(pre ICV) fields to authentication */
11080 + append_move(desc, MOVE_WAITCOMP |
11081 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11082 + (3 << MOVE_OFFSET_SHIFT) | 5);
11083 +
11084 + /* outsnooping payload */
11085 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11086 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11087 + FIFOLDST_VLF);
11088 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11089 +
11090 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11091 + /* send Type, Version and Len(pre ICV) fields to authentication */
11092 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11093 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11094 + (3 << MOVE_OFFSET_SHIFT) | 5);
11095 +
11096 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
11097 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11098 +
11099 + /* load icvlen and padlen */
11100 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11101 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11102 +
11103 +	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11104 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11105 +
11106 + /*
11107 + * Start a new input sequence using the SEQ OUT PTR command options,
11108 + * pointer and length used when the current output sequence was defined.
11109 + */
11110 + if (ps) {
11111 + /*
11112 + * Move the lower 32 bits of Shared Descriptor address, the
11113 + * SEQ OUT PTR command, Output Pointer (2 words) and
11114 + * Output Length into math registers.
11115 + */
11116 +#ifdef __LITTLE_ENDIAN
11117 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11118 + MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11119 + 20);
11120 +#else
11121 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11122 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11123 + 20);
11124 +#endif
11125 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11126 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
11127 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11128 + /* Append a JUMP command after the copied fields */
11129 + jumpback = CMD_JUMP | (char)-9;
11130 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11131 + LDST_SRCDST_WORD_DECO_MATH2 |
11132 + (4 << LDST_OFFSET_SHIFT));
11133 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11134 + /* Move the updated fields back to the Job Descriptor */
11135 +#ifdef __LITTLE_ENDIAN
11136 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11137 + MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11138 + 24);
11139 +#else
11140 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11141 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11142 + 24);
11143 +#endif
11144 + /*
11145 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11146 + * and then jump back to the next command from the
11147 + * Shared Descriptor.
11148 + */
11149 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11150 + } else {
11151 + /*
11152 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11153 + * Output Length into math registers.
11154 + */
11155 +#ifdef __LITTLE_ENDIAN
11156 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11157 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11158 + 12);
11159 +#else
11160 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11161 + MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11162 + 12);
11163 +#endif
11164 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11165 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
11166 + ~(((u64)(CMD_SEQ_IN_PTR ^
11167 + CMD_SEQ_OUT_PTR)) << 32));
11168 + /* Append a JUMP command after the copied fields */
11169 + jumpback = CMD_JUMP | (char)-7;
11170 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11171 + LDST_SRCDST_WORD_DECO_MATH1 |
11172 + (4 << LDST_OFFSET_SHIFT));
11173 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11174 + /* Move the updated fields back to the Job Descriptor */
11175 +#ifdef __LITTLE_ENDIAN
11176 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11177 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11178 + 16);
11179 +#else
11180 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11181 + MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11182 + 16);
11183 +#endif
11184 + /*
11185 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11186 + * and then jump back to the next command from the
11187 + * Shared Descriptor.
11188 + */
11189 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11190 + }
11191 +
11192 + /* skip payload */
11193 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11194 + /* check icv */
11195 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11196 + FIFOLD_TYPE_LAST2 | authsize);
11197 +
11198 +#ifdef DEBUG
11199 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11200 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11201 + desc_bytes(desc), 1);
11202 +#endif
11203 +}
11204 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
11205 +
11206 +/**
11207 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11208 + * @desc: pointer to buffer used for descriptor construction
11209 + * @cdata: pointer to block cipher transform definitions
11210 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11211 + * @ivsize: initialization vector size
11212 + * @icvsize: integrity check value (ICV) size (truncated or full)
11213 + * @is_qi: true when called from caam/qi
11214 + */
11215 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11216 + unsigned int ivsize, unsigned int icvsize,
11217 + const bool is_qi)
11218 +{
11219 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11220 + *zero_assoc_jump_cmd2;
11221 +
11222 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11223 +
11224 + /* skip key loading if they are loaded due to sharing */
11225 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11226 + JUMP_COND_SHRD);
11227 + if (cdata->key_inline)
11228 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11229 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11230 + else
11231 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11232 + KEY_DEST_CLASS_REG);
11233 + set_jump_tgt_here(desc, key_jump_cmd);
11234 +
11235 + /* class 1 operation */
11236 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11237 + OP_ALG_ENCRYPT);
11238 +
11239 + if (is_qi) {
11240 + u32 *wait_load_cmd;
11241 +
11242 + /* REG3 = assoclen */
11243 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11244 + LDST_SRCDST_WORD_DECO_MATH3 |
11245 + (4 << LDST_OFFSET_SHIFT));
11246 +
11247 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11248 + JUMP_COND_CALM | JUMP_COND_NCP |
11249 + JUMP_COND_NOP | JUMP_COND_NIP |
11250 + JUMP_COND_NIFP);
11251 + set_jump_tgt_here(desc, wait_load_cmd);
11252 +
11253 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11254 + ivsize);
11255 + } else {
11256 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11257 + CAAM_CMD_SZ);
11258 + }
11259 +
11260 + /* if assoclen + cryptlen is ZERO, skip to ICV write */
11261 + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11262 + JUMP_COND_MATH_Z);
11263 +
11264 + if (is_qi)
11265 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11266 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11267 +
11268 + /* if assoclen is ZERO, skip reading the assoc data */
11269 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11270 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11271 + JUMP_COND_MATH_Z);
11272 +
11273 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11274 +
11275 + /* skip assoc data */
11276 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11277 +
11278 + /* cryptlen = seqinlen - assoclen */
11279 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11280 +
11281 + /* if cryptlen is ZERO jump to zero-payload commands */
11282 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11283 + JUMP_COND_MATH_Z);
11284 +
11285 + /* read assoc data */
11286 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11287 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11288 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11289 +
11290 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11291 +
11292 + /* write encrypted data */
11293 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11294 +
11295 + /* read payload data */
11296 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11297 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11298 +
11299 + /* jump to ICV writing */
11300 + if (is_qi)
11301 + append_jump(desc, JUMP_TEST_ALL | 4);
11302 + else
11303 + append_jump(desc, JUMP_TEST_ALL | 2);
11304 +
11305 + /* zero-payload commands */
11306 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11307 +
11308 + /* read assoc data */
11309 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11310 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11311 + if (is_qi)
11312 + /* jump to ICV writing */
11313 + append_jump(desc, JUMP_TEST_ALL | 2);
11314 +
11315 + /* There is no input data */
11316 + set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11317 +
11318 + if (is_qi)
11319 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11320 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11321 + FIFOLD_TYPE_LAST1);
11322 +
11323 + /* write ICV */
11324 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11325 + LDST_SRCDST_BYTE_CONTEXT);
11326 +
11327 +#ifdef DEBUG
11328 + print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11329 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11330 +#endif
11331 +}
11332 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
11333 +
11334 +/**
11335 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11336 + * @desc: pointer to buffer used for descriptor construction
11337 + * @cdata: pointer to block cipher transform definitions
11338 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11339 + * @ivsize: initialization vector size
11340 + * @icvsize: integrity check value (ICV) size (truncated or full)
11341 + * @is_qi: true when called from caam/qi
11342 + */
11343 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11344 + unsigned int ivsize, unsigned int icvsize,
11345 + const bool is_qi)
11346 +{
11347 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11348 +
11349 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11350 +
11351 + /* skip key loading if they are loaded due to sharing */
11352 + key_jump_cmd = append_jump(desc, JUMP_JSL |
11353 + JUMP_TEST_ALL | JUMP_COND_SHRD);
11354 + if (cdata->key_inline)
11355 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11356 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11357 + else
11358 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11359 + KEY_DEST_CLASS_REG);
11360 + set_jump_tgt_here(desc, key_jump_cmd);
11361 +
11362 + /* class 1 operation */
11363 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11364 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11365 +
11366 + if (is_qi) {
11367 + u32 *wait_load_cmd;
11368 +
11369 + /* REG3 = assoclen */
11370 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11371 + LDST_SRCDST_WORD_DECO_MATH3 |
11372 + (4 << LDST_OFFSET_SHIFT));
11373 +
11374 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11375 + JUMP_COND_CALM | JUMP_COND_NCP |
11376 + JUMP_COND_NOP | JUMP_COND_NIP |
11377 + JUMP_COND_NIFP);
11378 + set_jump_tgt_here(desc, wait_load_cmd);
11379 +
11380 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11381 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11382 + }
11383 +
11384 + /* if assoclen is ZERO, skip reading the assoc data */
11385 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11386 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11387 + JUMP_COND_MATH_Z);
11388 +
11389 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11390 +
11391 + /* skip assoc data */
11392 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11393 +
11394 + /* read assoc data */
11395 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11396 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11397 +
11398 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11399 +
11400 + /* cryptlen = seqoutlen - assoclen */
11401 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11402 +
11403 + /* jump to zero-payload command if cryptlen is zero */
11404 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11405 + JUMP_COND_MATH_Z);
11406 +
11407 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11408 +
11409 + /* store encrypted data */
11410 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11411 +
11412 + /* read payload data */
11413 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11414 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11415 +
11416 + /* zero-payload command */
11417 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11418 +
11419 + /* read ICV */
11420 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11421 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11422 +
11423 +#ifdef DEBUG
11424 + print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11425 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11426 +#endif
11427 +}
11428 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11429 +
11430 +/**
11431 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11432 + * (non-protocol).
11433 + * @desc: pointer to buffer used for descriptor construction
11434 + * @cdata: pointer to block cipher transform definitions
11435 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11436 + * @ivsize: initialization vector size
11437 + * @icvsize: integrity check value (ICV) size (truncated or full)
11438 + * @is_qi: true when called from caam/qi
11439 + */
11440 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11441 + unsigned int ivsize, unsigned int icvsize,
11442 + const bool is_qi)
11443 +{
11444 + u32 *key_jump_cmd;
11445 +
11446 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11447 +
11448 + /* Skip key loading if it is loaded due to sharing */
11449 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11450 + JUMP_COND_SHRD);
11451 + if (cdata->key_inline)
11452 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11453 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11454 + else
11455 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11456 + KEY_DEST_CLASS_REG);
11457 + set_jump_tgt_here(desc, key_jump_cmd);
11458 +
11459 + /* Class 1 operation */
11460 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11461 + OP_ALG_ENCRYPT);
11462 +
11463 + if (is_qi) {
11464 + u32 *wait_load_cmd;
11465 +
11466 + /* REG3 = assoclen */
11467 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11468 + LDST_SRCDST_WORD_DECO_MATH3 |
11469 + (4 << LDST_OFFSET_SHIFT));
11470 +
11471 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11472 + JUMP_COND_CALM | JUMP_COND_NCP |
11473 + JUMP_COND_NOP | JUMP_COND_NIP |
11474 + JUMP_COND_NIFP);
11475 + set_jump_tgt_here(desc, wait_load_cmd);
11476 +
11477 + /* Read salt and IV */
11478 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11479 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11480 + FIFOLD_TYPE_IV);
11481 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11482 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11483 + }
11484 +
11485 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11486 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11487 +
11488 + /* Read assoc data */
11489 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11490 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11491 +
11492 + /* Skip IV */
11493 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11494 +
11495 + /* Will read cryptlen bytes */
11496 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11497 +
11498 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11499 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11500 +
11501 + /* Skip assoc data */
11502 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11503 +
11504 + /* cryptlen = seqoutlen - assoclen */
11505 + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11506 +
11507 + /* Write encrypted data */
11508 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11509 +
11510 + /* Read payload data */
11511 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11512 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11513 +
11514 + /* Write ICV */
11515 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11516 + LDST_SRCDST_BYTE_CONTEXT);
11517 +
11518 +#ifdef DEBUG
11519 + print_hex_dump(KERN_ERR,
11520 + "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11521 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11522 +#endif
11523 +}
11524 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
11525 +
11526 +/**
11527 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11528 + * (non-protocol).
11529 + * @desc: pointer to buffer used for descriptor construction
11530 + * @cdata: pointer to block cipher transform definitions
11531 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11532 + * @ivsize: initialization vector size
11533 + * @icvsize: integrity check value (ICV) size (truncated or full)
11534 + * @is_qi: true when called from caam/qi
11535 + */
11536 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11537 + unsigned int ivsize, unsigned int icvsize,
11538 + const bool is_qi)
11539 +{
11540 + u32 *key_jump_cmd;
11541 +
11542 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11543 +
11544 + /* Skip key loading if it is loaded due to sharing */
11545 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11546 + JUMP_COND_SHRD);
11547 + if (cdata->key_inline)
11548 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11549 + cdata->keylen, CLASS_1 |
11550 + KEY_DEST_CLASS_REG);
11551 + else
11552 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11553 + KEY_DEST_CLASS_REG);
11554 + set_jump_tgt_here(desc, key_jump_cmd);
11555 +
11556 + /* Class 1 operation */
11557 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11558 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11559 +
11560 + if (is_qi) {
11561 + u32 *wait_load_cmd;
11562 +
11563 + /* REG3 = assoclen */
11564 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11565 + LDST_SRCDST_WORD_DECO_MATH3 |
11566 + (4 << LDST_OFFSET_SHIFT));
11567 +
11568 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11569 + JUMP_COND_CALM | JUMP_COND_NCP |
11570 + JUMP_COND_NOP | JUMP_COND_NIP |
11571 + JUMP_COND_NIFP);
11572 + set_jump_tgt_here(desc, wait_load_cmd);
11573 +
11574 + /* Read salt and IV */
11575 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11576 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11577 + FIFOLD_TYPE_IV);
11578 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11579 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11580 + }
11581 +
11582 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11583 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11584 +
11585 + /* Read assoc data */
11586 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11587 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11588 +
11589 + /* Skip IV */
11590 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11591 +
11592 + /* Will read cryptlen bytes */
11593 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11594 +
11595 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11596 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11597 +
11598 + /* Skip assoc data */
11599 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11600 +
11601 + /* Will write cryptlen bytes */
11602 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11603 +
11604 + /* Store payload data */
11605 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11606 +
11607 + /* Read encrypted data */
11608 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11609 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11610 +
11611 + /* Read ICV */
11612 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11613 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11614 +
11615 +#ifdef DEBUG
11616 + print_hex_dump(KERN_ERR,
11617 + "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11618 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11619 +#endif
11620 +}
11621 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11622 +
11623 +/**
11624 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11625 + * (non-protocol).
11626 + * @desc: pointer to buffer used for descriptor construction
11627 + * @cdata: pointer to block cipher transform definitions
11628 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11629 + * @ivsize: initialization vector size
11630 + * @icvsize: integrity check value (ICV) size (truncated or full)
11631 + * @is_qi: true when called from caam/qi
11632 + */
11633 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11634 + unsigned int ivsize, unsigned int icvsize,
11635 + const bool is_qi)
11636 +{
11637 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11638 +
11639 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11640 +
11641 + /* Skip key loading if it is loaded due to sharing */
11642 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11643 + JUMP_COND_SHRD);
11644 + if (cdata->key_inline)
11645 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11646 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11647 + else
11648 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11649 + KEY_DEST_CLASS_REG);
11650 + set_jump_tgt_here(desc, key_jump_cmd);
11651 +
11652 + /* Class 1 operation */
11653 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11654 + OP_ALG_ENCRYPT);
11655 +
11656 + if (is_qi) {
11657 + /* assoclen is not needed, skip it */
11658 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11659 +
11660 + /* Read salt and IV */
11661 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11662 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11663 + FIFOLD_TYPE_IV);
11664 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11665 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11666 + }
11667 +
11668 + /* assoclen + cryptlen = seqinlen */
11669 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11670 +
11671 + /*
11672 +	 * MOVE_LEN opcode is not available in all SEC HW revisions, so the
11673 +	 * descriptor buffer is self-patched instead: the MOVE commands below
11674 +	 * are fixed up once their final offsets are known.
11675 + */
11676 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11677 + (0x6 << MOVE_LEN_SHIFT));
11678 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11679 + (0x8 << MOVE_LEN_SHIFT));
11680 +
11681 + /* Will read assoclen + cryptlen bytes */
11682 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11683 +
11684 + /* Will write assoclen + cryptlen bytes */
11685 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11686 +
11687 + /* Read and write assoclen + cryptlen bytes */
11688 + aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11689 +
11690 + set_move_tgt_here(desc, read_move_cmd);
11691 + set_move_tgt_here(desc, write_move_cmd);
11692 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11693 + /* Move payload data to OFIFO */
11694 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11695 +
11696 + /* Write ICV */
11697 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11698 + LDST_SRCDST_BYTE_CONTEXT);
11699 +
11700 +#ifdef DEBUG
11701 + print_hex_dump(KERN_ERR,
11702 + "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11703 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11704 +#endif
11705 +}
11706 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11707 +
11708 +/**
11709 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11710 + * (non-protocol).
11711 + * @desc: pointer to buffer used for descriptor construction
11712 + * @cdata: pointer to block cipher transform definitions
11713 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11714 + * @ivsize: initialization vector size
11715 + * @icvsize: integrity check value (ICV) size (truncated or full)
11716 + * @is_qi: true when called from caam/qi
11717 + */
11718 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11719 + unsigned int ivsize, unsigned int icvsize,
11720 + const bool is_qi)
11721 +{
11722 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11723 +
11724 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11725 +
11726 + /* Skip key loading if it is loaded due to sharing */
11727 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11728 + JUMP_COND_SHRD);
11729 + if (cdata->key_inline)
11730 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11731 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11732 + else
11733 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11734 + KEY_DEST_CLASS_REG);
11735 + set_jump_tgt_here(desc, key_jump_cmd);
11736 +
11737 + /* Class 1 operation */
11738 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11739 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11740 +
11741 + if (is_qi) {
11742 + /* assoclen is not needed, skip it */
11743 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11744 +
11745 + /* Read salt and IV */
11746 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11747 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11748 + FIFOLD_TYPE_IV);
11749 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11750 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11751 + }
11752 +
11753 + /* assoclen + cryptlen = seqoutlen */
11754 + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11755 +
11756 + /*
11757 +	 * MOVE_LEN opcode is not available in all SEC HW revisions, so the
11758 +	 * descriptor buffer is self-patched instead: the MOVE commands below
11759 +	 * are fixed up once their final offsets are known.
11760 + */
11761 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11762 + (0x6 << MOVE_LEN_SHIFT));
11763 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11764 + (0x8 << MOVE_LEN_SHIFT));
11765 +
11766 + /* Will read assoclen + cryptlen bytes */
11767 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11768 +
11769 + /* Will write assoclen + cryptlen bytes */
11770 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11771 +
11772 + /* Store payload data */
11773 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11774 +
11775 + /* In-snoop assoclen + cryptlen data */
11776 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11777 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11778 +
11779 + set_move_tgt_here(desc, read_move_cmd);
11780 + set_move_tgt_here(desc, write_move_cmd);
11781 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11782 + /* Move payload data to OFIFO */
11783 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11784 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11785 +
11786 + /* Read ICV */
11787 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11788 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11789 +
11790 +#ifdef DEBUG
11791 + print_hex_dump(KERN_ERR,
11792 + "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11793 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11794 +#endif
11795 +}
11796 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11797 +
11798 +/*
11799 + * For ablkcipher encrypt and decrypt, read from req->src and
11800 + * write to req->dst
11801 + */
11802 +static inline void ablkcipher_append_src_dst(u32 *desc)
11803 +{
11804 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11805 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11806 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11807 + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11808 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11809 +}
11810 +
11811 +/**
11812 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11813 + * @desc: pointer to buffer used for descriptor construction
11814 + * @cdata: pointer to block cipher transform definitions
11815 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11816 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11817 + * @ivsize: initialization vector size
11818 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11819 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11820 + */
11821 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11822 + unsigned int ivsize, const bool is_rfc3686,
11823 + const u32 ctx1_iv_off)
11824 +{
11825 + u32 *key_jump_cmd;
11826 +
11827 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11828 + /* Skip if already shared */
11829 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11830 + JUMP_COND_SHRD);
11831 +
11832 + /* Load class1 key only */
11833 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11834 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11835 +
11836 + /* Load nonce into CONTEXT1 reg */
11837 + if (is_rfc3686) {
11838 + u8 *nonce = cdata->key_virt + cdata->keylen;
11839 +
11840 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11841 + LDST_CLASS_IND_CCB |
11842 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11843 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11844 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11845 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11846 + }
11847 +
11848 + set_jump_tgt_here(desc, key_jump_cmd);
11849 +
11850 + /* Load iv */
11851 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11852 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11853 +
11854 + /* Load counter into CONTEXT1 reg */
11855 + if (is_rfc3686)
11856 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11857 + LDST_SRCDST_BYTE_CONTEXT |
11858 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11859 + LDST_OFFSET_SHIFT));
11860 +
11861 + /* Load operation */
11862 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11863 + OP_ALG_ENCRYPT);
11864 +
11865 + /* Perform operation */
11866 + ablkcipher_append_src_dst(desc);
11867 +
11868 +#ifdef DEBUG
11869 + print_hex_dump(KERN_ERR,
11870 + "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
11871 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11872 +#endif
11873 +}
11874 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
11875 +
11876 +/**
11877 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
11878 + * @desc: pointer to buffer used for descriptor construction
11879 + * @cdata: pointer to block cipher transform definitions
11880 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11881 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11882 + * @ivsize: initialization vector size
11883 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11884 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11885 + */
11886 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
11887 + unsigned int ivsize, const bool is_rfc3686,
11888 + const u32 ctx1_iv_off)
11889 +{
11890 + u32 *key_jump_cmd;
11891 +
11892 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11893 + /* Skip if already shared */
11894 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11895 + JUMP_COND_SHRD);
11896 +
11897 + /* Load class1 key only */
11898 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11899 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11900 +
11901 + /* Load nonce into CONTEXT1 reg */
11902 + if (is_rfc3686) {
11903 + u8 *nonce = cdata->key_virt + cdata->keylen;
11904 +
11905 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11906 + LDST_CLASS_IND_CCB |
11907 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11908 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11909 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11910 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11911 + }
11912 +
11913 + set_jump_tgt_here(desc, key_jump_cmd);
11914 +
11915 + /* load IV */
11916 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11917 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11918 +
11919 + /* Load counter into CONTEXT1 reg */
11920 + if (is_rfc3686)
11921 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11922 + LDST_SRCDST_BYTE_CONTEXT |
11923 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11924 + LDST_OFFSET_SHIFT));
11925 +
11926 + /* Choose operation */
11927 + if (ctx1_iv_off)
11928 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11929 + OP_ALG_DECRYPT);
11930 + else
11931 + append_dec_op1(desc, cdata->algtype);
11932 +
11933 + /* Perform operation */
11934 + ablkcipher_append_src_dst(desc);
11935 +
11936 +#ifdef DEBUG
11937 + print_hex_dump(KERN_ERR,
11938 + "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
11939 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11940 +#endif
11941 +}
11942 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
11943 +
11944 +/**
11945 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
11946 + * with HW-generated initialization vector.
11947 + * @desc: pointer to buffer used for descriptor construction
11948 + * @cdata: pointer to block cipher transform definitions
11949 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11950 + * with OP_ALG_AAI_CBC.
11951 + * @ivsize: initialization vector size
11952 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11953 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11954 + */
11955 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
11956 + unsigned int ivsize, const bool is_rfc3686,
11957 + const u32 ctx1_iv_off)
11958 +{
11959 + u32 *key_jump_cmd, geniv;
11960 +
11961 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11962 + /* Skip if already shared */
11963 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11964 + JUMP_COND_SHRD);
11965 +
11966 + /* Load class1 key only */
11967 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11968 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11969 +
11970 + /* Load Nonce into CONTEXT1 reg */
11971 + if (is_rfc3686) {
11972 + u8 *nonce = cdata->key_virt + cdata->keylen;
11973 +
11974 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11975 + LDST_CLASS_IND_CCB |
11976 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11977 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11978 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11979 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11980 + }
11981 + set_jump_tgt_here(desc, key_jump_cmd);
11982 +
11983 + /* Generate IV */
11984 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
11985 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
11986 + (ivsize << NFIFOENTRY_DLEN_SHIFT);
11987 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
11988 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11989 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11990 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
11991 + MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
11992 + (ctx1_iv_off << MOVE_OFFSET_SHIFT));
11993 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11994 +
11995 + /* Copy generated IV to memory */
11996 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11997 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11998 +
11999 + /* Load Counter into CONTEXT1 reg */
12000 + if (is_rfc3686)
12001 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12002 + LDST_SRCDST_BYTE_CONTEXT |
12003 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12004 + LDST_OFFSET_SHIFT));
12005 +
12006 + if (ctx1_iv_off)
12007 + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12008 + (1 << JUMP_OFFSET_SHIFT));
12009 +
12010 + /* Load operation */
12011 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12012 + OP_ALG_ENCRYPT);
12013 +
12014 + /* Perform operation */
12015 + ablkcipher_append_src_dst(desc);
12016 +
12017 +#ifdef DEBUG
12018 + print_hex_dump(KERN_ERR,
12019 + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12020 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12021 +#endif
12022 +}
12023 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
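+/*
+ * For reference, the CONTEXT1 layout the three ablkcipher constructors
+ * above assume when is_rfc3686 is true (byte offsets derived from the
+ * loads performed in the descriptors):
+ *
+ *	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE = 20
+ *
+ *	CONTEXT1[16..19] : nonce   (copied from the end of the key)
+ *	CONTEXT1[20..27] : IV      (loaded from the request)
+ *	CONTEXT1[28..31] : counter (initialized to be32 1)
+ *
+ * For plain ctr(aes), ctx1_iv_off is 16 and the full 16-byte IV occupies
+ * CONTEXT1[16..31]; for cbc modes it is 0.
+ */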
12024 +
12025 +/**
12026 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12027 + * descriptor
12028 + * @desc: pointer to buffer used for descriptor construction
12029 + * @cdata: pointer to block cipher transform definitions
12030 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12031 + */
12032 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12033 +{
12034 + __be64 sector_size = cpu_to_be64(512);
12035 + u32 *key_jump_cmd;
12036 +
12037 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12038 + /* Skip if already shared */
12039 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12040 + JUMP_COND_SHRD);
12041 +
12042 + /* Load class1 keys only */
12043 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12044 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12045 +
12046 + /* Load sector size with index 40 bytes (0x28) */
12047 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12048 + LDST_SRCDST_BYTE_CONTEXT |
12049 + (0x28 << LDST_OFFSET_SHIFT));
12050 +
12051 + set_jump_tgt_here(desc, key_jump_cmd);
12052 +
12053 + /*
12054 + * create sequence for loading the sector index
12055 + * Upper 8B of IV - will be used as sector index
12056 + * Lower 8B of IV - will be discarded
12057 + */
12058 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12059 + (0x20 << LDST_OFFSET_SHIFT));
12060 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12061 +
12062 + /* Load operation */
12063 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12064 + OP_ALG_ENCRYPT);
12065 +
12066 + /* Perform operation */
12067 + ablkcipher_append_src_dst(desc);
12068 +
12069 +#ifdef DEBUG
12070 + print_hex_dump(KERN_ERR,
12071 + "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12072 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12073 +#endif
12074 +}
12075 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12076 +
12077 +/**
12078 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12079 + * descriptor
12080 + * @desc: pointer to buffer used for descriptor construction
12081 + * @cdata: pointer to block cipher transform definitions
12082 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12083 + */
12084 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12085 +{
12086 + __be64 sector_size = cpu_to_be64(512);
12087 + u32 *key_jump_cmd;
12088 +
12089 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12090 + /* Skip if already shared */
12091 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12092 + JUMP_COND_SHRD);
12093 +
12094 + /* Load class1 key only */
12095 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12096 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12097 +
12098 + /* Load sector size with index 40 bytes (0x28) */
12099 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12100 + LDST_SRCDST_BYTE_CONTEXT |
12101 + (0x28 << LDST_OFFSET_SHIFT));
12102 +
12103 + set_jump_tgt_here(desc, key_jump_cmd);
12104 +
12105 + /*
12106 + * create sequence for loading the sector index
12107 + * Upper 8B of IV - will be used as sector index
12108 + * Lower 8B of IV - will be discarded
12109 + */
12110 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12111 + (0x20 << LDST_OFFSET_SHIFT));
12112 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12113 +
12114 + /* Load operation */
12115 + append_dec_op1(desc, cdata->algtype);
12116 +
12117 + /* Perform operation */
12118 + ablkcipher_append_src_dst(desc);
12119 +
12120 +#ifdef DEBUG
12121 + print_hex_dump(KERN_ERR,
12122 + "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12123 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12124 +#endif
12125 +}
12126 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
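+/*
+ * Both xts constructors above hard-code a 512-byte sector size (written to
+ * CONTEXT1 at byte offset 0x28) and consume the request IV as:
+ *
+ *	IV[0..7]  : sector index, loaded to CONTEXT1 at byte offset 0x20
+ *	IV[8..15] : unused, skipped from the input sequence
+ */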
12127 +
12128 +MODULE_LICENSE("GPL");
12129 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12130 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12131 --- /dev/null
12132 +++ b/drivers/crypto/caam/caamalg_desc.h
12133 @@ -0,0 +1,127 @@
12134 +/*
12135 + * Shared descriptors for aead, ablkcipher algorithms
12136 + *
12137 + * Copyright 2016 NXP
12138 + */
12139 +
12140 +#ifndef _CAAMALG_DESC_H_
12141 +#define _CAAMALG_DESC_H_
12142 +
12143 +/* lengths of descriptor text */
12144 +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
12145 +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12146 +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12147 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12148 +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12149 +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12150 +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12151 +
12152 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
12153 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12154 +
12155 +/* Note: Nonce is counted in cdata.keylen */
12156 +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
12157 +
12158 +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
12159 +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12160 +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12161 +
12162 +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
12163 +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12164 +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12165 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12166 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12167 +
12168 +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
12169 +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12170 +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12171 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12172 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12173 +
12174 +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
12175 +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12176 +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12177 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12178 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12179 +
12180 +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
12181 +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
12182 + 20 * CAAM_CMD_SZ)
12183 +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
12184 + 15 * CAAM_CMD_SZ)
12185 +
12186 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12187 + unsigned int icvsize);
12188 +
12189 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12190 + unsigned int icvsize);
12191 +
12192 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12193 + struct alginfo *adata, unsigned int ivsize,
12194 + unsigned int icvsize, const bool is_rfc3686,
12195 + u32 *nonce, const u32 ctx1_iv_off,
12196 + const bool is_qi);
12197 +
12198 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12199 + struct alginfo *adata, unsigned int ivsize,
12200 + unsigned int icvsize, const bool geniv,
12201 + const bool is_rfc3686, u32 *nonce,
12202 + const u32 ctx1_iv_off, const bool is_qi);
12203 +
12204 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12205 + struct alginfo *adata, unsigned int ivsize,
12206 + unsigned int icvsize, const bool is_rfc3686,
12207 + u32 *nonce, const u32 ctx1_iv_off,
12208 + const bool is_qi);
12209 +
12210 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12211 + struct alginfo *adata, unsigned int assoclen,
12212 + unsigned int ivsize, unsigned int authsize,
12213 + unsigned int blocksize);
12214 +
12215 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12216 + struct alginfo *adata, unsigned int assoclen,
12217 + unsigned int ivsize, unsigned int authsize,
12218 + unsigned int blocksize);
12219 +
12220 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12221 + unsigned int ivsize, unsigned int icvsize,
12222 + const bool is_qi);
12223 +
12224 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12225 + unsigned int ivsize, unsigned int icvsize,
12226 + const bool is_qi);
12227 +
12228 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12229 + unsigned int ivsize, unsigned int icvsize,
12230 + const bool is_qi);
12231 +
12232 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12233 + unsigned int ivsize, unsigned int icvsize,
12234 + const bool is_qi);
12235 +
12236 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12237 + unsigned int ivsize, unsigned int icvsize,
12238 + const bool is_qi);
12239 +
12240 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12241 + unsigned int ivsize, unsigned int icvsize,
12242 + const bool is_qi);
12243 +
12244 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12245 + unsigned int ivsize, const bool is_rfc3686,
12246 + const u32 ctx1_iv_off);
12247 +
12248 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12249 + unsigned int ivsize, const bool is_rfc3686,
12250 + const u32 ctx1_iv_off);
12251 +
12252 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12253 + unsigned int ivsize, const bool is_rfc3686,
12254 + const u32 ctx1_iv_off);
12255 +
12256 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12257 +
12258 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12259 +
12260 +#endif /* _CAAMALG_DESC_H_ */
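+/*
+ * Sizing note (derived from the macros above and their use in
+ * caamalg_qi.c below): each CAAM command word is CAAM_CMD_SZ (4) bytes,
+ * so e.g. DESC_QI_AEAD_GIVENC_LEN = (4 + 11 + 7 + 3) * CAAM_CMD_SZ = 100
+ * bytes of descriptor text. caamalg_qi.c then adds the worst-case inlined
+ * key material to bound its per-session sh_desc_* buffers:
+ *
+ *	#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
+ *				     CAAM_MAX_KEY_SIZE)
+ *	#define DESC_MAX_USED_LEN   (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+ */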
12261 --- /dev/null
12262 +++ b/drivers/crypto/caam/caamalg_qi.c
12263 @@ -0,0 +1,2877 @@
12264 +/*
12265 + * Freescale FSL CAAM support for crypto API over QI backend.
12266 + * Based on caamalg.c
12267 + *
12268 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12269 + * Copyright 2016-2017 NXP
12270 + */
12271 +
12272 +#include "compat.h"
12273 +#include "ctrl.h"
12274 +#include "regs.h"
12275 +#include "intern.h"
12276 +#include "desc_constr.h"
12277 +#include "error.h"
12278 +#include "sg_sw_qm.h"
12279 +#include "key_gen.h"
12280 +#include "qi.h"
12281 +#include "jr.h"
12282 +#include "caamalg_desc.h"
12283 +
12284 +/*
12285 + * crypto alg
12286 + */
12287 +#define CAAM_CRA_PRIORITY 2000
12288 +/* max key is the sum of AES_MAX_KEY_SIZE and the max split key size */
12289 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
12290 + SHA512_DIGEST_SIZE * 2)
12291 +
12292 +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
12293 + CAAM_MAX_KEY_SIZE)
12294 +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12295 +
12296 +struct caam_alg_entry {
12297 + int class1_alg_type;
12298 + int class2_alg_type;
12299 + bool rfc3686;
12300 + bool geniv;
12301 +};
12302 +
12303 +struct caam_aead_alg {
12304 + struct aead_alg aead;
12305 + struct caam_alg_entry caam;
12306 + bool registered;
12307 +};
12308 +
12309 +/*
12310 + * per-session context
12311 + */
12312 +struct caam_ctx {
12313 + struct device *jrdev;
12314 + u32 sh_desc_enc[DESC_MAX_USED_LEN];
12315 + u32 sh_desc_dec[DESC_MAX_USED_LEN];
12316 + u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12317 + u8 key[CAAM_MAX_KEY_SIZE];
12318 + dma_addr_t key_dma;
12319 + struct alginfo adata;
12320 + struct alginfo cdata;
12321 + unsigned int authsize;
12322 + struct device *qidev;
12323 + spinlock_t lock; /* Protects multiple init of driver context */
12324 + struct caam_drv_ctx *drv_ctx[NUM_OP];
12325 +};
12326 +
12327 +static int aead_set_sh_desc(struct crypto_aead *aead)
12328 +{
12329 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12330 + typeof(*alg), aead);
12331 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12332 + unsigned int ivsize = crypto_aead_ivsize(aead);
12333 + u32 ctx1_iv_off = 0;
12334 + u32 *nonce = NULL;
12335 + unsigned int data_len[2];
12336 + u32 inl_mask;
12337 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12338 + OP_ALG_AAI_CTR_MOD128);
12339 + const bool is_rfc3686 = alg->caam.rfc3686;
12340 +
12341 + if (!ctx->cdata.keylen || !ctx->authsize)
12342 + return 0;
12343 +
12344 + /*
12345 + * AES-CTR needs to load IV in CONTEXT1 reg
12346 +	 * at an offset of 128 bits (16 bytes)
12347 + * CONTEXT1[255:128] = IV
12348 + */
12349 + if (ctr_mode)
12350 + ctx1_iv_off = 16;
12351 +
12352 + /*
12353 + * RFC3686 specific:
12354 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12355 + */
12356 + if (is_rfc3686) {
12357 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12358 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12359 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12360 + }
12361 +
12362 + data_len[0] = ctx->adata.keylen_pad;
12363 + data_len[1] = ctx->cdata.keylen;
12364 +
12365 + if (alg->caam.geniv)
12366 + goto skip_enc;
12367 +
12368 + /* aead_encrypt shared descriptor */
12369 + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12370 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12371 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12372 + ARRAY_SIZE(data_len)) < 0)
12373 + return -EINVAL;
12374 +
12375 + if (inl_mask & 1)
12376 + ctx->adata.key_virt = ctx->key;
12377 + else
12378 + ctx->adata.key_dma = ctx->key_dma;
12379 +
12380 + if (inl_mask & 2)
12381 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12382 + else
12383 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12384 +
12385 + ctx->adata.key_inline = !!(inl_mask & 1);
12386 + ctx->cdata.key_inline = !!(inl_mask & 2);
12387 +
12388 + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12389 + ivsize, ctx->authsize, is_rfc3686, nonce,
12390 + ctx1_iv_off, true);
12391 +
12392 +skip_enc:
12393 + /* aead_decrypt shared descriptor */
12394 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12395 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12396 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12397 + ARRAY_SIZE(data_len)) < 0)
12398 + return -EINVAL;
12399 +
12400 + if (inl_mask & 1)
12401 + ctx->adata.key_virt = ctx->key;
12402 + else
12403 + ctx->adata.key_dma = ctx->key_dma;
12404 +
12405 + if (inl_mask & 2)
12406 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12407 + else
12408 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12409 +
12410 + ctx->adata.key_inline = !!(inl_mask & 1);
12411 + ctx->cdata.key_inline = !!(inl_mask & 2);
12412 +
12413 + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12414 + ivsize, ctx->authsize, alg->caam.geniv,
12415 + is_rfc3686, nonce, ctx1_iv_off, true);
12416 +
12417 + if (!alg->caam.geniv)
12418 + goto skip_givenc;
12419 +
12420 + /* aead_givencrypt shared descriptor */
12421 + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12422 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12423 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12424 + ARRAY_SIZE(data_len)) < 0)
12425 + return -EINVAL;
12426 +
12427 + if (inl_mask & 1)
12428 + ctx->adata.key_virt = ctx->key;
12429 + else
12430 + ctx->adata.key_dma = ctx->key_dma;
12431 +
12432 + if (inl_mask & 2)
12433 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12434 + else
12435 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12436 +
12437 + ctx->adata.key_inline = !!(inl_mask & 1);
12438 + ctx->cdata.key_inline = !!(inl_mask & 2);
12439 +
12440 + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12441 + ivsize, ctx->authsize, is_rfc3686, nonce,
12442 + ctx1_iv_off, true);
12443 +
12444 +skip_givenc:
12445 + return 0;
12446 +}
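+/*
+ * Note on the inl_mask handling above: desc_inline_query() reports, per
+ * data_len[] entry, whether that key still fits inline in the shared
+ * descriptor. Bit 0 covers data_len[0] (the split authentication key) and
+ * bit 1 covers data_len[1] (the cipher key); a set bit selects key_virt
+ * (immediate key), a clear bit selects key_dma (key by reference).
+ */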
12447 +
12448 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12449 +{
12450 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12451 +
12452 + ctx->authsize = authsize;
12453 + aead_set_sh_desc(authenc);
12454 +
12455 + return 0;
12456 +}
12457 +
12458 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12459 + unsigned int keylen)
12460 +{
12461 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12462 + struct device *jrdev = ctx->jrdev;
12463 + struct crypto_authenc_keys keys;
12464 + int ret = 0;
12465 +
12466 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12467 + goto badkey;
12468 +
12469 +#ifdef DEBUG
12470 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12471 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12472 + keys.authkeylen);
12473 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12474 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12475 +#endif
12476 +
12477 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12478 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12479 + keys.enckeylen);
12480 + if (ret)
12481 + goto badkey;
12482 +
12483 +	/* append the encryption key after the auth split key */
12484 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12485 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12486 + keys.enckeylen, DMA_TO_DEVICE);
12487 +#ifdef DEBUG
12488 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12489 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12490 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12491 +#endif
12492 +
12493 + ctx->cdata.keylen = keys.enckeylen;
12494 +
12495 + ret = aead_set_sh_desc(aead);
12496 + if (ret)
12497 + goto badkey;
12498 +
12499 + /* Now update the driver contexts with the new shared descriptor */
12500 + if (ctx->drv_ctx[ENCRYPT]) {
12501 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12502 + ctx->sh_desc_enc);
12503 + if (ret) {
12504 + dev_err(jrdev, "driver enc context update failed\n");
12505 + goto badkey;
12506 + }
12507 + }
12508 +
12509 + if (ctx->drv_ctx[DECRYPT]) {
12510 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12511 + ctx->sh_desc_dec);
12512 + if (ret) {
12513 + dev_err(jrdev, "driver dec context update failed\n");
12514 + goto badkey;
12515 + }
12516 + }
12517 +
12518 + return ret;
12519 +badkey:
12520 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12521 + return -EINVAL;
12522 +}
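+/*
+ * Resulting ctx->key layout after aead_setkey() (tls_setkey() below builds
+ * the same thing):
+ *
+ *	ctx->key = [ split auth key, padded to adata.keylen_pad ][ enc key ]
+ *
+ * adata.key_virt/key_dma refer to the start of the buffer and
+ * cdata.key_virt/key_dma to offset adata.keylen_pad. ctx->key_dma is
+ * assumed to be a pre-existing DMA mapping of this buffer, which is why
+ * only dma_sync_single_for_device() is needed here.
+ */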
12523 +
12524 +static int tls_set_sh_desc(struct crypto_aead *tls)
12525 +{
12526 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12527 + unsigned int ivsize = crypto_aead_ivsize(tls);
12528 + unsigned int blocksize = crypto_aead_blocksize(tls);
12529 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
12530 + unsigned int data_len[2];
12531 + u32 inl_mask;
12532 +
12533 + if (!ctx->cdata.keylen || !ctx->authsize)
12534 + return 0;
12535 +
12536 + /*
12537 + * TLS 1.0 encrypt shared descriptor
12538 + * Job Descriptor and Shared Descriptor
12539 + * must fit into the 64-word Descriptor h/w Buffer
12540 + */
12541 + data_len[0] = ctx->adata.keylen_pad;
12542 + data_len[1] = ctx->cdata.keylen;
12543 +
12544 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12545 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
12546 + return -EINVAL;
12547 +
12548 + if (inl_mask & 1)
12549 + ctx->adata.key_virt = ctx->key;
12550 + else
12551 + ctx->adata.key_dma = ctx->key_dma;
12552 +
12553 + if (inl_mask & 2)
12554 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12555 + else
12556 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12557 +
12558 + ctx->adata.key_inline = !!(inl_mask & 1);
12559 + ctx->cdata.key_inline = !!(inl_mask & 2);
12560 +
12561 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12562 + assoclen, ivsize, ctx->authsize, blocksize);
12563 +
12564 + /*
12565 + * TLS 1.0 decrypt shared descriptor
12566 + * Keys do not fit inline, regardless of algorithms used
12567 + */
12568 + ctx->adata.key_dma = ctx->key_dma;
12569 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12570 +
12571 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12572 + assoclen, ivsize, ctx->authsize, blocksize);
12573 +
12574 + return 0;
12575 +}
12576 +
12577 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12578 +{
12579 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12580 +
12581 + ctx->authsize = authsize;
12582 + tls_set_sh_desc(tls);
12583 +
12584 + return 0;
12585 +}
12586 +
12587 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12588 + unsigned int keylen)
12589 +{
12590 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12591 + struct device *jrdev = ctx->jrdev;
12592 + struct crypto_authenc_keys keys;
12593 + int ret = 0;
12594 +
12595 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12596 + goto badkey;
12597 +
12598 +#ifdef DEBUG
12599 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12600 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12601 + keys.authkeylen);
12602 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12603 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12604 +#endif
12605 +
12606 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12607 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12608 + keys.enckeylen);
12609 + if (ret)
12610 + goto badkey;
12611 +
12612 +	/* append the encryption key after the auth split key */
12613 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12614 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12615 + keys.enckeylen, DMA_TO_DEVICE);
12616 +
12617 +#ifdef DEBUG
12618 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12619 + ctx->adata.keylen, ctx->adata.keylen_pad);
12620 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12621 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12622 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12623 +#endif
12624 +
12625 + ctx->cdata.keylen = keys.enckeylen;
12626 +
12627 + ret = tls_set_sh_desc(tls);
12628 + if (ret)
12629 + goto badkey;
12630 +
12631 + /* Now update the driver contexts with the new shared descriptor */
12632 + if (ctx->drv_ctx[ENCRYPT]) {
12633 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12634 + ctx->sh_desc_enc);
12635 + if (ret) {
12636 + dev_err(jrdev, "driver enc context update failed\n");
12637 + goto badkey;
12638 + }
12639 + }
12640 +
12641 + if (ctx->drv_ctx[DECRYPT]) {
12642 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12643 + ctx->sh_desc_dec);
12644 + if (ret) {
12645 + dev_err(jrdev, "driver dec context update failed\n");
12646 + goto badkey;
12647 + }
12648 + }
12649 +
12650 + return ret;
12651 +badkey:
12652 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12653 + return -EINVAL;
12654 +}
12655 +
12656 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12657 + const u8 *key, unsigned int keylen)
12658 +{
12659 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12660 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12661 + const char *alg_name = crypto_tfm_alg_name(tfm);
12662 + struct device *jrdev = ctx->jrdev;
12663 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12664 + u32 ctx1_iv_off = 0;
12665 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12666 + OP_ALG_AAI_CTR_MOD128);
12667 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12668 + int ret = 0;
12669 +
12670 + memcpy(ctx->key, key, keylen);
12671 +#ifdef DEBUG
12672 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12673 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12674 +#endif
12675 + /*
12676 + * AES-CTR needs to load IV in CONTEXT1 reg
12677 +	 * at an offset of 128 bits (16 bytes)
12678 + * CONTEXT1[255:128] = IV
12679 + */
12680 + if (ctr_mode)
12681 + ctx1_iv_off = 16;
12682 +
12683 + /*
12684 + * RFC3686 specific:
12685 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12686 + * | *key = {KEY, NONCE}
12687 + */
12688 + if (is_rfc3686) {
12689 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12690 + keylen -= CTR_RFC3686_NONCE_SIZE;
12691 + }
12692 +
12693 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12694 + ctx->cdata.keylen = keylen;
12695 + ctx->cdata.key_virt = ctx->key;
12696 + ctx->cdata.key_inline = true;
12697 +
12698 + /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12699 + cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12700 + is_rfc3686, ctx1_iv_off);
12701 + cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12702 + is_rfc3686, ctx1_iv_off);
12703 + cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12704 + ivsize, is_rfc3686, ctx1_iv_off);
12705 +
12706 + /* Now update the driver contexts with the new shared descriptor */
12707 + if (ctx->drv_ctx[ENCRYPT]) {
12708 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12709 + ctx->sh_desc_enc);
12710 + if (ret) {
12711 + dev_err(jrdev, "driver enc context update failed\n");
12712 + goto badkey;
12713 + }
12714 + }
12715 +
12716 + if (ctx->drv_ctx[DECRYPT]) {
12717 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12718 + ctx->sh_desc_dec);
12719 + if (ret) {
12720 + dev_err(jrdev, "driver dec context update failed\n");
12721 + goto badkey;
12722 + }
12723 + }
12724 +
12725 + if (ctx->drv_ctx[GIVENCRYPT]) {
12726 + ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12727 + ctx->sh_desc_givenc);
12728 + if (ret) {
12729 + dev_err(jrdev, "driver givenc context update failed\n");
12730 + goto badkey;
12731 + }
12732 + }
12733 +
12734 + return ret;
12735 +badkey:
12736 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12737 + return -EINVAL;
12738 +}
12739 +
12740 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12741 + const u8 *key, unsigned int keylen)
12742 +{
12743 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12744 + struct device *jrdev = ctx->jrdev;
12745 + int ret = 0;
12746 +
12747 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
12748 + crypto_ablkcipher_set_flags(ablkcipher,
12749 + CRYPTO_TFM_RES_BAD_KEY_LEN);
12750 + dev_err(jrdev, "key size mismatch\n");
12751 + return -EINVAL;
12752 + }
12753 +
12754 + memcpy(ctx->key, key, keylen);
12755 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12756 + ctx->cdata.keylen = keylen;
12757 + ctx->cdata.key_virt = ctx->key;
12758 + ctx->cdata.key_inline = true;
12759 +
12760 + /* xts ablkcipher encrypt, decrypt shared descriptors */
12761 + cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12762 + cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12763 +
12764 + /* Now update the driver contexts with the new shared descriptor */
12765 + if (ctx->drv_ctx[ENCRYPT]) {
12766 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12767 + ctx->sh_desc_enc);
12768 + if (ret) {
12769 + dev_err(jrdev, "driver enc context update failed\n");
12770 + goto badkey;
12771 + }
12772 + }
12773 +
12774 + if (ctx->drv_ctx[DECRYPT]) {
12775 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12776 + ctx->sh_desc_dec);
12777 + if (ret) {
12778 + dev_err(jrdev, "driver dec context update failed\n");
12779 + goto badkey;
12780 + }
12781 + }
12782 +
12783 + return ret;
12784 +badkey:
12785 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12786 +	return -EINVAL;
12787 +}
12788 +
12789 +/*
12790 + * aead_edesc - s/w-extended aead descriptor
12791 + * @src_nents: number of segments in input scatterlist
12792 + * @dst_nents: number of segments in output scatterlist
12793 + * @iv_dma: dma address of iv for checking continuity and link table
12794 + * @qm_sg_bytes: length of dma mapped h/w link table
12795 + * @qm_sg_dma: bus physical mapped address of h/w link table
12796 + * @assoclen: associated data length, in CAAM endianness
12797 + * @assoclen_dma: bus physical mapped address of req->assoclen
12798 + * @drv_req: driver-specific request structure
12799 + * @sgt: the h/w link table
12800 + */
12801 +struct aead_edesc {
12802 + int src_nents;
12803 + int dst_nents;
12804 + dma_addr_t iv_dma;
12805 + int qm_sg_bytes;
12806 + dma_addr_t qm_sg_dma;
12807 + unsigned int assoclen;
12808 + dma_addr_t assoclen_dma;
12809 + struct caam_drv_req drv_req;
12810 +#define CAAM_QI_MAX_AEAD_SG \
12811 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
12812 + sizeof(struct qm_sg_entry))
12813 + struct qm_sg_entry sgt[0];
12814 +};
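+/*
+ * CAAM_QI_MAX_AEAD_SG is the number of qm_sg_entry link-table slots left
+ * in a qi_cache_alloc() object once the aead_edesc header is accounted
+ * for; aead_edesc_alloc() below rejects requests whose computed qm_sg_ents
+ * would exceed it.
+ */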
12815 +
12816 +/*
12817 + * tls_edesc - s/w-extended tls descriptor
12818 + * @src_nents: number of segments in input scatterlist
12819 + * @dst_nents: number of segments in output scatterlist
12820 + * @iv_dma: dma address of iv for checking continuity and link table
12821 + * @qm_sg_bytes: length of dma mapped h/w link table
12822 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
12823 + * @qm_sg_dma: bus physical mapped address of h/w link table
12824 + * @drv_req: driver-specific request structure
12825 + * @sgt: the h/w link table
12826 + */
12827 +struct tls_edesc {
12828 + int src_nents;
12829 + int dst_nents;
12830 + dma_addr_t iv_dma;
12831 + int qm_sg_bytes;
12832 + dma_addr_t qm_sg_dma;
12833 + struct scatterlist tmp[2];
12834 + struct scatterlist *dst;
12835 + struct caam_drv_req drv_req;
12836 + struct qm_sg_entry sgt[0];
12837 +};
12838 +
12839 +/*
12840 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
12841 + * @src_nents: number of segments in input scatterlist
12842 + * @dst_nents: number of segments in output scatterlist
12843 + * @iv_dma: dma address of iv for checking continuity and link table
12844 + * @qm_sg_bytes: length of dma mapped h/w link table
12845 + * @qm_sg_dma: bus physical mapped address of h/w link table
12846 + * @drv_req: driver-specific request structure
12847 + * @sgt: the h/w link table
12848 + */
12849 +struct ablkcipher_edesc {
12850 + int src_nents;
12851 + int dst_nents;
12852 + dma_addr_t iv_dma;
12853 + int qm_sg_bytes;
12854 + dma_addr_t qm_sg_dma;
12855 + struct caam_drv_req drv_req;
12856 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
12857 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
12858 + sizeof(struct qm_sg_entry))
12859 + struct qm_sg_entry sgt[0];
12860 +};
12861 +
12862 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
12863 + enum optype type)
12864 +{
12865 + /*
12866 + * This function is called on the fast path with values of 'type'
12867 + * known at compile time. Invalid arguments are not expected and
12868 + * thus no checks are made.
12869 + */
12870 + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
12871 + u32 *desc;
12872 +
12873 + if (unlikely(!drv_ctx)) {
12874 + spin_lock(&ctx->lock);
12875 +
12876 +		/* Re-check in case another core initialized drv_ctx meanwhile */
12877 + drv_ctx = ctx->drv_ctx[type];
12878 + if (!drv_ctx) {
12879 + int cpu;
12880 +
12881 + if (type == ENCRYPT)
12882 + desc = ctx->sh_desc_enc;
12883 + else if (type == DECRYPT)
12884 + desc = ctx->sh_desc_dec;
12885 + else /* (type == GIVENCRYPT) */
12886 + desc = ctx->sh_desc_givenc;
12887 +
12888 + cpu = smp_processor_id();
12889 + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
12890 + if (likely(!IS_ERR_OR_NULL(drv_ctx)))
12891 + drv_ctx->op_type = type;
12892 +
12893 + ctx->drv_ctx[type] = drv_ctx;
12894 + }
12895 +
12896 + spin_unlock(&ctx->lock);
12897 + }
12898 +
12899 + return drv_ctx;
12900 +}
12901 +
12902 +static void caam_unmap(struct device *dev, struct scatterlist *src,
12903 + struct scatterlist *dst, int src_nents,
12904 + int dst_nents, dma_addr_t iv_dma, int ivsize,
12905 + enum optype op_type, dma_addr_t qm_sg_dma,
12906 + int qm_sg_bytes)
12907 +{
12908 + if (dst != src) {
12909 + if (src_nents)
12910 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
12911 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
12912 + } else {
12913 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
12914 + }
12915 +
12916 + if (iv_dma)
12917 + dma_unmap_single(dev, iv_dma, ivsize,
12918 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
12919 + DMA_TO_DEVICE);
12920 + if (qm_sg_bytes)
12921 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
12922 +}
12923 +
12924 +static void aead_unmap(struct device *dev,
12925 + struct aead_edesc *edesc,
12926 + struct aead_request *req)
12927 +{
12928 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12929 + int ivsize = crypto_aead_ivsize(aead);
12930 +
12931 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12932 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12933 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
12934 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
12935 +}
12936 +
12937 +static void tls_unmap(struct device *dev,
12938 + struct tls_edesc *edesc,
12939 + struct aead_request *req)
12940 +{
12941 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12942 + int ivsize = crypto_aead_ivsize(aead);
12943 +
12944 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
12945 + edesc->dst_nents, edesc->iv_dma, ivsize,
12946 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
12947 + edesc->qm_sg_bytes);
12948 +}
12949 +
12950 +static void ablkcipher_unmap(struct device *dev,
12951 + struct ablkcipher_edesc *edesc,
12952 + struct ablkcipher_request *req)
12953 +{
12954 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
12955 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12956 +
12957 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12958 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12959 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
12960 +}
12961 +
12962 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
12963 +{
12964 + struct device *qidev;
12965 + struct aead_edesc *edesc;
12966 + struct aead_request *aead_req = drv_req->app_ctx;
12967 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
12968 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
12969 + int ecode = 0;
12970 +
12971 + qidev = caam_ctx->qidev;
12972 +
12973 + if (unlikely(status)) {
12974 + caam_jr_strstatus(qidev, status);
12975 + ecode = -EIO;
12976 + }
12977 +
12978 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
12979 + aead_unmap(qidev, edesc, aead_req);
12980 +
12981 + aead_request_complete(aead_req, ecode);
12982 + qi_cache_free(edesc);
12983 +}
12984 +
12985 +/*
12986 + * allocate and map the aead extended descriptor
12987 + */
12988 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
12989 + bool encrypt)
12990 +{
12991 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12992 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12993 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12994 + typeof(*alg), aead);
12995 + struct device *qidev = ctx->qidev;
12996 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
12997 + GFP_KERNEL : GFP_ATOMIC;
12998 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
12999 + struct aead_edesc *edesc;
13000 + dma_addr_t qm_sg_dma, iv_dma = 0;
13001 + int ivsize = 0;
13002 + unsigned int authsize = ctx->authsize;
13003 + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13004 + int in_len, out_len;
13005 + struct qm_sg_entry *sg_table, *fd_sgt;
13006 + struct caam_drv_ctx *drv_ctx;
13007 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13008 +
13009 + drv_ctx = get_drv_ctx(ctx, op_type);
13010 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13011 + return (struct aead_edesc *)drv_ctx;
13012 +
13013 + /* allocate space for base edesc and hw desc commands, link tables */
13014 + edesc = qi_cache_alloc(GFP_DMA | flags);
13015 + if (unlikely(!edesc)) {
13016 + dev_err(qidev, "could not allocate extended descriptor\n");
13017 + return ERR_PTR(-ENOMEM);
13018 + }
13019 +
13020 + if (likely(req->src == req->dst)) {
13021 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13022 + req->cryptlen +
13023 + (encrypt ? authsize : 0));
13024 + if (unlikely(src_nents < 0)) {
13025 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13026 + req->assoclen + req->cryptlen +
13027 + (encrypt ? authsize : 0));
13028 + qi_cache_free(edesc);
13029 + return ERR_PTR(src_nents);
13030 + }
13031 +
13032 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13033 + DMA_BIDIRECTIONAL);
13034 + if (unlikely(!mapped_src_nents)) {
13035 + dev_err(qidev, "unable to map source\n");
13036 + qi_cache_free(edesc);
13037 + return ERR_PTR(-ENOMEM);
13038 + }
13039 + } else {
13040 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13041 + req->cryptlen);
13042 + if (unlikely(src_nents < 0)) {
13043 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13044 + req->assoclen + req->cryptlen);
13045 + qi_cache_free(edesc);
13046 + return ERR_PTR(src_nents);
13047 + }
13048 +
13049 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13050 + req->cryptlen +
13051 + (encrypt ? authsize :
13052 + (-authsize)));
13053 + if (unlikely(dst_nents < 0)) {
13054 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13055 + req->assoclen + req->cryptlen +
13056 + (encrypt ? authsize : (-authsize)));
13057 + qi_cache_free(edesc);
13058 + return ERR_PTR(dst_nents);
13059 + }
13060 +
13061 + if (src_nents) {
13062 + mapped_src_nents = dma_map_sg(qidev, req->src,
13063 + src_nents, DMA_TO_DEVICE);
13064 + if (unlikely(!mapped_src_nents)) {
13065 + dev_err(qidev, "unable to map source\n");
13066 + qi_cache_free(edesc);
13067 + return ERR_PTR(-ENOMEM);
13068 + }
13069 + } else {
13070 + mapped_src_nents = 0;
13071 + }
13072 +
13073 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13074 + DMA_FROM_DEVICE);
13075 + if (unlikely(!mapped_dst_nents)) {
13076 + dev_err(qidev, "unable to map destination\n");
13077 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13078 + qi_cache_free(edesc);
13079 + return ERR_PTR(-ENOMEM);
13080 + }
13081 + }
13082 +
13083 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13084 + ivsize = crypto_aead_ivsize(aead);
13085 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13086 + if (dma_mapping_error(qidev, iv_dma)) {
13087 + dev_err(qidev, "unable to map IV\n");
13088 + caam_unmap(qidev, req->src, req->dst, src_nents,
13089 + dst_nents, 0, 0, op_type, 0, 0);
13090 + qi_cache_free(edesc);
13091 + return ERR_PTR(-ENOMEM);
13092 + }
13093 + }
13094 +
13095 + /*
13096 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13097 + * Input is not contiguous.
13098 + */
13099 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13100 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13101 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13102 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13103 + qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13104 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13105 + iv_dma, ivsize, op_type, 0, 0);
13106 + qi_cache_free(edesc);
13107 + return ERR_PTR(-ENOMEM);
13108 + }
13109 + sg_table = &edesc->sgt[0];
13110 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13111 +
13112 + edesc->src_nents = src_nents;
13113 + edesc->dst_nents = dst_nents;
13114 + edesc->iv_dma = iv_dma;
13115 + edesc->drv_req.app_ctx = req;
13116 + edesc->drv_req.cbk = aead_done;
13117 + edesc->drv_req.drv_ctx = drv_ctx;
13118 +
13119 + edesc->assoclen = cpu_to_caam32(req->assoclen);
13120 + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13121 + DMA_TO_DEVICE);
13122 + if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13123 + dev_err(qidev, "unable to map assoclen\n");
13124 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13125 + iv_dma, ivsize, op_type, 0, 0);
13126 + qi_cache_free(edesc);
13127 + return ERR_PTR(-ENOMEM);
13128 + }
13129 +
13130 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13131 + qm_sg_index++;
13132 + if (ivsize) {
13133 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13134 + qm_sg_index++;
13135 + }
13136 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13137 + qm_sg_index += mapped_src_nents;
13138 +
13139 + if (mapped_dst_nents > 1)
13140 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13141 + qm_sg_index, 0);
13142 +
13143 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13144 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13145 + dev_err(qidev, "unable to map S/G table\n");
13146 + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13147 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13148 + iv_dma, ivsize, op_type, 0, 0);
13149 + qi_cache_free(edesc);
13150 + return ERR_PTR(-ENOMEM);
13151 + }
13152 +
13153 + edesc->qm_sg_dma = qm_sg_dma;
13154 + edesc->qm_sg_bytes = qm_sg_bytes;
13155 +
13156 + out_len = req->assoclen + req->cryptlen +
13157 + (encrypt ? ctx->authsize : (-ctx->authsize));
13158 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13159 +
13160 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13161 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13162 +
13163 + if (req->dst == req->src) {
13164 + if (mapped_src_nents == 1)
13165 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13166 + out_len, 0);
13167 + else
13168 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13169 + (1 + !!ivsize) * sizeof(*sg_table),
13170 + out_len, 0);
13171 + } else if (mapped_dst_nents == 1) {
13172 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13173 + 0);
13174 + } else {
13175 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13176 + qm_sg_index, out_len, 0);
13177 + }
13178 +
13179 + return edesc;
13180 +}
13181 +
13182 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13183 +{
13184 + struct aead_edesc *edesc;
13185 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13186 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13187 + int ret;
13188 +
13189 + if (unlikely(caam_congested))
13190 + return -EAGAIN;
13191 +
13192 + /* allocate extended descriptor */
13193 + edesc = aead_edesc_alloc(req, encrypt);
13194 + if (IS_ERR_OR_NULL(edesc))
13195 + return PTR_ERR(edesc);
13196 +
13197 + /* Create and submit job descriptor */
13198 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13199 + if (!ret) {
13200 + ret = -EINPROGRESS;
13201 + } else {
13202 + aead_unmap(ctx->qidev, edesc, req);
13203 + qi_cache_free(edesc);
13204 + }
13205 +
13206 + return ret;
13207 +}
13208 +
13209 +static int aead_encrypt(struct aead_request *req)
13210 +{
13211 + return aead_crypt(req, true);
13212 +}
13213 +
13214 +static int aead_decrypt(struct aead_request *req)
13215 +{
13216 + return aead_crypt(req, false);
13217 +}
13218 +
13219 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13220 +{
13221 + struct device *qidev;
13222 + struct tls_edesc *edesc;
13223 + struct aead_request *aead_req = drv_req->app_ctx;
13224 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13225 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13226 + int ecode = 0;
13227 +
13228 + qidev = caam_ctx->qidev;
13229 +
13230 + if (unlikely(status)) {
13231 + caam_jr_strstatus(qidev, status);
13232 + ecode = -EIO;
13233 + }
13234 +
13235 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13236 + tls_unmap(qidev, edesc, aead_req);
13237 +
13238 + aead_request_complete(aead_req, ecode);
13239 + qi_cache_free(edesc);
13240 +}
13241 +
13242 +/*
13243 + * allocate and map the tls extended descriptor
13244 + */
13245 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13246 +{
13247 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13248 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13249 + unsigned int blocksize = crypto_aead_blocksize(aead);
13250 + unsigned int padsize, authsize;
13251 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13252 + typeof(*alg), aead);
13253 + struct device *qidev = ctx->qidev;
13254 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13255 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13256 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13257 + struct tls_edesc *edesc;
13258 + dma_addr_t qm_sg_dma, iv_dma = 0;
13259 + int ivsize = 0;
13260 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13261 + int in_len, out_len;
13262 + struct qm_sg_entry *sg_table, *fd_sgt;
13263 + struct caam_drv_ctx *drv_ctx;
13264 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13265 + struct scatterlist *dst;
13266 +
13267 + if (encrypt) {
13268 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13269 + blocksize);
13270 + authsize = ctx->authsize + padsize;
13271 + } else {
13272 + authsize = ctx->authsize;
13273 + }
13274 +
13275 + drv_ctx = get_drv_ctx(ctx, op_type);
13276 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13277 + return (struct tls_edesc *)drv_ctx;
13278 +
13279 + /* allocate space for base edesc and hw desc commands, link tables */
13280 + edesc = qi_cache_alloc(GFP_DMA | flags);
13281 + if (unlikely(!edesc)) {
13282 + dev_err(qidev, "could not allocate extended descriptor\n");
13283 + return ERR_PTR(-ENOMEM);
13284 + }
13285 +
13286 + if (likely(req->src == req->dst)) {
13287 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13288 + req->cryptlen +
13289 + (encrypt ? authsize : 0));
13290 + if (unlikely(src_nents < 0)) {
13291 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13292 + req->assoclen + req->cryptlen +
13293 + (encrypt ? authsize : 0));
13294 + qi_cache_free(edesc);
13295 + return ERR_PTR(src_nents);
13296 + }
13297 +
13298 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13299 + DMA_BIDIRECTIONAL);
13300 + if (unlikely(!mapped_src_nents)) {
13301 + dev_err(qidev, "unable to map source\n");
13302 + qi_cache_free(edesc);
13303 + return ERR_PTR(-ENOMEM);
13304 + }
13305 + dst = req->dst;
13306 + } else {
13307 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13308 + req->cryptlen);
13309 + if (unlikely(src_nents < 0)) {
13310 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13311 + req->assoclen + req->cryptlen);
13312 + qi_cache_free(edesc);
13313 + return ERR_PTR(src_nents);
13314 + }
13315 +
13316 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13317 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
13318 + (encrypt ? authsize : 0));
13319 + if (unlikely(dst_nents < 0)) {
13320 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13321 + req->cryptlen +
13322 + (encrypt ? authsize : 0));
13323 + qi_cache_free(edesc);
13324 + return ERR_PTR(dst_nents);
13325 + }
13326 +
13327 + if (src_nents) {
13328 + mapped_src_nents = dma_map_sg(qidev, req->src,
13329 + src_nents, DMA_TO_DEVICE);
13330 + if (unlikely(!mapped_src_nents)) {
13331 + dev_err(qidev, "unable to map source\n");
13332 + qi_cache_free(edesc);
13333 + return ERR_PTR(-ENOMEM);
13334 + }
13335 + } else {
13336 + mapped_src_nents = 0;
13337 + }
13338 +
13339 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13340 + DMA_FROM_DEVICE);
13341 + if (unlikely(!mapped_dst_nents)) {
13342 + dev_err(qidev, "unable to map destination\n");
13343 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13344 + qi_cache_free(edesc);
13345 + return ERR_PTR(-ENOMEM);
13346 + }
13347 + }
13348 +
13349 + ivsize = crypto_aead_ivsize(aead);
13350 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13351 + if (dma_mapping_error(qidev, iv_dma)) {
13352 + dev_err(qidev, "unable to map IV\n");
13353 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13354 + op_type, 0, 0);
13355 + qi_cache_free(edesc);
13356 + return ERR_PTR(-ENOMEM);
13357 + }
13358 +
13359 + /*
13360 + * Create S/G table: IV, src, dst.
13361 + * Input is not contiguous.
13362 + */
13363 + qm_sg_ents = 1 + mapped_src_nents +
13364 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13365 + sg_table = &edesc->sgt[0];
13366 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13367 +
13368 + edesc->src_nents = src_nents;
13369 + edesc->dst_nents = dst_nents;
13370 + edesc->dst = dst;
13371 + edesc->iv_dma = iv_dma;
13372 + edesc->drv_req.app_ctx = req;
13373 + edesc->drv_req.cbk = tls_done;
13374 + edesc->drv_req.drv_ctx = drv_ctx;
13375 +
13376 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13377 + qm_sg_index = 1;
13378 +
13379 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13380 + qm_sg_index += mapped_src_nents;
13381 +
13382 + if (mapped_dst_nents > 1)
13383 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13384 + qm_sg_index, 0);
13385 +
13386 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13387 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13388 + dev_err(qidev, "unable to map S/G table\n");
13389 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13390 + ivsize, op_type, 0, 0);
13391 + qi_cache_free(edesc);
13392 + return ERR_PTR(-ENOMEM);
13393 + }
13394 +
13395 + edesc->qm_sg_dma = qm_sg_dma;
13396 + edesc->qm_sg_bytes = qm_sg_bytes;
13397 +
13398 + out_len = req->cryptlen + (encrypt ? authsize : 0);
13399 + in_len = ivsize + req->assoclen + req->cryptlen;
13400 +
13401 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13402 +
13403 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13404 +
13405 + if (req->dst == req->src)
13406 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13407 + (sg_nents_for_len(req->src, req->assoclen) +
13408 + 1) * sizeof(*sg_table), out_len, 0);
13409 + else if (mapped_dst_nents == 1)
13410 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13411 + else
13412 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13413 + qm_sg_index, out_len, 0);
13414 +
13415 + return edesc;
13416 +}
13417 +
13418 +static int tls_crypt(struct aead_request *req, bool encrypt)
13419 +{
13420 + struct tls_edesc *edesc;
13421 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13422 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13423 + int ret;
13424 +
13425 + if (unlikely(caam_congested))
13426 + return -EAGAIN;
13427 +
13428 + edesc = tls_edesc_alloc(req, encrypt);
13429 + if (IS_ERR_OR_NULL(edesc))
13430 + return PTR_ERR(edesc);
13431 +
13432 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13433 + if (!ret) {
13434 + ret = -EINPROGRESS;
13435 + } else {
13436 + tls_unmap(ctx->qidev, edesc, req);
13437 + qi_cache_free(edesc);
13438 + }
13439 +
13440 + return ret;
13441 +}
13442 +
13443 +static int tls_encrypt(struct aead_request *req)
13444 +{
13445 + return tls_crypt(req, true);
13446 +}
13447 +
13448 +static int tls_decrypt(struct aead_request *req)
13449 +{
13450 + return tls_crypt(req, false);
13451 +}
13452 +
13453 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13454 +{
13455 + struct ablkcipher_edesc *edesc;
13456 + struct ablkcipher_request *req = drv_req->app_ctx;
13457 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13458 + struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13459 + struct device *qidev = caam_ctx->qidev;
13460 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13461 +
13462 +#ifdef DEBUG
13463 + dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13464 +#endif
13465 +
13466 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13467 +
13468 + if (status)
13469 + caam_jr_strstatus(qidev, status);
13470 +
13471 +#ifdef DEBUG
13472 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
13473 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13474 + edesc->src_nents > 1 ? 100 : ivsize, 1);
13475 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
13476 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13477 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13478 +#endif
13479 +
13480 + ablkcipher_unmap(qidev, edesc, req);
13481 + qi_cache_free(edesc);
13482 +
13483 + /*
13484 + * The crypto API expects us to set the IV (req->info) to the last
13485 + * ciphertext block. This is used e.g. by the CTS mode.
13486 + */
13487 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13488 + ivsize, 0);
13489 +
13490 + ablkcipher_request_complete(req, status);
13491 +}
13492 +
13493 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13494 + *req, bool encrypt)
13495 +{
13496 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13497 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13498 + struct device *qidev = ctx->qidev;
13499 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13500 + GFP_KERNEL : GFP_ATOMIC;
13501 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13502 + struct ablkcipher_edesc *edesc;
13503 + dma_addr_t iv_dma;
13504 + bool in_contig;
13505 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13506 + int dst_sg_idx, qm_sg_ents;
13507 + struct qm_sg_entry *sg_table, *fd_sgt;
13508 + struct caam_drv_ctx *drv_ctx;
13509 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13510 +
13511 + drv_ctx = get_drv_ctx(ctx, op_type);
13512 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13513 + return (struct ablkcipher_edesc *)drv_ctx;
13514 +
13515 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13516 + if (unlikely(src_nents < 0)) {
13517 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13518 + req->nbytes);
13519 + return ERR_PTR(src_nents);
13520 + }
13521 +
13522 + if (unlikely(req->src != req->dst)) {
13523 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13524 + if (unlikely(dst_nents < 0)) {
13525 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13526 + req->nbytes);
13527 + return ERR_PTR(dst_nents);
13528 + }
13529 +
13530 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13531 + DMA_TO_DEVICE);
13532 + if (unlikely(!mapped_src_nents)) {
13533 + dev_err(qidev, "unable to map source\n");
13534 + return ERR_PTR(-ENOMEM);
13535 + }
13536 +
13537 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13538 + DMA_FROM_DEVICE);
13539 + if (unlikely(!mapped_dst_nents)) {
13540 + dev_err(qidev, "unable to map destination\n");
13541 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13542 + return ERR_PTR(-ENOMEM);
13543 + }
13544 + } else {
13545 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13546 + DMA_BIDIRECTIONAL);
13547 + if (unlikely(!mapped_src_nents)) {
13548 + dev_err(qidev, "unable to map source\n");
13549 + return ERR_PTR(-ENOMEM);
13550 + }
13551 + }
13552 +
13553 + iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13554 + if (dma_mapping_error(qidev, iv_dma)) {
13555 + dev_err(qidev, "unable to map IV\n");
13556 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13557 + 0, 0, 0, 0);
13558 + return ERR_PTR(-ENOMEM);
13559 + }
13560 +
13561 + if (mapped_src_nents == 1 &&
13562 + iv_dma + ivsize == sg_dma_address(req->src)) {
13563 + in_contig = true;
13564 + qm_sg_ents = 0;
13565 + } else {
13566 + in_contig = false;
13567 + qm_sg_ents = 1 + mapped_src_nents;
13568 + }
13569 + dst_sg_idx = qm_sg_ents;
13570 +
13571 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13572 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13573 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13574 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13575 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13576 + iv_dma, ivsize, op_type, 0, 0);
13577 + return ERR_PTR(-ENOMEM);
13578 + }
13579 +
13580 + /* allocate space for base edesc and link tables */
13581 + edesc = qi_cache_alloc(GFP_DMA | flags);
13582 + if (unlikely(!edesc)) {
13583 + dev_err(qidev, "could not allocate extended descriptor\n");
13584 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13585 + iv_dma, ivsize, op_type, 0, 0);
13586 + return ERR_PTR(-ENOMEM);
13587 + }
13588 +
13589 + edesc->src_nents = src_nents;
13590 + edesc->dst_nents = dst_nents;
13591 + edesc->iv_dma = iv_dma;
13592 + sg_table = &edesc->sgt[0];
13593 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13594 + edesc->drv_req.app_ctx = req;
13595 + edesc->drv_req.cbk = ablkcipher_done;
13596 + edesc->drv_req.drv_ctx = drv_ctx;
13597 +
13598 + if (!in_contig) {
13599 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13600 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13601 + }
13602 +
13603 + if (mapped_dst_nents > 1)
13604 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13605 + dst_sg_idx, 0);
13606 +
13607 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13608 + DMA_TO_DEVICE);
13609 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13610 + dev_err(qidev, "unable to map S/G table\n");
13611 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13612 + iv_dma, ivsize, op_type, 0, 0);
13613 + qi_cache_free(edesc);
13614 + return ERR_PTR(-ENOMEM);
13615 + }
13616 +
13617 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13618 +
13619 + if (!in_contig)
13620 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13621 + ivsize + req->nbytes, 0);
13622 + else
13623 + dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13624 + 0);
13625 +
13626 + if (req->src == req->dst) {
13627 + if (!in_contig)
13628 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13629 + sizeof(*sg_table), req->nbytes, 0);
13630 + else
13631 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13632 + req->nbytes, 0);
13633 + } else if (mapped_dst_nents > 1) {
13634 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13635 + sizeof(*sg_table), req->nbytes, 0);
13636 + } else {
13637 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13638 + req->nbytes, 0);
13639 + }
13640 +
13641 + return edesc;
13642 +}
13643 +
13644 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13645 + struct skcipher_givcrypt_request *creq)
13646 +{
13647 + struct ablkcipher_request *req = &creq->creq;
13648 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13649 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13650 + struct device *qidev = ctx->qidev;
13651 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13652 + GFP_KERNEL : GFP_ATOMIC;
13653 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13654 + struct ablkcipher_edesc *edesc;
13655 + dma_addr_t iv_dma;
13656 + bool out_contig;
13657 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13658 + struct qm_sg_entry *sg_table, *fd_sgt;
13659 + int dst_sg_idx, qm_sg_ents;
13660 + struct caam_drv_ctx *drv_ctx;
13661 +
13662 + drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13663 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13664 + return (struct ablkcipher_edesc *)drv_ctx;
13665 +
13666 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13667 + if (unlikely(src_nents < 0)) {
13668 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13669 + req->nbytes);
13670 + return ERR_PTR(src_nents);
13671 + }
13672 +
13673 + if (unlikely(req->src != req->dst)) {
13674 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13675 + if (unlikely(dst_nents < 0)) {
13676 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13677 + req->nbytes);
13678 + return ERR_PTR(dst_nents);
13679 + }
13680 +
13681 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13682 + DMA_TO_DEVICE);
13683 + if (unlikely(!mapped_src_nents)) {
13684 + dev_err(qidev, "unable to map source\n");
13685 + return ERR_PTR(-ENOMEM);
13686 + }
13687 +
13688 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13689 + DMA_FROM_DEVICE);
13690 + if (unlikely(!mapped_dst_nents)) {
13691 + dev_err(qidev, "unable to map destination\n");
13692 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13693 + return ERR_PTR(-ENOMEM);
13694 + }
13695 + } else {
13696 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13697 + DMA_BIDIRECTIONAL);
13698 + if (unlikely(!mapped_src_nents)) {
13699 + dev_err(qidev, "unable to map source\n");
13700 + return ERR_PTR(-ENOMEM);
13701 + }
13702 +
13703 + dst_nents = src_nents;
13704 + mapped_dst_nents = src_nents;
13705 + }
13706 +
13707 + iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13708 + if (dma_mapping_error(qidev, iv_dma)) {
13709 + dev_err(qidev, "unable to map IV\n");
13710 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13711 + 0, 0, 0, 0);
13712 + return ERR_PTR(-ENOMEM);
13713 + }
13714 +
13715 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13716 + dst_sg_idx = qm_sg_ents;
13717 + if (mapped_dst_nents == 1 &&
13718 + iv_dma + ivsize == sg_dma_address(req->dst)) {
13719 + out_contig = true;
13720 + } else {
13721 + out_contig = false;
13722 + qm_sg_ents += 1 + mapped_dst_nents;
13723 + }
13724 +
13725 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13726 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13727 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13728 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13729 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13730 + return ERR_PTR(-ENOMEM);
13731 + }
13732 +
13733 + /* allocate space for base edesc and link tables */
13734 + edesc = qi_cache_alloc(GFP_DMA | flags);
13735 + if (!edesc) {
13736 + dev_err(qidev, "could not allocate extended descriptor\n");
13737 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13738 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13739 + return ERR_PTR(-ENOMEM);
13740 + }
13741 +
13742 + edesc->src_nents = src_nents;
13743 + edesc->dst_nents = dst_nents;
13744 + edesc->iv_dma = iv_dma;
13745 + sg_table = &edesc->sgt[0];
13746 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13747 + edesc->drv_req.app_ctx = req;
13748 + edesc->drv_req.cbk = ablkcipher_done;
13749 + edesc->drv_req.drv_ctx = drv_ctx;
13750 +
13751 + if (mapped_src_nents > 1)
13752 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13753 +
13754 + if (!out_contig) {
13755 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13756 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13757 + dst_sg_idx + 1, 0);
13758 + }
13759 +
13760 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13761 + DMA_TO_DEVICE);
13762 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13763 + dev_err(qidev, "unable to map S/G table\n");
13764 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13765 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13766 + qi_cache_free(edesc);
13767 + return ERR_PTR(-ENOMEM);
13768 + }
13769 +
13770 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13771 +
13772 + if (mapped_src_nents > 1)
13773 + dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13774 + 0);
13775 + else
13776 + dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13777 + req->nbytes, 0);
13778 +
13779 + if (!out_contig)
13780 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13781 + sizeof(*sg_table), ivsize + req->nbytes,
13782 + 0);
13783 + else
13784 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13785 + ivsize + req->nbytes, 0);
13786 +
13787 + return edesc;
13788 +}
13789 +
13790 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13791 +{
13792 + struct ablkcipher_edesc *edesc;
13793 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13794 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13795 + int ret;
13796 +
13797 + if (unlikely(caam_congested))
13798 + return -EAGAIN;
13799 +
13800 + /* allocate extended descriptor */
13801 + edesc = ablkcipher_edesc_alloc(req, encrypt);
13802 + if (IS_ERR(edesc))
13803 + return PTR_ERR(edesc);
13804 +
13805 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13806 + if (!ret) {
13807 + ret = -EINPROGRESS;
13808 + } else {
13809 + ablkcipher_unmap(ctx->qidev, edesc, req);
13810 + qi_cache_free(edesc);
13811 + }
13812 +
13813 + return ret;
13814 +}
13815 +
13816 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
13817 +{
13818 + return ablkcipher_crypt(req, true);
13819 +}
13820 +
13821 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
13822 +{
13823 + return ablkcipher_crypt(req, false);
13824 +}
13825 +
13826 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
13827 +{
13828 + struct ablkcipher_request *req = &creq->creq;
13829 + struct ablkcipher_edesc *edesc;
13830 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13831 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13832 + int ret;
13833 +
13834 + if (unlikely(caam_congested))
13835 + return -EAGAIN;
13836 +
13837 + /* allocate extended descriptor */
13838 + edesc = ablkcipher_giv_edesc_alloc(creq);
13839 + if (IS_ERR(edesc))
13840 + return PTR_ERR(edesc);
13841 +
13842 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13843 + if (!ret) {
13844 + ret = -EINPROGRESS;
13845 + } else {
13846 + ablkcipher_unmap(ctx->qidev, edesc, req);
13847 + qi_cache_free(edesc);
13848 + }
13849 +
13850 + return ret;
13851 +}
13852 +
13853 +#define template_ablkcipher template_u.ablkcipher
13854 +struct caam_alg_template {
13855 + char name[CRYPTO_MAX_ALG_NAME];
13856 + char driver_name[CRYPTO_MAX_ALG_NAME];
13857 + unsigned int blocksize;
13858 + u32 type;
13859 + union {
13860 + struct ablkcipher_alg ablkcipher;
13861 + } template_u;
13862 + u32 class1_alg_type;
13863 + u32 class2_alg_type;
13864 +};
13865 +
13866 +static struct caam_alg_template driver_algs[] = {
13867 + /* ablkcipher descriptor */
13868 + {
13869 + .name = "cbc(aes)",
13870 + .driver_name = "cbc-aes-caam-qi",
13871 + .blocksize = AES_BLOCK_SIZE,
13872 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13873 + .template_ablkcipher = {
13874 + .setkey = ablkcipher_setkey,
13875 + .encrypt = ablkcipher_encrypt,
13876 + .decrypt = ablkcipher_decrypt,
13877 + .givencrypt = ablkcipher_givencrypt,
13878 + .geniv = "<built-in>",
13879 + .min_keysize = AES_MIN_KEY_SIZE,
13880 + .max_keysize = AES_MAX_KEY_SIZE,
13881 + .ivsize = AES_BLOCK_SIZE,
13882 + },
13883 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13884 + },
13885 + {
13886 + .name = "cbc(des3_ede)",
13887 + .driver_name = "cbc-3des-caam-qi",
13888 + .blocksize = DES3_EDE_BLOCK_SIZE,
13889 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13890 + .template_ablkcipher = {
13891 + .setkey = ablkcipher_setkey,
13892 + .encrypt = ablkcipher_encrypt,
13893 + .decrypt = ablkcipher_decrypt,
13894 + .givencrypt = ablkcipher_givencrypt,
13895 + .geniv = "<built-in>",
13896 + .min_keysize = DES3_EDE_KEY_SIZE,
13897 + .max_keysize = DES3_EDE_KEY_SIZE,
13898 + .ivsize = DES3_EDE_BLOCK_SIZE,
13899 + },
13900 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
13901 + },
13902 + {
13903 + .name = "cbc(des)",
13904 + .driver_name = "cbc-des-caam-qi",
13905 + .blocksize = DES_BLOCK_SIZE,
13906 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13907 + .template_ablkcipher = {
13908 + .setkey = ablkcipher_setkey,
13909 + .encrypt = ablkcipher_encrypt,
13910 + .decrypt = ablkcipher_decrypt,
13911 + .givencrypt = ablkcipher_givencrypt,
13912 + .geniv = "<built-in>",
13913 + .min_keysize = DES_KEY_SIZE,
13914 + .max_keysize = DES_KEY_SIZE,
13915 + .ivsize = DES_BLOCK_SIZE,
13916 + },
13917 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
13918 + },
13919 + {
13920 + .name = "ctr(aes)",
13921 + .driver_name = "ctr-aes-caam-qi",
13922 + .blocksize = 1,
13923 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13924 + .template_ablkcipher = {
13925 + .setkey = ablkcipher_setkey,
13926 + .encrypt = ablkcipher_encrypt,
13927 + .decrypt = ablkcipher_decrypt,
13928 + .geniv = "chainiv",
13929 + .min_keysize = AES_MIN_KEY_SIZE,
13930 + .max_keysize = AES_MAX_KEY_SIZE,
13931 + .ivsize = AES_BLOCK_SIZE,
13932 + },
13933 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13934 + },
13935 + {
13936 + .name = "rfc3686(ctr(aes))",
13937 + .driver_name = "rfc3686-ctr-aes-caam-qi",
13938 + .blocksize = 1,
13939 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13940 + .template_ablkcipher = {
13941 + .setkey = ablkcipher_setkey,
13942 + .encrypt = ablkcipher_encrypt,
13943 + .decrypt = ablkcipher_decrypt,
13944 + .givencrypt = ablkcipher_givencrypt,
13945 + .geniv = "<built-in>",
13946 + .min_keysize = AES_MIN_KEY_SIZE +
13947 + CTR_RFC3686_NONCE_SIZE,
13948 + .max_keysize = AES_MAX_KEY_SIZE +
13949 + CTR_RFC3686_NONCE_SIZE,
13950 + .ivsize = CTR_RFC3686_IV_SIZE,
13951 + },
13952 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13953 + },
13954 + {
13955 + .name = "xts(aes)",
13956 + .driver_name = "xts-aes-caam-qi",
13957 + .blocksize = AES_BLOCK_SIZE,
13958 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13959 + .template_ablkcipher = {
13960 + .setkey = xts_ablkcipher_setkey,
13961 + .encrypt = ablkcipher_encrypt,
13962 + .decrypt = ablkcipher_decrypt,
13963 + .geniv = "eseqiv",
13964 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
13965 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
13966 + .ivsize = AES_BLOCK_SIZE,
13967 + },
13968 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
13969 + },
13970 +};
13971 +
13972 +static struct caam_aead_alg driver_aeads[] = {
13973 + /* single-pass ipsec_esp descriptor */
13974 + {
13975 + .aead = {
13976 + .base = {
13977 + .cra_name = "authenc(hmac(md5),cbc(aes))",
13978 + .cra_driver_name = "authenc-hmac-md5-"
13979 + "cbc-aes-caam-qi",
13980 + .cra_blocksize = AES_BLOCK_SIZE,
13981 + },
13982 + .setkey = aead_setkey,
13983 + .setauthsize = aead_setauthsize,
13984 + .encrypt = aead_encrypt,
13985 + .decrypt = aead_decrypt,
13986 + .ivsize = AES_BLOCK_SIZE,
13987 + .maxauthsize = MD5_DIGEST_SIZE,
13988 + },
13989 + .caam = {
13990 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13991 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
13992 + OP_ALG_AAI_HMAC_PRECOMP,
13993 + }
13994 + },
13995 + {
13996 + .aead = {
13997 + .base = {
13998 + .cra_name = "echainiv(authenc(hmac(md5),"
13999 + "cbc(aes)))",
14000 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14001 + "cbc-aes-caam-qi",
14002 + .cra_blocksize = AES_BLOCK_SIZE,
14003 + },
14004 + .setkey = aead_setkey,
14005 + .setauthsize = aead_setauthsize,
14006 + .encrypt = aead_encrypt,
14007 + .decrypt = aead_decrypt,
14008 + .ivsize = AES_BLOCK_SIZE,
14009 + .maxauthsize = MD5_DIGEST_SIZE,
14010 + },
14011 + .caam = {
14012 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14013 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14014 + OP_ALG_AAI_HMAC_PRECOMP,
14015 + .geniv = true,
14016 + }
14017 + },
14018 + {
14019 + .aead = {
14020 + .base = {
14021 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
14022 + .cra_driver_name = "authenc-hmac-sha1-"
14023 + "cbc-aes-caam-qi",
14024 + .cra_blocksize = AES_BLOCK_SIZE,
14025 + },
14026 + .setkey = aead_setkey,
14027 + .setauthsize = aead_setauthsize,
14028 + .encrypt = aead_encrypt,
14029 + .decrypt = aead_decrypt,
14030 + .ivsize = AES_BLOCK_SIZE,
14031 + .maxauthsize = SHA1_DIGEST_SIZE,
14032 + },
14033 + .caam = {
14034 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14035 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14036 + OP_ALG_AAI_HMAC_PRECOMP,
14037 + }
14038 + },
14039 + {
14040 + .aead = {
14041 + .base = {
14042 + .cra_name = "echainiv(authenc(hmac(sha1),"
14043 + "cbc(aes)))",
14044 + .cra_driver_name = "echainiv-authenc-"
14045 + "hmac-sha1-cbc-aes-caam-qi",
14046 + .cra_blocksize = AES_BLOCK_SIZE,
14047 + },
14048 + .setkey = aead_setkey,
14049 + .setauthsize = aead_setauthsize,
14050 + .encrypt = aead_encrypt,
14051 + .decrypt = aead_decrypt,
14052 + .ivsize = AES_BLOCK_SIZE,
14053 + .maxauthsize = SHA1_DIGEST_SIZE,
14054 + },
14055 + .caam = {
14056 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14057 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14058 + OP_ALG_AAI_HMAC_PRECOMP,
14059 + .geniv = true,
14060 + },
14061 + },
14062 + {
14063 + .aead = {
14064 + .base = {
14065 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
14066 + .cra_driver_name = "authenc-hmac-sha224-"
14067 + "cbc-aes-caam-qi",
14068 + .cra_blocksize = AES_BLOCK_SIZE,
14069 + },
14070 + .setkey = aead_setkey,
14071 + .setauthsize = aead_setauthsize,
14072 + .encrypt = aead_encrypt,
14073 + .decrypt = aead_decrypt,
14074 + .ivsize = AES_BLOCK_SIZE,
14075 + .maxauthsize = SHA224_DIGEST_SIZE,
14076 + },
14077 + .caam = {
14078 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14079 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14080 + OP_ALG_AAI_HMAC_PRECOMP,
14081 + }
14082 + },
14083 + {
14084 + .aead = {
14085 + .base = {
14086 + .cra_name = "echainiv(authenc(hmac(sha224),"
14087 + "cbc(aes)))",
14088 + .cra_driver_name = "echainiv-authenc-"
14089 + "hmac-sha224-cbc-aes-caam-qi",
14090 + .cra_blocksize = AES_BLOCK_SIZE,
14091 + },
14092 + .setkey = aead_setkey,
14093 + .setauthsize = aead_setauthsize,
14094 + .encrypt = aead_encrypt,
14095 + .decrypt = aead_decrypt,
14096 + .ivsize = AES_BLOCK_SIZE,
14097 + .maxauthsize = SHA224_DIGEST_SIZE,
14098 + },
14099 + .caam = {
14100 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14101 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14102 + OP_ALG_AAI_HMAC_PRECOMP,
14103 + .geniv = true,
14104 + }
14105 + },
14106 + {
14107 + .aead = {
14108 + .base = {
14109 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
14110 + .cra_driver_name = "authenc-hmac-sha256-"
14111 + "cbc-aes-caam-qi",
14112 + .cra_blocksize = AES_BLOCK_SIZE,
14113 + },
14114 + .setkey = aead_setkey,
14115 + .setauthsize = aead_setauthsize,
14116 + .encrypt = aead_encrypt,
14117 + .decrypt = aead_decrypt,
14118 + .ivsize = AES_BLOCK_SIZE,
14119 + .maxauthsize = SHA256_DIGEST_SIZE,
14120 + },
14121 + .caam = {
14122 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14123 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14124 + OP_ALG_AAI_HMAC_PRECOMP,
14125 + }
14126 + },
14127 + {
14128 + .aead = {
14129 + .base = {
14130 + .cra_name = "echainiv(authenc(hmac(sha256),"
14131 + "cbc(aes)))",
14132 + .cra_driver_name = "echainiv-authenc-"
14133 + "hmac-sha256-cbc-aes-"
14134 + "caam-qi",
14135 + .cra_blocksize = AES_BLOCK_SIZE,
14136 + },
14137 + .setkey = aead_setkey,
14138 + .setauthsize = aead_setauthsize,
14139 + .encrypt = aead_encrypt,
14140 + .decrypt = aead_decrypt,
14141 + .ivsize = AES_BLOCK_SIZE,
14142 + .maxauthsize = SHA256_DIGEST_SIZE,
14143 + },
14144 + .caam = {
14145 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14146 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14147 + OP_ALG_AAI_HMAC_PRECOMP,
14148 + .geniv = true,
14149 + }
14150 + },
14151 + {
14152 + .aead = {
14153 + .base = {
14154 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
14155 + .cra_driver_name = "authenc-hmac-sha384-"
14156 + "cbc-aes-caam-qi",
14157 + .cra_blocksize = AES_BLOCK_SIZE,
14158 + },
14159 + .setkey = aead_setkey,
14160 + .setauthsize = aead_setauthsize,
14161 + .encrypt = aead_encrypt,
14162 + .decrypt = aead_decrypt,
14163 + .ivsize = AES_BLOCK_SIZE,
14164 + .maxauthsize = SHA384_DIGEST_SIZE,
14165 + },
14166 + .caam = {
14167 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14168 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14169 + OP_ALG_AAI_HMAC_PRECOMP,
14170 + }
14171 + },
14172 + {
14173 + .aead = {
14174 + .base = {
14175 + .cra_name = "echainiv(authenc(hmac(sha384),"
14176 + "cbc(aes)))",
14177 + .cra_driver_name = "echainiv-authenc-"
14178 + "hmac-sha384-cbc-aes-"
14179 + "caam-qi",
14180 + .cra_blocksize = AES_BLOCK_SIZE,
14181 + },
14182 + .setkey = aead_setkey,
14183 + .setauthsize = aead_setauthsize,
14184 + .encrypt = aead_encrypt,
14185 + .decrypt = aead_decrypt,
14186 + .ivsize = AES_BLOCK_SIZE,
14187 + .maxauthsize = SHA384_DIGEST_SIZE,
14188 + },
14189 + .caam = {
14190 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14191 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14192 + OP_ALG_AAI_HMAC_PRECOMP,
14193 + .geniv = true,
14194 + }
14195 + },
14196 + {
14197 + .aead = {
14198 + .base = {
14199 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
14200 + .cra_driver_name = "authenc-hmac-sha512-"
14201 + "cbc-aes-caam-qi",
14202 + .cra_blocksize = AES_BLOCK_SIZE,
14203 + },
14204 + .setkey = aead_setkey,
14205 + .setauthsize = aead_setauthsize,
14206 + .encrypt = aead_encrypt,
14207 + .decrypt = aead_decrypt,
14208 + .ivsize = AES_BLOCK_SIZE,
14209 + .maxauthsize = SHA512_DIGEST_SIZE,
14210 + },
14211 + .caam = {
14212 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14213 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14214 + OP_ALG_AAI_HMAC_PRECOMP,
14215 + }
14216 + },
14217 + {
14218 + .aead = {
14219 + .base = {
14220 + .cra_name = "echainiv(authenc(hmac(sha512),"
14221 + "cbc(aes)))",
14222 + .cra_driver_name = "echainiv-authenc-"
14223 + "hmac-sha512-cbc-aes-"
14224 + "caam-qi",
14225 + .cra_blocksize = AES_BLOCK_SIZE,
14226 + },
14227 + .setkey = aead_setkey,
14228 + .setauthsize = aead_setauthsize,
14229 + .encrypt = aead_encrypt,
14230 + .decrypt = aead_decrypt,
14231 + .ivsize = AES_BLOCK_SIZE,
14232 + .maxauthsize = SHA512_DIGEST_SIZE,
14233 + },
14234 + .caam = {
14235 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14236 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14237 + OP_ALG_AAI_HMAC_PRECOMP,
14238 + .geniv = true,
14239 + }
14240 + },
14241 + {
14242 + .aead = {
14243 + .base = {
14244 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14245 + .cra_driver_name = "authenc-hmac-md5-"
14246 + "cbc-des3_ede-caam-qi",
14247 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14248 + },
14249 + .setkey = aead_setkey,
14250 + .setauthsize = aead_setauthsize,
14251 + .encrypt = aead_encrypt,
14252 + .decrypt = aead_decrypt,
14253 + .ivsize = DES3_EDE_BLOCK_SIZE,
14254 + .maxauthsize = MD5_DIGEST_SIZE,
14255 + },
14256 + .caam = {
14257 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14258 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14259 + OP_ALG_AAI_HMAC_PRECOMP,
14260 + }
14261 + },
14262 + {
14263 + .aead = {
14264 + .base = {
14265 + .cra_name = "echainiv(authenc(hmac(md5),"
14266 + "cbc(des3_ede)))",
14267 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14268 + "cbc-des3_ede-caam-qi",
14269 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14270 + },
14271 + .setkey = aead_setkey,
14272 + .setauthsize = aead_setauthsize,
14273 + .encrypt = aead_encrypt,
14274 + .decrypt = aead_decrypt,
14275 + .ivsize = DES3_EDE_BLOCK_SIZE,
14276 + .maxauthsize = MD5_DIGEST_SIZE,
14277 + },
14278 + .caam = {
14279 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14280 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14281 + OP_ALG_AAI_HMAC_PRECOMP,
14282 + .geniv = true,
14283 + }
14284 + },
14285 + {
14286 + .aead = {
14287 + .base = {
14288 + .cra_name = "authenc(hmac(sha1),"
14289 + "cbc(des3_ede))",
14290 + .cra_driver_name = "authenc-hmac-sha1-"
14291 + "cbc-des3_ede-caam-qi",
14292 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14293 + },
14294 + .setkey = aead_setkey,
14295 + .setauthsize = aead_setauthsize,
14296 + .encrypt = aead_encrypt,
14297 + .decrypt = aead_decrypt,
14298 + .ivsize = DES3_EDE_BLOCK_SIZE,
14299 + .maxauthsize = SHA1_DIGEST_SIZE,
14300 + },
14301 + .caam = {
14302 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14303 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14304 + OP_ALG_AAI_HMAC_PRECOMP,
14305 + },
14306 + },
14307 + {
14308 + .aead = {
14309 + .base = {
14310 + .cra_name = "echainiv(authenc(hmac(sha1),"
14311 + "cbc(des3_ede)))",
14312 + .cra_driver_name = "echainiv-authenc-"
14313 + "hmac-sha1-"
14314 + "cbc-des3_ede-caam-qi",
14315 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14316 + },
14317 + .setkey = aead_setkey,
14318 + .setauthsize = aead_setauthsize,
14319 + .encrypt = aead_encrypt,
14320 + .decrypt = aead_decrypt,
14321 + .ivsize = DES3_EDE_BLOCK_SIZE,
14322 + .maxauthsize = SHA1_DIGEST_SIZE,
14323 + },
14324 + .caam = {
14325 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14326 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14327 + OP_ALG_AAI_HMAC_PRECOMP,
14328 + .geniv = true,
14329 + }
14330 + },
14331 + {
14332 + .aead = {
14333 + .base = {
14334 + .cra_name = "authenc(hmac(sha224),"
14335 + "cbc(des3_ede))",
14336 + .cra_driver_name = "authenc-hmac-sha224-"
14337 + "cbc-des3_ede-caam-qi",
14338 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14339 + },
14340 + .setkey = aead_setkey,
14341 + .setauthsize = aead_setauthsize,
14342 + .encrypt = aead_encrypt,
14343 + .decrypt = aead_decrypt,
14344 + .ivsize = DES3_EDE_BLOCK_SIZE,
14345 + .maxauthsize = SHA224_DIGEST_SIZE,
14346 + },
14347 + .caam = {
14348 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14349 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14350 + OP_ALG_AAI_HMAC_PRECOMP,
14351 + },
14352 + },
14353 + {
14354 + .aead = {
14355 + .base = {
14356 + .cra_name = "echainiv(authenc(hmac(sha224),"
14357 + "cbc(des3_ede)))",
14358 + .cra_driver_name = "echainiv-authenc-"
14359 + "hmac-sha224-"
14360 + "cbc-des3_ede-caam-qi",
14361 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14362 + },
14363 + .setkey = aead_setkey,
14364 + .setauthsize = aead_setauthsize,
14365 + .encrypt = aead_encrypt,
14366 + .decrypt = aead_decrypt,
14367 + .ivsize = DES3_EDE_BLOCK_SIZE,
14368 + .maxauthsize = SHA224_DIGEST_SIZE,
14369 + },
14370 + .caam = {
14371 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14372 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14373 + OP_ALG_AAI_HMAC_PRECOMP,
14374 + .geniv = true,
14375 + }
14376 + },
14377 + {
14378 + .aead = {
14379 + .base = {
14380 + .cra_name = "authenc(hmac(sha256),"
14381 + "cbc(des3_ede))",
14382 + .cra_driver_name = "authenc-hmac-sha256-"
14383 + "cbc-des3_ede-caam-qi",
14384 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14385 + },
14386 + .setkey = aead_setkey,
14387 + .setauthsize = aead_setauthsize,
14388 + .encrypt = aead_encrypt,
14389 + .decrypt = aead_decrypt,
14390 + .ivsize = DES3_EDE_BLOCK_SIZE,
14391 + .maxauthsize = SHA256_DIGEST_SIZE,
14392 + },
14393 + .caam = {
14394 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14395 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14396 + OP_ALG_AAI_HMAC_PRECOMP,
14397 + },
14398 + },
14399 + {
14400 + .aead = {
14401 + .base = {
14402 + .cra_name = "echainiv(authenc(hmac(sha256),"
14403 + "cbc(des3_ede)))",
14404 + .cra_driver_name = "echainiv-authenc-"
14405 + "hmac-sha256-"
14406 + "cbc-des3_ede-caam-qi",
14407 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14408 + },
14409 + .setkey = aead_setkey,
14410 + .setauthsize = aead_setauthsize,
14411 + .encrypt = aead_encrypt,
14412 + .decrypt = aead_decrypt,
14413 + .ivsize = DES3_EDE_BLOCK_SIZE,
14414 + .maxauthsize = SHA256_DIGEST_SIZE,
14415 + },
14416 + .caam = {
14417 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14418 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14419 + OP_ALG_AAI_HMAC_PRECOMP,
14420 + .geniv = true,
14421 + }
14422 + },
14423 + {
14424 + .aead = {
14425 + .base = {
14426 + .cra_name = "authenc(hmac(sha384),"
14427 + "cbc(des3_ede))",
14428 + .cra_driver_name = "authenc-hmac-sha384-"
14429 + "cbc-des3_ede-caam-qi",
14430 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14431 + },
14432 + .setkey = aead_setkey,
14433 + .setauthsize = aead_setauthsize,
14434 + .encrypt = aead_encrypt,
14435 + .decrypt = aead_decrypt,
14436 + .ivsize = DES3_EDE_BLOCK_SIZE,
14437 + .maxauthsize = SHA384_DIGEST_SIZE,
14438 + },
14439 + .caam = {
14440 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14441 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14442 + OP_ALG_AAI_HMAC_PRECOMP,
14443 + },
14444 + },
14445 + {
14446 + .aead = {
14447 + .base = {
14448 + .cra_name = "echainiv(authenc(hmac(sha384),"
14449 + "cbc(des3_ede)))",
14450 + .cra_driver_name = "echainiv-authenc-"
14451 + "hmac-sha384-"
14452 + "cbc-des3_ede-caam-qi",
14453 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14454 + },
14455 + .setkey = aead_setkey,
14456 + .setauthsize = aead_setauthsize,
14457 + .encrypt = aead_encrypt,
14458 + .decrypt = aead_decrypt,
14459 + .ivsize = DES3_EDE_BLOCK_SIZE,
14460 + .maxauthsize = SHA384_DIGEST_SIZE,
14461 + },
14462 + .caam = {
14463 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14464 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14465 + OP_ALG_AAI_HMAC_PRECOMP,
14466 + .geniv = true,
14467 + }
14468 + },
14469 + {
14470 + .aead = {
14471 + .base = {
14472 + .cra_name = "authenc(hmac(sha512),"
14473 + "cbc(des3_ede))",
14474 + .cra_driver_name = "authenc-hmac-sha512-"
14475 + "cbc-des3_ede-caam-qi",
14476 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14477 + },
14478 + .setkey = aead_setkey,
14479 + .setauthsize = aead_setauthsize,
14480 + .encrypt = aead_encrypt,
14481 + .decrypt = aead_decrypt,
14482 + .ivsize = DES3_EDE_BLOCK_SIZE,
14483 + .maxauthsize = SHA512_DIGEST_SIZE,
14484 + },
14485 + .caam = {
14486 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14487 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14488 + OP_ALG_AAI_HMAC_PRECOMP,
14489 + },
14490 + },
14491 + {
14492 + .aead = {
14493 + .base = {
14494 + .cra_name = "echainiv(authenc(hmac(sha512),"
14495 + "cbc(des3_ede)))",
14496 + .cra_driver_name = "echainiv-authenc-"
14497 + "hmac-sha512-"
14498 + "cbc-des3_ede-caam-qi",
14499 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14500 + },
14501 + .setkey = aead_setkey,
14502 + .setauthsize = aead_setauthsize,
14503 + .encrypt = aead_encrypt,
14504 + .decrypt = aead_decrypt,
14505 + .ivsize = DES3_EDE_BLOCK_SIZE,
14506 + .maxauthsize = SHA512_DIGEST_SIZE,
14507 + },
14508 + .caam = {
14509 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14510 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14511 + OP_ALG_AAI_HMAC_PRECOMP,
14512 + .geniv = true,
14513 + }
14514 + },
14515 + {
14516 + .aead = {
14517 + .base = {
14518 + .cra_name = "authenc(hmac(md5),cbc(des))",
14519 + .cra_driver_name = "authenc-hmac-md5-"
14520 + "cbc-des-caam-qi",
14521 + .cra_blocksize = DES_BLOCK_SIZE,
14522 + },
14523 + .setkey = aead_setkey,
14524 + .setauthsize = aead_setauthsize,
14525 + .encrypt = aead_encrypt,
14526 + .decrypt = aead_decrypt,
14527 + .ivsize = DES_BLOCK_SIZE,
14528 + .maxauthsize = MD5_DIGEST_SIZE,
14529 + },
14530 + .caam = {
14531 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14532 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14533 + OP_ALG_AAI_HMAC_PRECOMP,
14534 + },
14535 + },
14536 + {
14537 + .aead = {
14538 + .base = {
14539 + .cra_name = "echainiv(authenc(hmac(md5),"
14540 + "cbc(des)))",
14541 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14542 + "cbc-des-caam-qi",
14543 + .cra_blocksize = DES_BLOCK_SIZE,
14544 + },
14545 + .setkey = aead_setkey,
14546 + .setauthsize = aead_setauthsize,
14547 + .encrypt = aead_encrypt,
14548 + .decrypt = aead_decrypt,
14549 + .ivsize = DES_BLOCK_SIZE,
14550 + .maxauthsize = MD5_DIGEST_SIZE,
14551 + },
14552 + .caam = {
14553 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14554 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14555 + OP_ALG_AAI_HMAC_PRECOMP,
14556 + .geniv = true,
14557 + }
14558 + },
14559 + {
14560 + .aead = {
14561 + .base = {
14562 + .cra_name = "authenc(hmac(sha1),cbc(des))",
14563 + .cra_driver_name = "authenc-hmac-sha1-"
14564 + "cbc-des-caam-qi",
14565 + .cra_blocksize = DES_BLOCK_SIZE,
14566 + },
14567 + .setkey = aead_setkey,
14568 + .setauthsize = aead_setauthsize,
14569 + .encrypt = aead_encrypt,
14570 + .decrypt = aead_decrypt,
14571 + .ivsize = DES_BLOCK_SIZE,
14572 + .maxauthsize = SHA1_DIGEST_SIZE,
14573 + },
14574 + .caam = {
14575 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14576 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14577 + OP_ALG_AAI_HMAC_PRECOMP,
14578 + },
14579 + },
14580 + {
14581 + .aead = {
14582 + .base = {
14583 + .cra_name = "echainiv(authenc(hmac(sha1),"
14584 + "cbc(des)))",
14585 + .cra_driver_name = "echainiv-authenc-"
14586 + "hmac-sha1-cbc-des-caam-qi",
14587 + .cra_blocksize = DES_BLOCK_SIZE,
14588 + },
14589 + .setkey = aead_setkey,
14590 + .setauthsize = aead_setauthsize,
14591 + .encrypt = aead_encrypt,
14592 + .decrypt = aead_decrypt,
14593 + .ivsize = DES_BLOCK_SIZE,
14594 + .maxauthsize = SHA1_DIGEST_SIZE,
14595 + },
14596 + .caam = {
14597 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14598 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14599 + OP_ALG_AAI_HMAC_PRECOMP,
14600 + .geniv = true,
14601 + }
14602 + },
14603 + {
14604 + .aead = {
14605 + .base = {
14606 + .cra_name = "authenc(hmac(sha224),cbc(des))",
14607 + .cra_driver_name = "authenc-hmac-sha224-"
14608 + "cbc-des-caam-qi",
14609 + .cra_blocksize = DES_BLOCK_SIZE,
14610 + },
14611 + .setkey = aead_setkey,
14612 + .setauthsize = aead_setauthsize,
14613 + .encrypt = aead_encrypt,
14614 + .decrypt = aead_decrypt,
14615 + .ivsize = DES_BLOCK_SIZE,
14616 + .maxauthsize = SHA224_DIGEST_SIZE,
14617 + },
14618 + .caam = {
14619 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14620 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14621 + OP_ALG_AAI_HMAC_PRECOMP,
14622 + },
14623 + },
14624 + {
14625 + .aead = {
14626 + .base = {
14627 + .cra_name = "echainiv(authenc(hmac(sha224),"
14628 + "cbc(des)))",
14629 + .cra_driver_name = "echainiv-authenc-"
14630 + "hmac-sha224-cbc-des-"
14631 + "caam-qi",
14632 + .cra_blocksize = DES_BLOCK_SIZE,
14633 + },
14634 + .setkey = aead_setkey,
14635 + .setauthsize = aead_setauthsize,
14636 + .encrypt = aead_encrypt,
14637 + .decrypt = aead_decrypt,
14638 + .ivsize = DES_BLOCK_SIZE,
14639 + .maxauthsize = SHA224_DIGEST_SIZE,
14640 + },
14641 + .caam = {
14642 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14643 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14644 + OP_ALG_AAI_HMAC_PRECOMP,
14645 + .geniv = true,
14646 + }
14647 + },
14648 + {
14649 + .aead = {
14650 + .base = {
14651 + .cra_name = "authenc(hmac(sha256),cbc(des))",
14652 + .cra_driver_name = "authenc-hmac-sha256-"
14653 + "cbc-des-caam-qi",
14654 + .cra_blocksize = DES_BLOCK_SIZE,
14655 + },
14656 + .setkey = aead_setkey,
14657 + .setauthsize = aead_setauthsize,
14658 + .encrypt = aead_encrypt,
14659 + .decrypt = aead_decrypt,
14660 + .ivsize = DES_BLOCK_SIZE,
14661 + .maxauthsize = SHA256_DIGEST_SIZE,
14662 + },
14663 + .caam = {
14664 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14665 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14666 + OP_ALG_AAI_HMAC_PRECOMP,
14667 + },
14668 + },
14669 + {
14670 + .aead = {
14671 + .base = {
14672 + .cra_name = "echainiv(authenc(hmac(sha256),"
14673 + "cbc(des)))",
14674 + .cra_driver_name = "echainiv-authenc-"
14675 + "hmac-sha256-cbc-des-"
14676 + "caam-qi",
14677 + .cra_blocksize = DES_BLOCK_SIZE,
14678 + },
14679 + .setkey = aead_setkey,
14680 + .setauthsize = aead_setauthsize,
14681 + .encrypt = aead_encrypt,
14682 + .decrypt = aead_decrypt,
14683 + .ivsize = DES_BLOCK_SIZE,
14684 + .maxauthsize = SHA256_DIGEST_SIZE,
14685 + },
14686 + .caam = {
14687 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14688 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14689 + OP_ALG_AAI_HMAC_PRECOMP,
14690 + .geniv = true,
14691 + },
14692 + },
14693 + {
14694 + .aead = {
14695 + .base = {
14696 + .cra_name = "authenc(hmac(sha384),cbc(des))",
14697 + .cra_driver_name = "authenc-hmac-sha384-"
14698 + "cbc-des-caam-qi",
14699 + .cra_blocksize = DES_BLOCK_SIZE,
14700 + },
14701 + .setkey = aead_setkey,
14702 + .setauthsize = aead_setauthsize,
14703 + .encrypt = aead_encrypt,
14704 + .decrypt = aead_decrypt,
14705 + .ivsize = DES_BLOCK_SIZE,
14706 + .maxauthsize = SHA384_DIGEST_SIZE,
14707 + },
14708 + .caam = {
14709 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14710 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14711 + OP_ALG_AAI_HMAC_PRECOMP,
14712 + },
14713 + },
14714 + {
14715 + .aead = {
14716 + .base = {
14717 + .cra_name = "echainiv(authenc(hmac(sha384),"
14718 + "cbc(des)))",
14719 + .cra_driver_name = "echainiv-authenc-"
14720 + "hmac-sha384-cbc-des-"
14721 + "caam-qi",
14722 + .cra_blocksize = DES_BLOCK_SIZE,
14723 + },
14724 + .setkey = aead_setkey,
14725 + .setauthsize = aead_setauthsize,
14726 + .encrypt = aead_encrypt,
14727 + .decrypt = aead_decrypt,
14728 + .ivsize = DES_BLOCK_SIZE,
14729 + .maxauthsize = SHA384_DIGEST_SIZE,
14730 + },
14731 + .caam = {
14732 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14733 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14734 + OP_ALG_AAI_HMAC_PRECOMP,
14735 + .geniv = true,
14736 + }
14737 + },
14738 + {
14739 + .aead = {
14740 + .base = {
14741 + .cra_name = "authenc(hmac(sha512),cbc(des))",
14742 + .cra_driver_name = "authenc-hmac-sha512-"
14743 + "cbc-des-caam-qi",
14744 + .cra_blocksize = DES_BLOCK_SIZE,
14745 + },
14746 + .setkey = aead_setkey,
14747 + .setauthsize = aead_setauthsize,
14748 + .encrypt = aead_encrypt,
14749 + .decrypt = aead_decrypt,
14750 + .ivsize = DES_BLOCK_SIZE,
14751 + .maxauthsize = SHA512_DIGEST_SIZE,
14752 + },
14753 + .caam = {
14754 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14755 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14756 + OP_ALG_AAI_HMAC_PRECOMP,
14757 + }
14758 + },
14759 + {
14760 + .aead = {
14761 + .base = {
14762 + .cra_name = "echainiv(authenc(hmac(sha512),"
14763 + "cbc(des)))",
14764 + .cra_driver_name = "echainiv-authenc-"
14765 + "hmac-sha512-cbc-des-"
14766 + "caam-qi",
14767 + .cra_blocksize = DES_BLOCK_SIZE,
14768 + },
14769 + .setkey = aead_setkey,
14770 + .setauthsize = aead_setauthsize,
14771 + .encrypt = aead_encrypt,
14772 + .decrypt = aead_decrypt,
14773 + .ivsize = DES_BLOCK_SIZE,
14774 + .maxauthsize = SHA512_DIGEST_SIZE,
14775 + },
14776 + .caam = {
14777 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14778 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14779 + OP_ALG_AAI_HMAC_PRECOMP,
14780 + .geniv = true,
14781 + }
14782 + },
14783 + {
14784 + .aead = {
14785 + .base = {
14786 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
14787 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14788 + .cra_blocksize = AES_BLOCK_SIZE,
14789 + },
14790 + .setkey = tls_setkey,
14791 + .setauthsize = tls_setauthsize,
14792 + .encrypt = tls_encrypt,
14793 + .decrypt = tls_decrypt,
14794 + .ivsize = AES_BLOCK_SIZE,
14795 + .maxauthsize = SHA1_DIGEST_SIZE,
14796 + },
14797 + .caam = {
14798 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14799 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14800 + OP_ALG_AAI_HMAC_PRECOMP,
14801 + }
14802 + }
14803 +};
14804 +
14805 +struct caam_crypto_alg {
14806 + struct list_head entry;
14807 + struct crypto_alg crypto_alg;
14808 + struct caam_alg_entry caam;
14809 +};
14810 +
14811 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
14812 +{
14813 + struct caam_drv_private *priv;
14814 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
14815 + static const u8 digest_size[] = {
14816 + MD5_DIGEST_SIZE,
14817 + SHA1_DIGEST_SIZE,
14818 + SHA224_DIGEST_SIZE,
14819 + SHA256_DIGEST_SIZE,
14820 + SHA384_DIGEST_SIZE,
14821 + SHA512_DIGEST_SIZE
14822 + };
14823 + u8 op_id;
14824 +
14825 + /*
14826 + * distribute tfms across job rings to ensure in-order
14827 + * crypto request processing per tfm
14828 + */
14829 + ctx->jrdev = caam_jr_alloc();
14830 + if (IS_ERR(ctx->jrdev)) {
14831 + pr_err("Job Ring Device allocation for transform failed\n");
14832 + return PTR_ERR(ctx->jrdev);
14833 + }
14834 +
14835 + ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
14836 + DMA_TO_DEVICE);
14837 + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
14838 + dev_err(ctx->jrdev, "unable to map key\n");
14839 + caam_jr_free(ctx->jrdev);
14840 + return -ENOMEM;
14841 + }
14842 +
14843 + /* copy descriptor header template value */
14844 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
14845 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
14846 +
14847 + if (ctx->adata.algtype) {
14848 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
14849 + >> OP_ALG_ALGSEL_SHIFT;
14850 + if (op_id < ARRAY_SIZE(digest_size)) {
14851 + ctx->authsize = digest_size[op_id];
14852 + } else {
14853 + dev_err(ctx->jrdev,
14854 + "incorrect op_id %d; must be less than %zu\n",
14855 + op_id, ARRAY_SIZE(digest_size));
14856 + caam_jr_free(ctx->jrdev);
14857 + return -EINVAL;
14858 + }
14859 + } else {
14860 + ctx->authsize = 0;
14861 + }
14862 +
14863 + priv = dev_get_drvdata(ctx->jrdev->parent);
14864 + ctx->qidev = priv->qidev;
14865 +
14866 + spin_lock_init(&ctx->lock);
14867 + ctx->drv_ctx[ENCRYPT] = NULL;
14868 + ctx->drv_ctx[DECRYPT] = NULL;
14869 + ctx->drv_ctx[GIVENCRYPT] = NULL;
14870 +
14871 + return 0;
14872 +}
14873 +
14874 +static int caam_cra_init(struct crypto_tfm *tfm)
14875 +{
14876 + struct crypto_alg *alg = tfm->__crt_alg;
14877 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14878 + crypto_alg);
14879 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
14880 +
14881 + return caam_init_common(ctx, &caam_alg->caam);
14882 +}
14883 +
14884 +static int caam_aead_init(struct crypto_aead *tfm)
14885 +{
14886 + struct aead_alg *alg = crypto_aead_alg(tfm);
14887 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14888 + aead);
14889 + struct caam_ctx *ctx = crypto_aead_ctx(tfm);
14890 +
14891 + return caam_init_common(ctx, &caam_alg->caam);
14892 +}
14893 +
14894 +static void caam_exit_common(struct caam_ctx *ctx)
14895 +{
14896 + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
14897 + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
14898 + caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
14899 +
14900 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
14901 + DMA_TO_DEVICE);
14902 +
14903 + caam_jr_free(ctx->jrdev);
14904 +}
14905 +
14906 +static void caam_cra_exit(struct crypto_tfm *tfm)
14907 +{
14908 + caam_exit_common(crypto_tfm_ctx(tfm));
14909 +}
14910 +
14911 +static void caam_aead_exit(struct crypto_aead *tfm)
14912 +{
14913 + caam_exit_common(crypto_aead_ctx(tfm));
14914 +}
14915 +
14916 +static struct list_head alg_list;
14917 +static void __exit caam_qi_algapi_exit(void)
14918 +{
14919 + struct caam_crypto_alg *t_alg, *n;
14920 + int i;
14921 +
14922 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
14923 + struct caam_aead_alg *t_alg = driver_aeads + i;
14924 +
14925 + if (t_alg->registered)
14926 + crypto_unregister_aead(&t_alg->aead);
14927 + }
14928 +
14929 + if (!alg_list.next)
14930 + return;
14931 +
14932 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
14933 + crypto_unregister_alg(&t_alg->crypto_alg);
14934 + list_del(&t_alg->entry);
14935 + kfree(t_alg);
14936 + }
14937 +}
14938 +
14939 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
14940 + *template)
14941 +{
14942 + struct caam_crypto_alg *t_alg;
14943 + struct crypto_alg *alg;
14944 +
14945 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
14946 + if (!t_alg)
14947 + return ERR_PTR(-ENOMEM);
14948 +
14949 + alg = &t_alg->crypto_alg;
14950 +
14951 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
14952 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
14953 + template->driver_name);
14954 + alg->cra_module = THIS_MODULE;
14955 + alg->cra_init = caam_cra_init;
14956 + alg->cra_exit = caam_cra_exit;
14957 + alg->cra_priority = CAAM_CRA_PRIORITY;
14958 + alg->cra_blocksize = template->blocksize;
14959 + alg->cra_alignmask = 0;
14960 + alg->cra_ctxsize = sizeof(struct caam_ctx);
14961 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
14962 + template->type;
14963 + switch (template->type) {
14964 + case CRYPTO_ALG_TYPE_GIVCIPHER:
14965 + alg->cra_type = &crypto_givcipher_type;
14966 + alg->cra_ablkcipher = template->template_ablkcipher;
14967 + break;
14968 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
14969 + alg->cra_type = &crypto_ablkcipher_type;
14970 + alg->cra_ablkcipher = template->template_ablkcipher;
14971 + break;
14972 + }
14973 +
14974 + t_alg->caam.class1_alg_type = template->class1_alg_type;
14975 + t_alg->caam.class2_alg_type = template->class2_alg_type;
14976 +
14977 + return t_alg;
14978 +}
14979 +
14980 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
14981 +{
14982 + struct aead_alg *alg = &t_alg->aead;
14983 +
14984 + alg->base.cra_module = THIS_MODULE;
14985 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
14986 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
14987 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
14988 +
14989 + alg->init = caam_aead_init;
14990 + alg->exit = caam_aead_exit;
14991 +}
14992 +
14993 +static int __init caam_qi_algapi_init(void)
14994 +{
14995 + struct device_node *dev_node;
14996 + struct platform_device *pdev;
14997 + struct device *ctrldev;
14998 + struct caam_drv_private *priv;
14999 + int i = 0, err = 0;
15000 + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15001 + unsigned int md_limit = SHA512_DIGEST_SIZE;
15002 + bool registered = false;
15003 +
15004 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15005 + if (!dev_node) {
15006 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15007 + if (!dev_node)
15008 + return -ENODEV;
15009 + }
15010 +
15011 + pdev = of_find_device_by_node(dev_node);
15012 + of_node_put(dev_node);
15013 + if (!pdev)
15014 + return -ENODEV;
15015 +
15016 + ctrldev = &pdev->dev;
15017 + priv = dev_get_drvdata(ctrldev);
15018 +
15019 + /*
15020 + * If priv is NULL, it's probably because the caam driver wasn't
15021 + * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15022 + */
15023 + if (!priv || !priv->qi_present)
15024 + return -ENODEV;
15025 +
15026 + if (caam_dpaa2) {
15027 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15028 + return -ENODEV;
15029 + }
15030 +
15031 + INIT_LIST_HEAD(&alg_list);
15032 +
15033 + /*
15034 + * Register crypto algorithms the device supports.
15035 + * First, detect presence and attributes of DES, AES, and MD blocks.
15036 + */
15037 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15038 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15039 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15040 + aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15041 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15042 +
15043 + /* If MD is present, limit digest size based on LP256 */
15044 + if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15045 + md_limit = SHA256_DIGEST_SIZE;
15046 +
15047 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15048 + struct caam_crypto_alg *t_alg;
15049 + struct caam_alg_template *alg = driver_algs + i;
15050 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15051 +
15052 + /* Skip DES algorithms if not supported by device */
15053 + if (!des_inst &&
15054 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15055 + (alg_sel == OP_ALG_ALGSEL_DES)))
15056 + continue;
15057 +
15058 + /* Skip AES algorithms if not supported by device */
15059 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15060 + continue;
15061 +
15062 + t_alg = caam_alg_alloc(alg);
15063 + if (IS_ERR(t_alg)) {
15064 + err = PTR_ERR(t_alg);
15065 + dev_warn(priv->qidev, "%s alg allocation failed\n",
15066 + alg->driver_name);
15067 + continue;
15068 + }
15069 +
15070 + err = crypto_register_alg(&t_alg->crypto_alg);
15071 + if (err) {
15072 + dev_warn(priv->qidev, "%s alg registration failed\n",
15073 + t_alg->crypto_alg.cra_driver_name);
15074 + kfree(t_alg);
15075 + continue;
15076 + }
15077 +
15078 + list_add_tail(&t_alg->entry, &alg_list);
15079 + registered = true;
15080 + }
15081 +
15082 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15083 + struct caam_aead_alg *t_alg = driver_aeads + i;
15084 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15085 + OP_ALG_ALGSEL_MASK;
15086 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15087 + OP_ALG_ALGSEL_MASK;
15088 + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15089 +
15090 + /* Skip DES algorithms if not supported by device */
15091 + if (!des_inst &&
15092 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15093 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15094 + continue;
15095 +
15096 + /* Skip AES algorithms if not supported by device */
15097 + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15098 + continue;
15099 +
15100 + /*
15101 + * Check support for AES algorithms not available
15102 + * on LP devices.
15103 + */
15104 + if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15105 + (alg_aai == OP_ALG_AAI_GCM))
15106 + continue;
15107 +
15108 + /*
15109 + * Skip algorithms requiring message digests
15110 + * if MD or MD size is not supported by device.
15111 + */
15112 + if (c2_alg_sel &&
15113 + (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15114 + continue;
15115 +
15116 + caam_aead_alg_init(t_alg);
15117 +
15118 + err = crypto_register_aead(&t_alg->aead);
15119 + if (err) {
15120 + pr_warn("%s alg registration failed\n",
15121 + t_alg->aead.base.cra_driver_name);
15122 + continue;
15123 + }
15124 +
15125 + t_alg->registered = true;
15126 + registered = true;
15127 + }
15128 +
15129 + if (registered)
15130 + dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15131 +
15132 + return err;
15133 +}
15134 +
15135 +module_init(caam_qi_algapi_init);
15136 +module_exit(caam_qi_algapi_exit);
15137 +
15138 +MODULE_LICENSE("GPL");
15139 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15140 +MODULE_AUTHOR("Freescale Semiconductor");
15141 --- /dev/null
15142 +++ b/drivers/crypto/caam/caamalg_qi2.c
15143 @@ -0,0 +1,4428 @@
15144 +/*
15145 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15146 + * Copyright 2017 NXP
15147 + *
15148 + * Redistribution and use in source and binary forms, with or without
15149 + * modification, are permitted provided that the following conditions are met:
15150 + * * Redistributions of source code must retain the above copyright
15151 + * notice, this list of conditions and the following disclaimer.
15152 + * * Redistributions in binary form must reproduce the above copyright
15153 + * notice, this list of conditions and the following disclaimer in the
15154 + * documentation and/or other materials provided with the distribution.
15155 + * * Neither the names of the above-listed copyright holders nor the
15156 + * names of any contributors may be used to endorse or promote products
15157 + * derived from this software without specific prior written permission.
15158 + *
15159 + *
15160 + * ALTERNATIVELY, this software may be distributed under the terms of the
15161 + * GNU General Public License ("GPL") as published by the Free Software
15162 + * Foundation, either version 2 of that License or (at your option) any
15163 + * later version.
15164 + *
15165 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15166 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15167 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15168 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15169 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15170 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15171 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15172 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15173 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15174 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15175 + * POSSIBILITY OF SUCH DAMAGE.
15176 + */
15177 +
15178 +#include "compat.h"
15179 +#include "regs.h"
15180 +#include "caamalg_qi2.h"
15181 +#include "dpseci_cmd.h"
15182 +#include "desc_constr.h"
15183 +#include "error.h"
15184 +#include "sg_sw_sec4.h"
15185 +#include "sg_sw_qm2.h"
15186 +#include "key_gen.h"
15187 +#include "caamalg_desc.h"
15188 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15189 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15190 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15191 +
15192 +#define CAAM_CRA_PRIORITY 2000
15193 +
15194 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
15195 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15196 + SHA512_DIGEST_SIZE * 2)
15197 +
15198 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15199 +bool caam_little_end;
15200 +EXPORT_SYMBOL(caam_little_end);
15201 +bool caam_imx;
15202 +EXPORT_SYMBOL(caam_imx);
15203 +#endif
15204 +
15205 +/*
15206 + * This is a cache of buffers, from which the users of the CAAM QI driver
15207 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15208 + * NOTE: A more elegant solution would be to have some headroom in the frames
15209 + * being processed. This can be added by the dpaa2-eth driver. This would
15210 + * pose a problem for userspace application processing, which cannot
15211 + * know of this limitation. So for now, this will work.
15212 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
15213 + */
15214 +static struct kmem_cache *qi_cache;
15215 +
15216 +struct caam_alg_entry {
15217 + struct device *dev;
15218 + int class1_alg_type;
15219 + int class2_alg_type;
15220 + bool rfc3686;
15221 + bool geniv;
15222 +};
15223 +
15224 +struct caam_aead_alg {
15225 + struct aead_alg aead;
15226 + struct caam_alg_entry caam;
15227 + bool registered;
15228 +};
15229 +
15230 +/**
15231 + * caam_ctx - per-session context
15232 + * @flc: Flow Contexts array
15233 + * @key: virtual address of the key(s): [authentication key], encryption key
15234 + * @key_dma: I/O virtual address of the key
15235 + * @dev: dpseci device
15236 + * @adata: authentication algorithm details
15237 + * @cdata: encryption algorithm details
15238 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15239 + */
15240 +struct caam_ctx {
15241 + struct caam_flc flc[NUM_OP];
15242 + u8 key[CAAM_MAX_KEY_SIZE];
15243 + dma_addr_t key_dma;
15244 + struct device *dev;
15245 + struct alginfo adata;
15246 + struct alginfo cdata;
15247 + unsigned int authsize;
15248 +};
15249 +
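+/*
+ * Translate an I/O virtual address handed back by the hardware into a CPU
+ * virtual address: when an IOMMU domain is attached, the IOVA must first be
+ * converted to a physical address via iommu_iova_to_phys().
+ */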
15250 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15251 + dma_addr_t iova_addr)
15252 +{
15253 + phys_addr_t phys_addr;
15254 +
15255 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15256 + iova_addr;
15257 +
15258 + return phys_to_virt(phys_addr);
15259 +}
15260 +
15261 +/*
15262 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
15263 + *
15264 + * Allocate data on the hotpath. Instead of using kmalloc, one can use the
15265 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15266 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15267 + * hosting 16 SG entries.
15268 + *
15269 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15270 + *
15271 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15272 + */
15273 +static inline void *qi_cache_alloc(gfp_t flags)
15274 +{
15275 + return kmem_cache_alloc(qi_cache, flags);
15276 +}
15277 +
15278 +/*
15279 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15280 + *
15281 + * @obj - buffer previously allocated by qi_cache_alloc
15282 + *
15283 + * No checking is done; the call is passed straight through to
15284 + * kmem_cache_free(...)
15285 + */
15286 +static inline void qi_cache_free(void *obj)
15287 +{
15288 + kmem_cache_free(qi_cache, obj);
15289 +}
15290 +
15291 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15292 +{
15293 + switch (crypto_tfm_alg_type(areq->tfm)) {
15294 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15295 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15296 + return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15297 + case CRYPTO_ALG_TYPE_AEAD:
15298 + return aead_request_ctx(container_of(areq, struct aead_request,
15299 + base));
15300 + default:
15301 + return ERR_PTR(-EINVAL);
15302 + }
15303 +}
15304 +
15305 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15306 + struct scatterlist *dst, int src_nents,
15307 + int dst_nents, dma_addr_t iv_dma, int ivsize,
15308 + enum optype op_type, dma_addr_t qm_sg_dma,
15309 + int qm_sg_bytes)
15310 +{
15311 + if (dst != src) {
15312 + if (src_nents)
15313 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15314 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15315 + } else {
15316 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15317 + }
15318 +
15319 + if (iv_dma)
15320 + dma_unmap_single(dev, iv_dma, ivsize,
15321 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15322 + DMA_TO_DEVICE);
15323 +
15324 + if (qm_sg_bytes)
15325 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15326 +}
15327 +
15328 +static int aead_set_sh_desc(struct crypto_aead *aead)
15329 +{
15330 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15331 + typeof(*alg), aead);
15332 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15333 + unsigned int ivsize = crypto_aead_ivsize(aead);
15334 + struct device *dev = ctx->dev;
15335 + struct caam_flc *flc;
15336 + u32 *desc;
15337 + u32 ctx1_iv_off = 0;
15338 + u32 *nonce = NULL;
15339 + unsigned int data_len[2];
15340 + u32 inl_mask;
15341 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15342 + OP_ALG_AAI_CTR_MOD128);
15343 + const bool is_rfc3686 = alg->caam.rfc3686;
15344 +
15345 + if (!ctx->cdata.keylen || !ctx->authsize)
15346 + return 0;
15347 +
15348 + /*
15349 + * AES-CTR needs to load IV in CONTEXT1 reg
15350 + * at an offset of 128bits (16bytes)
15351 + * CONTEXT1[255:128] = IV
15352 + */
15353 + if (ctr_mode)
15354 + ctx1_iv_off = 16;
15355 +
15356 + /*
15357 + * RFC3686 specific:
15358 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15359 + */
15360 + if (is_rfc3686) {
15361 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15362 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15363 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15364 + }
15365 +
15366 + data_len[0] = ctx->adata.keylen_pad;
15367 + data_len[1] = ctx->cdata.keylen;
15368 +
15369 + /* aead_encrypt shared descriptor */
15370 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15371 + DESC_QI_AEAD_ENC_LEN) +
15372 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15373 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15374 + ARRAY_SIZE(data_len)) < 0)
15375 + return -EINVAL;
15376 +
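+	/*
+	 * Bit 0 of inl_mask is set by desc_inline_query() if the (split)
+	 * authentication key fits inline in the shared descriptor, and bit 1
+	 * if the encryption key does; keys that do not fit are referenced by
+	 * their DMA address instead.
+	 */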
15377 + if (inl_mask & 1)
15378 + ctx->adata.key_virt = ctx->key;
15379 + else
15380 + ctx->adata.key_dma = ctx->key_dma;
15381 +
15382 + if (inl_mask & 2)
15383 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15384 + else
15385 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15386 +
15387 + ctx->adata.key_inline = !!(inl_mask & 1);
15388 + ctx->cdata.key_inline = !!(inl_mask & 2);
15389 +
15390 + flc = &ctx->flc[ENCRYPT];
15391 + desc = flc->sh_desc;
15392 +
15393 + if (alg->caam.geniv)
15394 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15395 + ivsize, ctx->authsize, is_rfc3686,
15396 + nonce, ctx1_iv_off, true);
15397 + else
15398 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15399 + ivsize, ctx->authsize, is_rfc3686, nonce,
15400 + ctx1_iv_off, true);
15401 +
15402 + flc->flc[1] = desc_len(desc); /* SDL */
15403 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15404 + desc_bytes(desc), DMA_TO_DEVICE);
15405 + if (dma_mapping_error(dev, flc->flc_dma)) {
15406 + dev_err(dev, "unable to map shared descriptor\n");
15407 + return -ENOMEM;
15408 + }
15409 +
15410 + /* aead_decrypt shared descriptor */
15411 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15412 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15413 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15414 + ARRAY_SIZE(data_len)) < 0)
15415 + return -EINVAL;
15416 +
15417 + if (inl_mask & 1)
15418 + ctx->adata.key_virt = ctx->key;
15419 + else
15420 + ctx->adata.key_dma = ctx->key_dma;
15421 +
15422 + if (inl_mask & 2)
15423 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15424 + else
15425 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15426 +
15427 + ctx->adata.key_inline = !!(inl_mask & 1);
15428 + ctx->cdata.key_inline = !!(inl_mask & 2);
15429 +
15430 + flc = &ctx->flc[DECRYPT];
15431 + desc = flc->sh_desc;
15432 +
15433 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15434 + ivsize, ctx->authsize, alg->caam.geniv,
15435 + is_rfc3686, nonce, ctx1_iv_off, true);
15436 +
15437 + flc->flc[1] = desc_len(desc); /* SDL */
15438 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15439 + desc_bytes(desc), DMA_TO_DEVICE);
15440 + if (dma_mapping_error(dev, flc->flc_dma)) {
15441 + dev_err(dev, "unable to map shared descriptor\n");
15442 + return -ENOMEM;
15443 + }
15444 +
15445 + return 0;
15446 +}
15447 +
15448 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15449 +{
15450 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15451 +
15452 + ctx->authsize = authsize;
15453 + aead_set_sh_desc(authenc);
15454 +
15455 + return 0;
15456 +}
15457 +
15458 +struct split_key_sh_result {
15459 + struct completion completion;
15460 + int err;
15461 + struct device *dev;
15462 +};
15463 +
15464 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15465 +{
15466 + struct split_key_sh_result *res = cbk_ctx;
15467 +
15468 +#ifdef DEBUG
15469 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15470 +#endif
15471 +
15472 + if (err)
15473 + caam_qi2_strstatus(res->dev, err);
15474 +
15475 + res->err = err;
15476 + complete(&res->completion);
15477 +}
15478 +
15479 +static int gen_split_key_sh(struct device *dev, u8 *key_out,
15480 + struct alginfo * const adata, const u8 *key_in,
15481 + u32 keylen)
15482 +{
15483 + struct caam_request *req_ctx;
15484 + u32 *desc;
15485 + struct split_key_sh_result result;
15486 + dma_addr_t dma_addr_in, dma_addr_out;
15487 + struct caam_flc *flc;
15488 + struct dpaa2_fl_entry *in_fle, *out_fle;
15489 + int ret = -ENOMEM;
15490 +
15491 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
15492 + if (!req_ctx)
15493 + return -ENOMEM;
15494 +
15495 + in_fle = &req_ctx->fd_flt[1];
15496 + out_fle = &req_ctx->fd_flt[0];
15497 +
15498 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
15499 + if (!flc)
15500 + goto err_flc;
15501 +
15502 + dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
15503 + DMA_TO_DEVICE);
15504 + if (dma_mapping_error(dev, dma_addr_in)) {
15505 + dev_err(dev, "unable to map key input memory\n");
15506 + goto err_dma_addr_in;
15507 + }
15508 +
15509 + dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
15510 + DMA_FROM_DEVICE);
15511 + if (dma_mapping_error(dev, dma_addr_out)) {
15512 + dev_err(dev, "unable to map key output memory\n");
15513 + goto err_dma_addr_out;
15514 + }
15515 +
15516 + desc = flc->sh_desc;
15517 +
15518 + init_sh_desc(desc, 0);
15519 + append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
15520 +
15521 +	/* Set up MDHA for an HMAC-INIT operation */
15522 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
15523 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
15524 + OP_ALG_AS_INIT);
15525 +
15526 + /*
15527 +	 * Do a FIFO_LOAD of zero; this triggers the internal key expansion
15528 +	 * into both pads inside MDHA
15529 + */
15530 + append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
15531 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
15532 +
15533 + /*
15534 + * FIFO_STORE with the explicit split-key content store
15535 + * (0x26 output type)
15536 + */
15537 + append_fifo_store(desc, dma_addr_out, adata->keylen,
15538 + LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
15539 +
15540 + flc->flc[1] = desc_len(desc); /* SDL */
15541 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15542 + desc_bytes(desc), DMA_TO_DEVICE);
15543 + if (dma_mapping_error(dev, flc->flc_dma)) {
15544 + dev_err(dev, "unable to map shared descriptor\n");
15545 + goto err_flc_dma;
15546 + }
15547 +
15548 + dpaa2_fl_set_final(in_fle, true);
15549 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
15550 + dpaa2_fl_set_addr(in_fle, dma_addr_in);
15551 + dpaa2_fl_set_len(in_fle, keylen);
15552 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15553 + dpaa2_fl_set_addr(out_fle, dma_addr_out);
15554 + dpaa2_fl_set_len(out_fle, adata->keylen_pad);
15555 +
15556 +#ifdef DEBUG
15557 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15558 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
15559 + print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
15560 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
15561 +#endif
15562 +
15563 + result.err = 0;
15564 + init_completion(&result.completion);
15565 + result.dev = dev;
15566 +
15567 + req_ctx->flc = flc;
15568 + req_ctx->cbk = split_key_sh_done;
15569 + req_ctx->ctx = &result;
15570 +
15571 + ret = dpaa2_caam_enqueue(dev, req_ctx);
15572 + if (ret == -EINPROGRESS) {
15573 + /* in progress */
15574 + wait_for_completion(&result.completion);
15575 + ret = result.err;
15576 +#ifdef DEBUG
15577 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15578 + DUMP_PREFIX_ADDRESS, 16, 4, key_out,
15579 + adata->keylen_pad, 1);
15580 +#endif
15581 + }
15582 +
15583 + dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
15584 + DMA_TO_DEVICE);
15585 +err_flc_dma:
15586 + dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
15587 +err_dma_addr_out:
15588 + dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
15589 +err_dma_addr_in:
15590 + kfree(flc);
15591 +err_flc:
15592 + kfree(req_ctx);
15593 + return ret;
15594 +}
15595 +
15596 +static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
15597 + u32 authkeylen)
15598 +{
15599 + return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
15600 + authkeylen);
15601 +}
15602 +
15603 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15604 + unsigned int keylen)
15605 +{
15606 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15607 + struct device *dev = ctx->dev;
15608 + struct crypto_authenc_keys keys;
15609 + int ret;
15610 +
15611 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15612 + goto badkey;
15613 +
15614 +#ifdef DEBUG
15615 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15616 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
15617 + keys.authkeylen);
15618 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15619 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15620 +#endif
15621 +
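+	/*
+	 * The authentication key is expanded into an HMAC split key (the
+	 * precomputed ipad/opad hash states); its size depends only on the
+	 * selected hash algorithm.
+	 */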
15622 + ctx->adata.keylen = split_key_len(ctx->adata.algtype &
15623 + OP_ALG_ALGSEL_MASK);
15624 + ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
15625 + OP_ALG_ALGSEL_MASK);
15626 +
15627 +#ifdef DEBUG
15628 + dev_err(dev, "split keylen %d split keylen padded %d\n",
15629 + ctx->adata.keylen, ctx->adata.keylen_pad);
15630 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15631 + DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
15632 +#endif
15633 +
15634 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15635 + goto badkey;
15636 +
15637 + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
15638 + if (ret)
15639 + goto badkey;
15640 +
15641 +	/* append the encryption key after the auth split key */
15642 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15643 +
15644 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
15645 + keys.enckeylen, DMA_TO_DEVICE);
15646 + if (dma_mapping_error(dev, ctx->key_dma)) {
15647 + dev_err(dev, "unable to map key i/o memory\n");
15648 + return -ENOMEM;
15649 + }
15650 +#ifdef DEBUG
15651 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15652 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15653 + ctx->adata.keylen_pad + keys.enckeylen, 1);
15654 +#endif
15655 +
15656 + ctx->cdata.keylen = keys.enckeylen;
15657 +
15658 + ret = aead_set_sh_desc(aead);
15659 + if (ret)
15660 + dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
15661 + keys.enckeylen, DMA_TO_DEVICE);
15662 +
15663 + return ret;
15664 +badkey:
15665 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15666 + return -EINVAL;
15667 +}
15668 +
15669 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15670 + bool encrypt)
15671 +{
15672 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
15673 + struct caam_request *req_ctx = aead_request_ctx(req);
15674 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15675 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15676 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15677 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15678 + typeof(*alg), aead);
15679 + struct device *dev = ctx->dev;
15680 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15681 + GFP_KERNEL : GFP_ATOMIC;
15682 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15683 + struct aead_edesc *edesc;
15684 + dma_addr_t qm_sg_dma, iv_dma = 0;
15685 + int ivsize = 0;
15686 + unsigned int authsize = ctx->authsize;
15687 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15688 + int in_len, out_len;
15689 + struct dpaa2_sg_entry *sg_table;
15690 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15691 +
15692 + /* allocate space for base edesc and link tables */
15693 + edesc = qi_cache_alloc(GFP_DMA | flags);
15694 + if (unlikely(!edesc)) {
15695 + dev_err(dev, "could not allocate extended descriptor\n");
15696 + return ERR_PTR(-ENOMEM);
15697 + }
15698 +
15699 + if (unlikely(req->dst != req->src)) {
15700 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15701 + req->cryptlen);
15702 + if (unlikely(src_nents < 0)) {
15703 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15704 + req->assoclen + req->cryptlen);
15705 + qi_cache_free(edesc);
15706 + return ERR_PTR(src_nents);
15707 + }
15708 +
15709 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15710 + req->cryptlen +
15711 + (encrypt ? authsize :
15712 + (-authsize)));
15713 + if (unlikely(dst_nents < 0)) {
15714 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15715 + req->assoclen + req->cryptlen +
15716 + (encrypt ? authsize : (-authsize)));
15717 + qi_cache_free(edesc);
15718 + return ERR_PTR(dst_nents);
15719 + }
15720 +
15721 + if (src_nents) {
15722 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15723 + DMA_TO_DEVICE);
15724 + if (unlikely(!mapped_src_nents)) {
15725 + dev_err(dev, "unable to map source\n");
15726 + qi_cache_free(edesc);
15727 + return ERR_PTR(-ENOMEM);
15728 + }
15729 + } else {
15730 + mapped_src_nents = 0;
15731 + }
15732 +
15733 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15734 + DMA_FROM_DEVICE);
15735 + if (unlikely(!mapped_dst_nents)) {
15736 + dev_err(dev, "unable to map destination\n");
15737 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15738 + qi_cache_free(edesc);
15739 + return ERR_PTR(-ENOMEM);
15740 + }
15741 + } else {
15742 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15743 + req->cryptlen +
15744 + (encrypt ? authsize : 0));
15745 + if (unlikely(src_nents < 0)) {
15746 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15747 + req->assoclen + req->cryptlen +
15748 + (encrypt ? authsize : 0));
15749 + qi_cache_free(edesc);
15750 + return ERR_PTR(src_nents);
15751 + }
15752 +
15753 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15754 + DMA_BIDIRECTIONAL);
15755 + if (unlikely(!mapped_src_nents)) {
15756 + dev_err(dev, "unable to map source\n");
15757 + qi_cache_free(edesc);
15758 + return ERR_PTR(-ENOMEM);
15759 + }
15760 + }
15761 +
15762 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15763 + ivsize = crypto_aead_ivsize(aead);
15764 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15765 + if (dma_mapping_error(dev, iv_dma)) {
15766 + dev_err(dev, "unable to map IV\n");
15767 + caam_unmap(dev, req->src, req->dst, src_nents,
15768 + dst_nents, 0, 0, op_type, 0, 0);
15769 + qi_cache_free(edesc);
15770 + return ERR_PTR(-ENOMEM);
15771 + }
15772 + }
15773 +
15774 + /*
15775 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15776 + * Input is not contiguous.
15777 + */
15778 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15779 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15780 + if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15781 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15782 + qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15783 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15784 + iv_dma, ivsize, op_type, 0, 0);
15785 + qi_cache_free(edesc);
15786 + return ERR_PTR(-ENOMEM);
15787 + }
15788 + sg_table = &edesc->sgt[0];
15789 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15790 +
15791 + edesc->src_nents = src_nents;
15792 + edesc->dst_nents = dst_nents;
15793 + edesc->iv_dma = iv_dma;
15794 +
15795 + edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15796 + DMA_TO_DEVICE);
15797 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15798 + dev_err(dev, "unable to map assoclen\n");
15799 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15800 + iv_dma, ivsize, op_type, 0, 0);
15801 + qi_cache_free(edesc);
15802 + return ERR_PTR(-ENOMEM);
15803 + }
15804 +
15805 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15806 + qm_sg_index++;
15807 + if (ivsize) {
15808 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15809 + qm_sg_index++;
15810 + }
15811 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15812 + qm_sg_index += mapped_src_nents;
15813 +
15814 + if (mapped_dst_nents > 1)
15815 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15816 + qm_sg_index, 0);
15817 +
15818 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15819 + if (dma_mapping_error(dev, qm_sg_dma)) {
15820 + dev_err(dev, "unable to map S/G table\n");
15821 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15822 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15823 + iv_dma, ivsize, op_type, 0, 0);
15824 + qi_cache_free(edesc);
15825 + return ERR_PTR(-ENOMEM);
15826 + }
15827 +
15828 + edesc->qm_sg_dma = qm_sg_dma;
15829 + edesc->qm_sg_bytes = qm_sg_bytes;
15830 +
15831 + out_len = req->assoclen + req->cryptlen +
15832 + (encrypt ? ctx->authsize : (-ctx->authsize));
15833 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15834 +
15835 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15836 + dpaa2_fl_set_final(in_fle, true);
15837 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15838 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15839 + dpaa2_fl_set_len(in_fle, in_len);
15840 +
15841 + if (req->dst == req->src) {
15842 + if (mapped_src_nents == 1) {
15843 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15844 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15845 + } else {
15846 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15847 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15848 + (1 + !!ivsize) * sizeof(*sg_table));
15849 + }
15850 + } else if (mapped_dst_nents == 1) {
15851 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15852 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15853 + } else {
15854 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15855 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15856 + sizeof(*sg_table));
15857 + }
15858 +
15859 + dpaa2_fl_set_len(out_fle, out_len);
15860 +
15861 + return edesc;
15862 +}
15863 +
15864 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15865 + bool encrypt)
15866 +{
15867 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
15868 + unsigned int blocksize = crypto_aead_blocksize(tls);
15869 + unsigned int padsize, authsize;
15870 + struct caam_request *req_ctx = aead_request_ctx(req);
15871 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15872 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15873 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
15874 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15875 + typeof(*alg), aead);
15876 + struct device *dev = ctx->dev;
15877 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15878 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15879 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15880 + struct tls_edesc *edesc;
15881 + dma_addr_t qm_sg_dma, iv_dma = 0;
15882 + int ivsize = 0;
15883 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15884 + int in_len, out_len;
15885 + struct dpaa2_sg_entry *sg_table;
15886 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15887 + struct scatterlist *dst;
15888 +
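+	/*
+	 * TLS 1.0 records encrypted with a CBC cipher are padded so that
+	 * payload + MAC + padding is a multiple of the cipher block size; on
+	 * encryption the padding produced by the engine is accounted for as
+	 * part of authsize.
+	 */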
15889 + if (encrypt) {
15890 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15891 + blocksize);
15892 + authsize = ctx->authsize + padsize;
15893 + } else {
15894 + authsize = ctx->authsize;
15895 + }
15896 +
15897 + /* allocate space for base edesc and link tables */
15898 + edesc = qi_cache_alloc(GFP_DMA | flags);
15899 + if (unlikely(!edesc)) {
15900 + dev_err(dev, "could not allocate extended descriptor\n");
15901 + return ERR_PTR(-ENOMEM);
15902 + }
15903 +
15904 + if (likely(req->src == req->dst)) {
15905 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15906 + req->cryptlen +
15907 + (encrypt ? authsize : 0));
15908 + if (unlikely(src_nents < 0)) {
15909 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15910 + req->assoclen + req->cryptlen +
15911 + (encrypt ? authsize : 0));
15912 + qi_cache_free(edesc);
15913 + return ERR_PTR(src_nents);
15914 + }
15915 +
15916 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15917 + DMA_BIDIRECTIONAL);
15918 + if (unlikely(!mapped_src_nents)) {
15919 + dev_err(dev, "unable to map source\n");
15920 + qi_cache_free(edesc);
15921 + return ERR_PTR(-ENOMEM);
15922 + }
15923 + dst = req->dst;
15924 + } else {
15925 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15926 + req->cryptlen);
15927 + if (unlikely(src_nents < 0)) {
15928 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15929 + req->assoclen + req->cryptlen);
15930 + qi_cache_free(edesc);
15931 + return ERR_PTR(src_nents);
15932 + }
15933 +
15934 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15935 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
15936 + (encrypt ? authsize : 0));
15937 + if (unlikely(dst_nents < 0)) {
15938 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15939 + req->cryptlen +
15940 + (encrypt ? authsize : 0));
15941 + qi_cache_free(edesc);
15942 + return ERR_PTR(dst_nents);
15943 + }
15944 +
15945 + if (src_nents) {
15946 + mapped_src_nents = dma_map_sg(dev, req->src,
15947 + src_nents, DMA_TO_DEVICE);
15948 + if (unlikely(!mapped_src_nents)) {
15949 + dev_err(dev, "unable to map source\n");
15950 + qi_cache_free(edesc);
15951 + return ERR_PTR(-ENOMEM);
15952 + }
15953 + } else {
15954 + mapped_src_nents = 0;
15955 + }
15956 +
15957 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
15958 + DMA_FROM_DEVICE);
15959 + if (unlikely(!mapped_dst_nents)) {
15960 + dev_err(dev, "unable to map destination\n");
15961 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15962 + qi_cache_free(edesc);
15963 + return ERR_PTR(-ENOMEM);
15964 + }
15965 + }
15966 +
15967 + ivsize = crypto_aead_ivsize(tls);
15968 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15969 + if (dma_mapping_error(dev, iv_dma)) {
15970 + dev_err(dev, "unable to map IV\n");
15971 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
15972 + op_type, 0, 0);
15973 + qi_cache_free(edesc);
15974 + return ERR_PTR(-ENOMEM);
15975 + }
15976 +
15977 + /*
15978 + * Create S/G table: IV, src, dst.
15979 + * Input is not contiguous.
15980 + */
15981 + qm_sg_ents = 1 + mapped_src_nents +
15982 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15983 + sg_table = &edesc->sgt[0];
15984 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
15985 +
15986 + edesc->src_nents = src_nents;
15987 + edesc->dst_nents = dst_nents;
15988 + edesc->dst = dst;
15989 + edesc->iv_dma = iv_dma;
15990 +
15991 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
15992 + qm_sg_index = 1;
15993 +
15994 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15995 + qm_sg_index += mapped_src_nents;
15996 +
15997 + if (mapped_dst_nents > 1)
15998 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
15999 + qm_sg_index, 0);
16000 +
16001 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16002 + if (dma_mapping_error(dev, qm_sg_dma)) {
16003 + dev_err(dev, "unable to map S/G table\n");
16004 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16005 + ivsize, op_type, 0, 0);
16006 + qi_cache_free(edesc);
16007 + return ERR_PTR(-ENOMEM);
16008 + }
16009 +
16010 + edesc->qm_sg_dma = qm_sg_dma;
16011 + edesc->qm_sg_bytes = qm_sg_bytes;
16012 +
16013 + out_len = req->cryptlen + (encrypt ? authsize : 0);
16014 + in_len = ivsize + req->assoclen + req->cryptlen;
16015 +
16016 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16017 + dpaa2_fl_set_final(in_fle, true);
16018 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16019 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16020 + dpaa2_fl_set_len(in_fle, in_len);
16021 +
16022 + if (req->dst == req->src) {
16023 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16024 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16025 + (sg_nents_for_len(req->src, req->assoclen) +
16026 + 1) * sizeof(*sg_table));
16027 + } else if (mapped_dst_nents == 1) {
16028 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16029 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16030 + } else {
16031 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16032 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16033 + sizeof(*sg_table));
16034 + }
16035 +
16036 + dpaa2_fl_set_len(out_fle, out_len);
16037 +
16038 + return edesc;
16039 +}
16040 +
16041 +static int tls_set_sh_desc(struct crypto_aead *tls)
16042 +{
16043 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16044 + unsigned int ivsize = crypto_aead_ivsize(tls);
16045 + unsigned int blocksize = crypto_aead_blocksize(tls);
16046 + struct device *dev = ctx->dev;
16047 + struct caam_flc *flc;
16048 + u32 *desc;
16049 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
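+	/* TLS AAD: seq_num(8) + type(1) + version(2) + length(2) = 13 bytes */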
16050 + unsigned int data_len[2];
16051 + u32 inl_mask;
16052 +
16053 + if (!ctx->cdata.keylen || !ctx->authsize)
16054 + return 0;
16055 +
16056 + /*
16057 + * TLS 1.0 encrypt shared descriptor
16058 + * Job Descriptor and Shared Descriptor
16059 + * must fit into the 64-word Descriptor h/w Buffer
16060 + */
16061 + data_len[0] = ctx->adata.keylen_pad;
16062 + data_len[1] = ctx->cdata.keylen;
16063 +
16064 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16065 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
16066 + return -EINVAL;
16067 +
16068 + if (inl_mask & 1)
16069 + ctx->adata.key_virt = ctx->key;
16070 + else
16071 + ctx->adata.key_dma = ctx->key_dma;
16072 +
16073 + if (inl_mask & 2)
16074 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16075 + else
16076 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16077 +
16078 + ctx->adata.key_inline = !!(inl_mask & 1);
16079 + ctx->cdata.key_inline = !!(inl_mask & 2);
16080 +
16081 + flc = &ctx->flc[ENCRYPT];
16082 + desc = flc->sh_desc;
16083 +
16084 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16085 + assoclen, ivsize, ctx->authsize, blocksize);
16086 +
16087 + flc->flc[1] = desc_len(desc);
16088 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16089 + desc_bytes(desc), DMA_TO_DEVICE);
16090 +
16091 + if (dma_mapping_error(dev, flc->flc_dma)) {
16092 + dev_err(dev, "unable to map shared descriptor\n");
16093 + return -ENOMEM;
16094 + }
16095 +
16096 + /*
16097 + * TLS 1.0 decrypt shared descriptor
16098 + * Keys do not fit inline, regardless of algorithms used
16099 + */
16100 + ctx->adata.key_dma = ctx->key_dma;
16101 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16102 +
16103 + flc = &ctx->flc[DECRYPT];
16104 + desc = flc->sh_desc;
16105 +
16106 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16107 + ctx->authsize, blocksize);
16108 +
16109 + flc->flc[1] = desc_len(desc); /* SDL */
16110 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16111 + desc_bytes(desc), DMA_TO_DEVICE);
16112 + if (dma_mapping_error(dev, flc->flc_dma)) {
16113 + dev_err(dev, "unable to map shared descriptor\n");
16114 + return -ENOMEM;
16115 + }
16116 +
16117 + return 0;
16118 +}
16119 +
16120 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16121 + unsigned int keylen)
16122 +{
16123 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16124 + struct device *dev = ctx->dev;
16125 + struct crypto_authenc_keys keys;
16126 + int ret;
16127 +
16128 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16129 + goto badkey;
16130 +
16131 +#ifdef DEBUG
16132 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16133 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16134 + keys.authkeylen);
16135 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16136 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16137 +#endif
16138 +
16139 + ctx->adata.keylen = split_key_len(ctx->adata.algtype &
16140 + OP_ALG_ALGSEL_MASK);
16141 + ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
16142 + OP_ALG_ALGSEL_MASK);
16143 +
16144 +#ifdef DEBUG
16145 + dev_err(dev, "split keylen %d split keylen padded %d\n",
16146 + ctx->adata.keylen, ctx->adata.keylen_pad);
16147 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16148 + DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
16149 + keys.authkeylen + keys.enckeylen, 1);
16150 +#endif
16151 +
16152 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16153 + goto badkey;
16154 +
16155 + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
16156 + if (ret)
16157 + goto badkey;
16158 +
16159 +	/* append the encryption key after the auth split key */
16160 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16161 +
16162 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
16163 + keys.enckeylen, DMA_TO_DEVICE);
16164 + if (dma_mapping_error(dev, ctx->key_dma)) {
16165 + dev_err(dev, "unable to map key i/o memory\n");
16166 + return -ENOMEM;
16167 + }
16168 +#ifdef DEBUG
16169 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16170 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16171 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16172 +#endif
16173 +
16174 + ctx->cdata.keylen = keys.enckeylen;
16175 +
16176 + ret = tls_set_sh_desc(tls);
16177 + if (ret)
16178 + dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
16179 + keys.enckeylen, DMA_TO_DEVICE);
16180 +
16181 + return ret;
16182 +badkey:
16183 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16184 + return -EINVAL;
16185 +}
16186 +
16187 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16188 +{
16189 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16190 +
16191 + ctx->authsize = authsize;
16192 + tls_set_sh_desc(tls);
16193 +
16194 + return 0;
16195 +}
16196 +
16197 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16198 +{
16199 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16200 + struct device *dev = ctx->dev;
16201 + unsigned int ivsize = crypto_aead_ivsize(aead);
16202 + struct caam_flc *flc;
16203 + u32 *desc;
16204 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16205 + ctx->cdata.keylen;
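+	/*
+	 * rem_bytes is the room left in the 64-word descriptor buffer for the
+	 * shared descriptor commands once the job descriptor I/O section and
+	 * an inlined key are accounted for; if the descriptor does not fit,
+	 * the key is referenced by DMA address instead of being inlined.
+	 */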
16206 +
16207 + if (!ctx->cdata.keylen || !ctx->authsize)
16208 + return 0;
16209 +
16210 + /*
16211 + * AES GCM encrypt shared descriptor
16212 + * Job Descriptor and Shared Descriptor
16213 + * must fit into the 64-word Descriptor h/w Buffer
16214 + */
16215 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16216 + ctx->cdata.key_inline = true;
16217 + ctx->cdata.key_virt = ctx->key;
16218 + } else {
16219 + ctx->cdata.key_inline = false;
16220 + ctx->cdata.key_dma = ctx->key_dma;
16221 + }
16222 +
16223 + flc = &ctx->flc[ENCRYPT];
16224 + desc = flc->sh_desc;
16225 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16226 +
16227 + flc->flc[1] = desc_len(desc); /* SDL */
16228 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16229 + desc_bytes(desc), DMA_TO_DEVICE);
16230 + if (dma_mapping_error(dev, flc->flc_dma)) {
16231 + dev_err(dev, "unable to map shared descriptor\n");
16232 + return -ENOMEM;
16233 + }
16234 +
16235 + /*
16236 + * Job Descriptor and Shared Descriptors
16237 + * must all fit into the 64-word Descriptor h/w Buffer
16238 + */
16239 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16240 + ctx->cdata.key_inline = true;
16241 + ctx->cdata.key_virt = ctx->key;
16242 + } else {
16243 + ctx->cdata.key_inline = false;
16244 + ctx->cdata.key_dma = ctx->key_dma;
16245 + }
16246 +
16247 + flc = &ctx->flc[DECRYPT];
16248 + desc = flc->sh_desc;
16249 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16250 +
16251 + flc->flc[1] = desc_len(desc); /* SDL */
16252 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16253 + desc_bytes(desc), DMA_TO_DEVICE);
16254 + if (dma_mapping_error(dev, flc->flc_dma)) {
16255 + dev_err(dev, "unable to map shared descriptor\n");
16256 + return -ENOMEM;
16257 + }
16258 +
16259 + return 0;
16260 +}
16261 +
16262 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16263 +{
16264 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16265 +
16266 + ctx->authsize = authsize;
16267 + gcm_set_sh_desc(authenc);
16268 +
16269 + return 0;
16270 +}
16271 +
16272 +static int gcm_setkey(struct crypto_aead *aead,
16273 + const u8 *key, unsigned int keylen)
16274 +{
16275 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16276 + struct device *dev = ctx->dev;
16277 + int ret;
16278 +
16279 +#ifdef DEBUG
16280 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16281 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16282 +#endif
16283 +
16284 + memcpy(ctx->key, key, keylen);
16285 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16286 + if (dma_mapping_error(dev, ctx->key_dma)) {
16287 + dev_err(dev, "unable to map key i/o memory\n");
16288 + return -ENOMEM;
16289 + }
16290 + ctx->cdata.keylen = keylen;
16291 +
16292 + ret = gcm_set_sh_desc(aead);
16293 + if (ret)
16294 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16295 + DMA_TO_DEVICE);
16296 +
16297 + return ret;
16298 +}
16299 +
16300 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16301 +{
16302 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16303 + struct device *dev = ctx->dev;
16304 + unsigned int ivsize = crypto_aead_ivsize(aead);
16305 + struct caam_flc *flc;
16306 + u32 *desc;
16307 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16308 + ctx->cdata.keylen;
16309 +
16310 + if (!ctx->cdata.keylen || !ctx->authsize)
16311 + return 0;
16312 +
16313 + ctx->cdata.key_virt = ctx->key;
16314 +
16315 + /*
16316 + * RFC4106 encrypt shared descriptor
16317 + * Job Descriptor and Shared Descriptor
16318 + * must fit into the 64-word Descriptor h/w Buffer
16319 + */
16320 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16321 + ctx->cdata.key_inline = true;
16322 + } else {
16323 + ctx->cdata.key_inline = false;
16324 + ctx->cdata.key_dma = ctx->key_dma;
16325 + }
16326 +
16327 + flc = &ctx->flc[ENCRYPT];
16328 + desc = flc->sh_desc;
16329 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16330 + true);
16331 +
16332 + flc->flc[1] = desc_len(desc); /* SDL */
16333 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16334 + desc_bytes(desc), DMA_TO_DEVICE);
16335 + if (dma_mapping_error(dev, flc->flc_dma)) {
16336 + dev_err(dev, "unable to map shared descriptor\n");
16337 + return -ENOMEM;
16338 + }
16339 +
16340 + /*
16341 + * Job Descriptor and Shared Descriptors
16342 + * must all fit into the 64-word Descriptor h/w Buffer
16343 + */
16344 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16345 + ctx->cdata.key_inline = true;
16346 + } else {
16347 + ctx->cdata.key_inline = false;
16348 + ctx->cdata.key_dma = ctx->key_dma;
16349 + }
16350 +
16351 + flc = &ctx->flc[DECRYPT];
16352 + desc = flc->sh_desc;
16353 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16354 + true);
16355 +
16356 + flc->flc[1] = desc_len(desc); /* SDL */
16357 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16358 + desc_bytes(desc), DMA_TO_DEVICE);
16359 + if (dma_mapping_error(dev, flc->flc_dma)) {
16360 + dev_err(dev, "unable to map shared descriptor\n");
16361 + return -ENOMEM;
16362 + }
16363 +
16364 + return 0;
16365 +}
16366 +
16367 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16368 + unsigned int authsize)
16369 +{
16370 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16371 +
16372 + ctx->authsize = authsize;
16373 + rfc4106_set_sh_desc(authenc);
16374 +
16375 + return 0;
16376 +}
16377 +
16378 +static int rfc4106_setkey(struct crypto_aead *aead,
16379 + const u8 *key, unsigned int keylen)
16380 +{
16381 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16382 + struct device *dev = ctx->dev;
16383 + int ret;
16384 +
16385 + if (keylen < 4)
16386 + return -EINVAL;
16387 +
16388 +#ifdef DEBUG
16389 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16390 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16391 +#endif
16392 +
16393 + memcpy(ctx->key, key, keylen);
16394 + /*
16395 + * The last four bytes of the key material are used as the salt value
16396 + * in the nonce. Update the AES key length.
16397 + */
16398 + ctx->cdata.keylen = keylen - 4;
16399 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16400 + DMA_TO_DEVICE);
16401 + if (dma_mapping_error(dev, ctx->key_dma)) {
16402 + dev_err(dev, "unable to map key i/o memory\n");
16403 + return -ENOMEM;
16404 + }
16405 +
16406 + ret = rfc4106_set_sh_desc(aead);
16407 + if (ret)
16408 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16409 + DMA_TO_DEVICE);
16410 +
16411 + return ret;
16412 +}
16413 +
16414 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16415 +{
16416 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16417 + struct device *dev = ctx->dev;
16418 + unsigned int ivsize = crypto_aead_ivsize(aead);
16419 + struct caam_flc *flc;
16420 + u32 *desc;
16421 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16422 + ctx->cdata.keylen;
16423 +
16424 + if (!ctx->cdata.keylen || !ctx->authsize)
16425 + return 0;
16426 +
16427 + ctx->cdata.key_virt = ctx->key;
16428 +
16429 + /*
16430 + * RFC4543 encrypt shared descriptor
16431 + * Job Descriptor and Shared Descriptor
16432 + * must fit into the 64-word Descriptor h/w Buffer
16433 + */
16434 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16435 + ctx->cdata.key_inline = true;
16436 + } else {
16437 + ctx->cdata.key_inline = false;
16438 + ctx->cdata.key_dma = ctx->key_dma;
16439 + }
16440 +
16441 + flc = &ctx->flc[ENCRYPT];
16442 + desc = flc->sh_desc;
16443 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16444 + true);
16445 +
16446 + flc->flc[1] = desc_len(desc); /* SDL */
16447 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16448 + desc_bytes(desc), DMA_TO_DEVICE);
16449 + if (dma_mapping_error(dev, flc->flc_dma)) {
16450 + dev_err(dev, "unable to map shared descriptor\n");
16451 + return -ENOMEM;
16452 + }
16453 +
16454 + /*
16455 + * Job Descriptor and Shared Descriptors
16456 + * must all fit into the 64-word Descriptor h/w Buffer
16457 + */
16458 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16459 + ctx->cdata.key_inline = true;
16460 + } else {
16461 + ctx->cdata.key_inline = false;
16462 + ctx->cdata.key_dma = ctx->key_dma;
16463 + }
16464 +
16465 + flc = &ctx->flc[DECRYPT];
16466 + desc = flc->sh_desc;
16467 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16468 + true);
16469 +
16470 + flc->flc[1] = desc_len(desc); /* SDL */
16471 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16472 + desc_bytes(desc), DMA_TO_DEVICE);
16473 + if (dma_mapping_error(dev, flc->flc_dma)) {
16474 + dev_err(dev, "unable to map shared descriptor\n");
16475 + return -ENOMEM;
16476 + }
16477 +
16478 + return 0;
16479 +}
16480 +
16481 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16482 + unsigned int authsize)
16483 +{
16484 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16485 +
16486 + ctx->authsize = authsize;
16487 + rfc4543_set_sh_desc(authenc);
16488 +
16489 + return 0;
16490 +}
16491 +
16492 +static int rfc4543_setkey(struct crypto_aead *aead,
16493 + const u8 *key, unsigned int keylen)
16494 +{
16495 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16496 + struct device *dev = ctx->dev;
16497 + int ret;
16498 +
16499 + if (keylen < 4)
16500 + return -EINVAL;
16501 +
16502 +#ifdef DEBUG
16503 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16504 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16505 +#endif
16506 +
16507 + memcpy(ctx->key, key, keylen);
16508 + /*
16509 + * The last four bytes of the key material are used as the salt value
16510 + * in the nonce. Update the AES key length.
16511 + */
16512 + ctx->cdata.keylen = keylen - 4;
16513 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16514 + DMA_TO_DEVICE);
16515 + if (dma_mapping_error(dev, ctx->key_dma)) {
16516 + dev_err(dev, "unable to map key i/o memory\n");
16517 + return -ENOMEM;
16518 + }
16519 +
16520 + ret = rfc4543_set_sh_desc(aead);
16521 + if (ret)
16522 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16523 + DMA_TO_DEVICE);
16524 +
16525 + return ret;
16526 +}
16527 +
16528 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16529 + const u8 *key, unsigned int keylen)
16530 +{
16531 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16532 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16533 + const char *alg_name = crypto_tfm_alg_name(tfm);
16534 + struct device *dev = ctx->dev;
16535 + struct caam_flc *flc;
16536 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16537 + u32 *desc;
16538 + u32 ctx1_iv_off = 0;
16539 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16540 + OP_ALG_AAI_CTR_MOD128);
16541 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16542 +
16543 + memcpy(ctx->key, key, keylen);
16544 +#ifdef DEBUG
16545 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16546 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16547 +#endif
16548 + /*
16549 + * AES-CTR needs to load IV in CONTEXT1 reg
16550 + * at an offset of 128bits (16bytes)
16551 + * CONTEXT1[255:128] = IV
16552 + */
16553 + if (ctr_mode)
16554 + ctx1_iv_off = 16;
16555 +
16556 + /*
16557 + * RFC3686 specific:
16558 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16559 + * | *key = {KEY, NONCE}
16560 + */
16561 + if (is_rfc3686) {
16562 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16563 + keylen -= CTR_RFC3686_NONCE_SIZE;
16564 + }
16565 +
16566 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16567 + if (dma_mapping_error(dev, ctx->key_dma)) {
16568 + dev_err(dev, "unable to map key i/o memory\n");
16569 + return -ENOMEM;
16570 + }
16571 + ctx->cdata.keylen = keylen;
16572 + ctx->cdata.key_virt = ctx->key;
16573 + ctx->cdata.key_inline = true;
16574 +
16575 + /* ablkcipher_encrypt shared descriptor */
16576 + flc = &ctx->flc[ENCRYPT];
16577 + desc = flc->sh_desc;
16578 +
16579 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16580 + is_rfc3686, ctx1_iv_off);
16581 +
16582 + flc->flc[1] = desc_len(desc); /* SDL */
16583 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16584 + desc_bytes(desc), DMA_TO_DEVICE);
16585 + if (dma_mapping_error(dev, flc->flc_dma)) {
16586 + dev_err(dev, "unable to map shared descriptor\n");
16587 + return -ENOMEM;
16588 + }
16589 +
16590 + /* ablkcipher_decrypt shared descriptor */
16591 + flc = &ctx->flc[DECRYPT];
16592 + desc = flc->sh_desc;
16593 +
16594 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16595 + is_rfc3686, ctx1_iv_off);
16596 +
16597 + flc->flc[1] = desc_len(desc); /* SDL */
16598 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16599 + desc_bytes(desc), DMA_TO_DEVICE);
16600 + if (dma_mapping_error(dev, flc->flc_dma)) {
16601 + dev_err(dev, "unable to map shared descriptor\n");
16602 + return -ENOMEM;
16603 + }
16604 +
16605 + /* ablkcipher_givencrypt shared descriptor */
16606 + flc = &ctx->flc[GIVENCRYPT];
16607 + desc = flc->sh_desc;
16608 +
16609 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16610 + ivsize, is_rfc3686, ctx1_iv_off);
16611 +
16612 + flc->flc[1] = desc_len(desc); /* SDL */
16613 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16614 + desc_bytes(desc), DMA_TO_DEVICE);
16615 + if (dma_mapping_error(dev, flc->flc_dma)) {
16616 + dev_err(dev, "unable to map shared descriptor\n");
16617 + return -ENOMEM;
16618 + }
16619 +
16620 + return 0;
16621 +}
16622 +
16623 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16624 + const u8 *key, unsigned int keylen)
16625 +{
16626 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16627 + struct device *dev = ctx->dev;
16628 + struct caam_flc *flc;
16629 + u32 *desc;
16630 +
16631 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16632 + dev_err(dev, "key size mismatch\n");
16633 + crypto_ablkcipher_set_flags(ablkcipher,
16634 + CRYPTO_TFM_RES_BAD_KEY_LEN);
16635 + return -EINVAL;
16636 + }
16637 +
16638 + memcpy(ctx->key, key, keylen);
16639 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16640 + if (dma_mapping_error(dev, ctx->key_dma)) {
16641 + dev_err(dev, "unable to map key i/o memory\n");
16642 + return -ENOMEM;
16643 + }
16644 + ctx->cdata.keylen = keylen;
16645 + ctx->cdata.key_virt = ctx->key;
16646 + ctx->cdata.key_inline = true;
16647 +
16648 + /* xts_ablkcipher_encrypt shared descriptor */
16649 + flc = &ctx->flc[ENCRYPT];
16650 + desc = flc->sh_desc;
16651 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16652 +
16653 + flc->flc[1] = desc_len(desc); /* SDL */
16654 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16655 + desc_bytes(desc), DMA_TO_DEVICE);
16656 + if (dma_mapping_error(dev, flc->flc_dma)) {
16657 + dev_err(dev, "unable to map shared descriptor\n");
16658 + return -ENOMEM;
16659 + }
16660 +
16661 + /* xts_ablkcipher_decrypt shared descriptor */
16662 + flc = &ctx->flc[DECRYPT];
16663 + desc = flc->sh_desc;
16664 +
16665 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16666 +
16667 + flc->flc[1] = desc_len(desc); /* SDL */
16668 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16669 + desc_bytes(desc), DMA_TO_DEVICE);
16670 + if (dma_mapping_error(dev, flc->flc_dma)) {
16671 + dev_err(dev, "unable to map shared descriptor\n");
16672 + return -ENOMEM;
16673 + }
16674 +
16675 + return 0;
16676 +}
16677 +
16678 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16679 + *req, bool encrypt)
16680 +{
16681 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16682 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16683 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16684 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16685 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16686 + struct device *dev = ctx->dev;
16687 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16688 + GFP_KERNEL : GFP_ATOMIC;
16689 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16690 + struct ablkcipher_edesc *edesc;
16691 + dma_addr_t iv_dma;
16692 + bool in_contig;
16693 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16694 + int dst_sg_idx, qm_sg_ents;
16695 + struct dpaa2_sg_entry *sg_table;
16696 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16697 +
16698 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16699 + if (unlikely(src_nents < 0)) {
16700 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16701 + req->nbytes);
16702 + return ERR_PTR(src_nents);
16703 + }
16704 +
16705 + if (unlikely(req->dst != req->src)) {
16706 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16707 + if (unlikely(dst_nents < 0)) {
16708 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16709 + req->nbytes);
16710 + return ERR_PTR(dst_nents);
16711 + }
16712 +
16713 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16714 + DMA_TO_DEVICE);
16715 + if (unlikely(!mapped_src_nents)) {
16716 + dev_err(dev, "unable to map source\n");
16717 + return ERR_PTR(-ENOMEM);
16718 + }
16719 +
16720 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16721 + DMA_FROM_DEVICE);
16722 + if (unlikely(!mapped_dst_nents)) {
16723 + dev_err(dev, "unable to map destination\n");
16724 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16725 + return ERR_PTR(-ENOMEM);
16726 + }
16727 + } else {
16728 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16729 + DMA_BIDIRECTIONAL);
16730 + if (unlikely(!mapped_src_nents)) {
16731 + dev_err(dev, "unable to map source\n");
16732 + return ERR_PTR(-ENOMEM);
16733 + }
16734 + }
16735 +
16736 + iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16737 + if (dma_mapping_error(dev, iv_dma)) {
16738 + dev_err(dev, "unable to map IV\n");
16739 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16740 + 0, 0, 0, 0);
16741 + return ERR_PTR(-ENOMEM);
16742 + }
16743 +
16744 + if (mapped_src_nents == 1 &&
16745 + iv_dma + ivsize == sg_dma_address(req->src)) {
16746 + in_contig = true;
16747 + qm_sg_ents = 0;
16748 + } else {
16749 + in_contig = false;
16750 + qm_sg_ents = 1 + mapped_src_nents;
16751 + }
16752 + dst_sg_idx = qm_sg_ents;
16753 +
16754 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16755 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16756 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16757 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16758 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16759 + iv_dma, ivsize, op_type, 0, 0);
16760 + return ERR_PTR(-ENOMEM);
16761 + }
16762 +
16763 + /* allocate space for base edesc and link tables */
16764 + edesc = qi_cache_alloc(GFP_DMA | flags);
16765 + if (unlikely(!edesc)) {
16766 + dev_err(dev, "could not allocate extended descriptor\n");
16767 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16768 + iv_dma, ivsize, op_type, 0, 0);
16769 + return ERR_PTR(-ENOMEM);
16770 + }
16771 +
16772 + edesc->src_nents = src_nents;
16773 + edesc->dst_nents = dst_nents;
16774 + edesc->iv_dma = iv_dma;
16775 + sg_table = &edesc->sgt[0];
16776 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16777 +
16778 + if (!in_contig) {
16779 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16780 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16781 + }
16782 +
16783 + if (mapped_dst_nents > 1)
16784 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16785 + dst_sg_idx, 0);
16786 +
16787 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16788 + DMA_TO_DEVICE);
16789 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16790 + dev_err(dev, "unable to map S/G table\n");
16791 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16792 + iv_dma, ivsize, op_type, 0, 0);
16793 + qi_cache_free(edesc);
16794 + return ERR_PTR(-ENOMEM);
16795 + }
16796 +
16797 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16798 + dpaa2_fl_set_final(in_fle, true);
16799 + dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16800 + dpaa2_fl_set_len(out_fle, req->nbytes);
16801 +
16802 + if (!in_contig) {
16803 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16804 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16805 + } else {
16806 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16807 + dpaa2_fl_set_addr(in_fle, iv_dma);
16808 + }
16809 +
16810 + if (req->src == req->dst) {
16811 + if (!in_contig) {
16812 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16813 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16814 + sizeof(*sg_table));
16815 + } else {
16816 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16817 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16818 + }
16819 + } else if (mapped_dst_nents > 1) {
16820 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16821 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16822 + sizeof(*sg_table));
16823 + } else {
16824 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16825 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16826 + }
16827 +
16828 + return edesc;
16829 +}
16830 +
16831 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16832 + struct skcipher_givcrypt_request *greq)
16833 +{
16834 + struct ablkcipher_request *req = &greq->creq;
16835 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16836 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16837 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16838 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16839 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16840 + struct device *dev = ctx->dev;
16841 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16842 + GFP_KERNEL : GFP_ATOMIC;
16843 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16844 + struct ablkcipher_edesc *edesc;
16845 + dma_addr_t iv_dma;
16846 + bool out_contig;
16847 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16848 + struct dpaa2_sg_entry *sg_table;
16849 + int dst_sg_idx, qm_sg_ents;
16850 +
16851 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16852 + if (unlikely(src_nents < 0)) {
16853 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16854 + req->nbytes);
16855 + return ERR_PTR(src_nents);
16856 + }
16857 +
16858 + if (unlikely(req->dst != req->src)) {
16859 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16860 + if (unlikely(dst_nents < 0)) {
16861 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16862 + req->nbytes);
16863 + return ERR_PTR(dst_nents);
16864 + }
16865 +
16866 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16867 + DMA_TO_DEVICE);
16868 + if (unlikely(!mapped_src_nents)) {
16869 + dev_err(dev, "unable to map source\n");
16870 + return ERR_PTR(-ENOMEM);
16871 + }
16872 +
16873 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16874 + DMA_FROM_DEVICE);
16875 + if (unlikely(!mapped_dst_nents)) {
16876 + dev_err(dev, "unable to map destination\n");
16877 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16878 + return ERR_PTR(-ENOMEM);
16879 + }
16880 + } else {
16881 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16882 + DMA_BIDIRECTIONAL);
16883 + if (unlikely(!mapped_src_nents)) {
16884 + dev_err(dev, "unable to map source\n");
16885 + return ERR_PTR(-ENOMEM);
16886 + }
16887 +
16888 + dst_nents = src_nents;
16889 + mapped_dst_nents = src_nents;
16890 + }
16891 +
16892 + iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16893 + if (dma_mapping_error(dev, iv_dma)) {
16894 + dev_err(dev, "unable to map IV\n");
16895 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16896 + 0, 0, 0, 0);
16897 + return ERR_PTR(-ENOMEM);
16898 + }
16899 +
16900 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16901 + dst_sg_idx = qm_sg_ents;
16902 + if (mapped_dst_nents == 1 &&
16903 + iv_dma + ivsize == sg_dma_address(req->dst)) {
16904 + out_contig = true;
16905 + } else {
16906 + out_contig = false;
16907 + qm_sg_ents += 1 + mapped_dst_nents;
16908 + }
16909 +
16910 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16911 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16912 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16913 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16914 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16915 + return ERR_PTR(-ENOMEM);
16916 + }
16917 +
16918 + /* allocate space for base edesc and link tables */
16919 + edesc = qi_cache_alloc(GFP_DMA | flags);
16920 + if (!edesc) {
16921 + dev_err(dev, "could not allocate extended descriptor\n");
16922 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16923 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16924 + return ERR_PTR(-ENOMEM);
16925 + }
16926 +
16927 + edesc->src_nents = src_nents;
16928 + edesc->dst_nents = dst_nents;
16929 + edesc->iv_dma = iv_dma;
16930 + sg_table = &edesc->sgt[0];
16931 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16932 +
16933 + if (mapped_src_nents > 1)
16934 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16935 +
16936 + if (!out_contig) {
16937 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16938 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16939 + dst_sg_idx + 1, 0);
16940 + }
16941 +
16942 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16943 + DMA_TO_DEVICE);
16944 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16945 + dev_err(dev, "unable to map S/G table\n");
16946 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16947 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16948 + qi_cache_free(edesc);
16949 + return ERR_PTR(-ENOMEM);
16950 + }
16951 +
16952 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16953 + dpaa2_fl_set_final(in_fle, true);
16954 + dpaa2_fl_set_len(in_fle, req->nbytes);
16955 + dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
16956 +
16957 + if (mapped_src_nents > 1) {
16958 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16959 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16960 + } else {
16961 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16962 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
16963 + }
16964 +
16965 + if (!out_contig) {
16966 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16967 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16968 + sizeof(*sg_table));
16969 + } else {
16970 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16971 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16972 + }
16973 +
16974 + return edesc;
16975 +}
16976 +
16977 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
16978 + struct aead_request *req)
16979 +{
16980 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16981 + int ivsize = crypto_aead_ivsize(aead);
16982 + struct caam_request *caam_req = aead_request_ctx(req);
16983 +
16984 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16985 + edesc->iv_dma, ivsize, caam_req->op_type,
16986 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16987 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16988 +}
16989 +
16990 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
16991 + struct aead_request *req)
16992 +{
16993 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
16994 + int ivsize = crypto_aead_ivsize(tls);
16995 + struct caam_request *caam_req = aead_request_ctx(req);
16996 +
16997 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
16998 + edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
16999 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17000 +}
17001 +
17002 +static void ablkcipher_unmap(struct device *dev,
17003 + struct ablkcipher_edesc *edesc,
17004 + struct ablkcipher_request *req)
17005 +{
17006 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17007 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17008 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17009 +
17010 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17011 + edesc->iv_dma, ivsize, caam_req->op_type,
17012 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17013 +}
17014 +
17015 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
17016 +{
17017 + struct crypto_async_request *areq = cbk_ctx;
17018 + struct aead_request *req = container_of(areq, struct aead_request,
17019 + base);
17020 + struct caam_request *req_ctx = to_caam_req(areq);
17021 + struct aead_edesc *edesc = req_ctx->edesc;
17022 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17023 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17024 + int ecode = 0;
17025 +
17026 +#ifdef DEBUG
17027 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17028 +#endif
17029 +
17030 + if (unlikely(status)) {
17031 + caam_qi2_strstatus(ctx->dev, status);
17032 + ecode = -EIO;
17033 + }
17034 +
17035 + aead_unmap(ctx->dev, edesc, req);
17036 + qi_cache_free(edesc);
17037 + aead_request_complete(req, ecode);
17038 +}
17039 +
17040 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
17041 +{
17042 + struct crypto_async_request *areq = cbk_ctx;
17043 + struct aead_request *req = container_of(areq, struct aead_request,
17044 + base);
17045 + struct caam_request *req_ctx = to_caam_req(areq);
17046 + struct aead_edesc *edesc = req_ctx->edesc;
17047 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17048 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17049 + int ecode = 0;
17050 +
17051 +#ifdef DEBUG
17052 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17053 +#endif
17054 +
17055 + if (unlikely(status)) {
17056 + caam_qi2_strstatus(ctx->dev, status);
17057 + /*
17058 +		 * If the hardware ICV check failed, return -EBADMSG, else -EIO.
17059 + */
17060 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17061 + JRSTA_CCBERR_ERRID_ICVCHK)
17062 + ecode = -EBADMSG;
17063 + else
17064 + ecode = -EIO;
17065 + }
17066 +
17067 + aead_unmap(ctx->dev, edesc, req);
17068 + qi_cache_free(edesc);
17069 + aead_request_complete(req, ecode);
17070 +}
17071 +
17072 +static int aead_encrypt(struct aead_request *req)
17073 +{
17074 + struct aead_edesc *edesc;
17075 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17076 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17077 + struct caam_request *caam_req = aead_request_ctx(req);
17078 + int ret;
17079 +
17080 + /* allocate extended descriptor */
17081 + edesc = aead_edesc_alloc(req, true);
17082 + if (IS_ERR(edesc))
17083 + return PTR_ERR(edesc);
17084 +
17085 + caam_req->flc = &ctx->flc[ENCRYPT];
17086 + caam_req->op_type = ENCRYPT;
17087 + caam_req->cbk = aead_encrypt_done;
17088 + caam_req->ctx = &req->base;
17089 + caam_req->edesc = edesc;
17090 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17091 + if (ret != -EINPROGRESS &&
17092 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17093 + aead_unmap(ctx->dev, edesc, req);
17094 + qi_cache_free(edesc);
17095 + }
17096 +
17097 + return ret;
17098 +}
17099 +
17100 +static int aead_decrypt(struct aead_request *req)
17101 +{
17102 + struct aead_edesc *edesc;
17103 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17104 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17105 + struct caam_request *caam_req = aead_request_ctx(req);
17106 + int ret;
17107 +
17108 + /* allocate extended descriptor */
17109 + edesc = aead_edesc_alloc(req, false);
17110 + if (IS_ERR(edesc))
17111 + return PTR_ERR(edesc);
17112 +
17113 + caam_req->flc = &ctx->flc[DECRYPT];
17114 + caam_req->op_type = DECRYPT;
17115 + caam_req->cbk = aead_decrypt_done;
17116 + caam_req->ctx = &req->base;
17117 + caam_req->edesc = edesc;
17118 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17119 + if (ret != -EINPROGRESS &&
17120 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17121 + aead_unmap(ctx->dev, edesc, req);
17122 + qi_cache_free(edesc);
17123 + }
17124 +
17125 + return ret;
17126 +}
17127 +
17128 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17129 +{
17130 + struct crypto_async_request *areq = cbk_ctx;
17131 + struct aead_request *req = container_of(areq, struct aead_request,
17132 + base);
17133 + struct caam_request *req_ctx = to_caam_req(areq);
17134 + struct tls_edesc *edesc = req_ctx->edesc;
17135 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17136 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17137 + int ecode = 0;
17138 +
17139 +#ifdef DEBUG
17140 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17141 +#endif
17142 +
17143 + if (unlikely(status)) {
17144 + caam_qi2_strstatus(ctx->dev, status);
17145 + ecode = -EIO;
17146 + }
17147 +
17148 + tls_unmap(ctx->dev, edesc, req);
17149 + qi_cache_free(edesc);
17150 + aead_request_complete(req, ecode);
17151 +}
17152 +
17153 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17154 +{
17155 + struct crypto_async_request *areq = cbk_ctx;
17156 + struct aead_request *req = container_of(areq, struct aead_request,
17157 + base);
17158 + struct caam_request *req_ctx = to_caam_req(areq);
17159 + struct tls_edesc *edesc = req_ctx->edesc;
17160 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17161 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17162 + int ecode = 0;
17163 +
17164 +#ifdef DEBUG
17165 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17166 +#endif
17167 +
17168 + if (unlikely(status)) {
17169 + caam_qi2_strstatus(ctx->dev, status);
17170 + /*
17171 +		 * If the hardware ICV check failed, return -EBADMSG, else -EIO.
17172 + */
17173 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17174 + JRSTA_CCBERR_ERRID_ICVCHK)
17175 + ecode = -EBADMSG;
17176 + else
17177 + ecode = -EIO;
17178 + }
17179 +
17180 + tls_unmap(ctx->dev, edesc, req);
17181 + qi_cache_free(edesc);
17182 + aead_request_complete(req, ecode);
17183 +}
17184 +
17185 +static int tls_encrypt(struct aead_request *req)
17186 +{
17187 + struct tls_edesc *edesc;
17188 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17189 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17190 + struct caam_request *caam_req = aead_request_ctx(req);
17191 + int ret;
17192 +
17193 + /* allocate extended descriptor */
17194 + edesc = tls_edesc_alloc(req, true);
17195 + if (IS_ERR(edesc))
17196 + return PTR_ERR(edesc);
17197 +
17198 + caam_req->flc = &ctx->flc[ENCRYPT];
17199 + caam_req->op_type = ENCRYPT;
17200 + caam_req->cbk = tls_encrypt_done;
17201 + caam_req->ctx = &req->base;
17202 + caam_req->edesc = edesc;
17203 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17204 + if (ret != -EINPROGRESS &&
17205 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17206 + tls_unmap(ctx->dev, edesc, req);
17207 + qi_cache_free(edesc);
17208 + }
17209 +
17210 + return ret;
17211 +}
17212 +
17213 +static int tls_decrypt(struct aead_request *req)
17214 +{
17215 + struct tls_edesc *edesc;
17216 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17217 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17218 + struct caam_request *caam_req = aead_request_ctx(req);
17219 + int ret;
17220 +
17221 + /* allocate extended descriptor */
17222 + edesc = tls_edesc_alloc(req, false);
17223 + if (IS_ERR(edesc))
17224 + return PTR_ERR(edesc);
17225 +
17226 + caam_req->flc = &ctx->flc[DECRYPT];
17227 + caam_req->op_type = DECRYPT;
17228 + caam_req->cbk = tls_decrypt_done;
17229 + caam_req->ctx = &req->base;
17230 + caam_req->edesc = edesc;
17231 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17232 + if (ret != -EINPROGRESS &&
17233 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17234 + tls_unmap(ctx->dev, edesc, req);
17235 + qi_cache_free(edesc);
17236 + }
17237 +
17238 + return ret;
17239 +}
17240 +
17241 +static int ipsec_gcm_encrypt(struct aead_request *req)
17242 +{
17243 + if (req->assoclen < 8)
17244 + return -EINVAL;
17245 +
17246 + return aead_encrypt(req);
17247 +}
17248 +
17249 +static int ipsec_gcm_decrypt(struct aead_request *req)
17250 +{
17251 + if (req->assoclen < 8)
17252 + return -EINVAL;
17253 +
17254 + return aead_decrypt(req);
17255 +}
17256 +
17257 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17258 +{
17259 + struct crypto_async_request *areq = cbk_ctx;
17260 + struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17261 + struct caam_request *req_ctx = to_caam_req(areq);
17262 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17263 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17264 + struct ablkcipher_edesc *edesc = req_ctx->edesc;
17265 + int ecode = 0;
17266 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17267 +
17268 +#ifdef DEBUG
17269 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17270 +#endif
17271 +
17272 + if (unlikely(status)) {
17273 + caam_qi2_strstatus(ctx->dev, status);
17274 + ecode = -EIO;
17275 + }
17276 +
17277 +#ifdef DEBUG
17278 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
17279 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17280 + edesc->src_nents > 1 ? 100 : ivsize, 1);
17281 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
17282 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17283 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17284 +#endif
17285 +
17286 + ablkcipher_unmap(ctx->dev, edesc, req);
17287 + qi_cache_free(edesc);
17288 +
17289 + /*
17290 + * The crypto API expects us to set the IV (req->info) to the last
17291 + * ciphertext block. This is used e.g. by the CTS mode.
17292 + */
17293 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17294 + ivsize, 0);
17295 +
17296 + ablkcipher_request_complete(req, ecode);
17297 +}
17298 +
17299 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17300 +{
17301 + struct ablkcipher_edesc *edesc;
17302 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17303 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17304 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17305 + int ret;
17306 +
17307 + /* allocate extended descriptor */
17308 + edesc = ablkcipher_edesc_alloc(req, true);
17309 + if (IS_ERR(edesc))
17310 + return PTR_ERR(edesc);
17311 +
17312 + caam_req->flc = &ctx->flc[ENCRYPT];
17313 + caam_req->op_type = ENCRYPT;
17314 + caam_req->cbk = ablkcipher_done;
17315 + caam_req->ctx = &req->base;
17316 + caam_req->edesc = edesc;
17317 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17318 + if (ret != -EINPROGRESS &&
17319 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17320 + ablkcipher_unmap(ctx->dev, edesc, req);
17321 + qi_cache_free(edesc);
17322 + }
17323 +
17324 + return ret;
17325 +}
17326 +
17327 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17328 +{
17329 + struct ablkcipher_request *req = &greq->creq;
17330 + struct ablkcipher_edesc *edesc;
17331 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17332 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17333 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17334 + int ret;
17335 +
17336 + /* allocate extended descriptor */
17337 + edesc = ablkcipher_giv_edesc_alloc(greq);
17338 + if (IS_ERR(edesc))
17339 + return PTR_ERR(edesc);
17340 +
17341 + caam_req->flc = &ctx->flc[GIVENCRYPT];
17342 + caam_req->op_type = GIVENCRYPT;
17343 + caam_req->cbk = ablkcipher_done;
17344 + caam_req->ctx = &req->base;
17345 + caam_req->edesc = edesc;
17346 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17347 + if (ret != -EINPROGRESS &&
17348 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17349 + ablkcipher_unmap(ctx->dev, edesc, req);
17350 + qi_cache_free(edesc);
17351 + }
17352 +
17353 + return ret;
17354 +}
17355 +
17356 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17357 +{
17358 + struct ablkcipher_edesc *edesc;
17359 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17360 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17361 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17362 + int ret;
17363 +
17364 + /* allocate extended descriptor */
17365 + edesc = ablkcipher_edesc_alloc(req, false);
17366 + if (IS_ERR(edesc))
17367 + return PTR_ERR(edesc);
17368 +
17369 + caam_req->flc = &ctx->flc[DECRYPT];
17370 + caam_req->op_type = DECRYPT;
17371 + caam_req->cbk = ablkcipher_done;
17372 + caam_req->ctx = &req->base;
17373 + caam_req->edesc = edesc;
17374 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17375 + if (ret != -EINPROGRESS &&
17376 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17377 + ablkcipher_unmap(ctx->dev, edesc, req);
17378 + qi_cache_free(edesc);
17379 + }
17380 +
17381 + return ret;
17382 +}
17383 +
17384 +struct caam_crypto_alg {
17385 + struct list_head entry;
17386 + struct crypto_alg crypto_alg;
17387 + struct caam_alg_entry caam;
17388 +};
17389 +
17390 +static int caam_cra_init(struct crypto_tfm *tfm)
17391 +{
17392 + struct crypto_alg *alg = tfm->__crt_alg;
17393 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17394 + crypto_alg);
17395 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17396 +
17397 + /* copy descriptor header template value */
17398 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17399 + caam_alg->caam.class1_alg_type;
17400 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17401 + caam_alg->caam.class2_alg_type;
17402 +
17403 + ctx->dev = caam_alg->caam.dev;
17404 +
17405 + return 0;
17406 +}
17407 +
17408 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17409 +{
17410 + struct ablkcipher_tfm *ablkcipher_tfm =
17411 + crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17412 +
17413 + ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17414 + return caam_cra_init(tfm);
17415 +}
17416 +
17417 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17418 +{
17419 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17420 + return caam_cra_init(crypto_aead_tfm(tfm));
17421 +}
17422 +
17423 +static void caam_exit_common(struct caam_ctx *ctx)
17424 +{
17425 + int i;
17426 +
17427 + for (i = 0; i < NUM_OP; i++) {
17428 + if (!ctx->flc[i].flc_dma)
17429 + continue;
17430 + dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
17431 + sizeof(ctx->flc[i].flc) +
17432 + desc_bytes(ctx->flc[i].sh_desc),
17433 + DMA_TO_DEVICE);
17434 + }
17435 +
17436 + if (ctx->key_dma)
17437 + dma_unmap_single(ctx->dev, ctx->key_dma,
17438 + ctx->cdata.keylen + ctx->adata.keylen_pad,
17439 + DMA_TO_DEVICE);
17440 +}
17441 +
17442 +static void caam_cra_exit(struct crypto_tfm *tfm)
17443 +{
17444 + caam_exit_common(crypto_tfm_ctx(tfm));
17445 +}
17446 +
17447 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17448 +{
17449 + caam_exit_common(crypto_aead_ctx(tfm));
17450 +}
17451 +
17452 +#define template_ablkcipher template_u.ablkcipher
17453 +struct caam_alg_template {
17454 + char name[CRYPTO_MAX_ALG_NAME];
17455 + char driver_name[CRYPTO_MAX_ALG_NAME];
17456 + unsigned int blocksize;
17457 + u32 type;
17458 + union {
17459 + struct ablkcipher_alg ablkcipher;
17460 + } template_u;
17461 + u32 class1_alg_type;
17462 + u32 class2_alg_type;
17463 +};
17464 +
17465 +static struct caam_alg_template driver_algs[] = {
17466 + /* ablkcipher descriptor */
17467 + {
17468 + .name = "cbc(aes)",
17469 + .driver_name = "cbc-aes-caam-qi2",
17470 + .blocksize = AES_BLOCK_SIZE,
17471 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17472 + .template_ablkcipher = {
17473 + .setkey = ablkcipher_setkey,
17474 + .encrypt = ablkcipher_encrypt,
17475 + .decrypt = ablkcipher_decrypt,
17476 + .givencrypt = ablkcipher_givencrypt,
17477 + .geniv = "<built-in>",
17478 + .min_keysize = AES_MIN_KEY_SIZE,
17479 + .max_keysize = AES_MAX_KEY_SIZE,
17480 + .ivsize = AES_BLOCK_SIZE,
17481 + },
17482 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17483 + },
17484 + {
17485 + .name = "cbc(des3_ede)",
17486 + .driver_name = "cbc-3des-caam-qi2",
17487 + .blocksize = DES3_EDE_BLOCK_SIZE,
17488 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17489 + .template_ablkcipher = {
17490 + .setkey = ablkcipher_setkey,
17491 + .encrypt = ablkcipher_encrypt,
17492 + .decrypt = ablkcipher_decrypt,
17493 + .givencrypt = ablkcipher_givencrypt,
17494 + .geniv = "<built-in>",
17495 + .min_keysize = DES3_EDE_KEY_SIZE,
17496 + .max_keysize = DES3_EDE_KEY_SIZE,
17497 + .ivsize = DES3_EDE_BLOCK_SIZE,
17498 + },
17499 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17500 + },
17501 + {
17502 + .name = "cbc(des)",
17503 + .driver_name = "cbc-des-caam-qi2",
17504 + .blocksize = DES_BLOCK_SIZE,
17505 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17506 + .template_ablkcipher = {
17507 + .setkey = ablkcipher_setkey,
17508 + .encrypt = ablkcipher_encrypt,
17509 + .decrypt = ablkcipher_decrypt,
17510 + .givencrypt = ablkcipher_givencrypt,
17511 + .geniv = "<built-in>",
17512 + .min_keysize = DES_KEY_SIZE,
17513 + .max_keysize = DES_KEY_SIZE,
17514 + .ivsize = DES_BLOCK_SIZE,
17515 + },
17516 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17517 + },
17518 + {
17519 + .name = "ctr(aes)",
17520 + .driver_name = "ctr-aes-caam-qi2",
17521 + .blocksize = 1,
17522 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17523 + .template_ablkcipher = {
17524 + .setkey = ablkcipher_setkey,
17525 + .encrypt = ablkcipher_encrypt,
17526 + .decrypt = ablkcipher_decrypt,
17527 + .geniv = "chainiv",
17528 + .min_keysize = AES_MIN_KEY_SIZE,
17529 + .max_keysize = AES_MAX_KEY_SIZE,
17530 + .ivsize = AES_BLOCK_SIZE,
17531 + },
17532 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17533 + },
17534 + {
17535 + .name = "rfc3686(ctr(aes))",
17536 + .driver_name = "rfc3686-ctr-aes-caam-qi2",
17537 + .blocksize = 1,
17538 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17539 + .template_ablkcipher = {
17540 + .setkey = ablkcipher_setkey,
17541 + .encrypt = ablkcipher_encrypt,
17542 + .decrypt = ablkcipher_decrypt,
17543 + .givencrypt = ablkcipher_givencrypt,
17544 + .geniv = "<built-in>",
17545 + .min_keysize = AES_MIN_KEY_SIZE +
17546 + CTR_RFC3686_NONCE_SIZE,
17547 + .max_keysize = AES_MAX_KEY_SIZE +
17548 + CTR_RFC3686_NONCE_SIZE,
17549 + .ivsize = CTR_RFC3686_IV_SIZE,
17550 + },
17551 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17552 + },
17553 + {
17554 + .name = "xts(aes)",
17555 + .driver_name = "xts-aes-caam-qi2",
17556 + .blocksize = AES_BLOCK_SIZE,
17557 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17558 + .template_ablkcipher = {
17559 + .setkey = xts_ablkcipher_setkey,
17560 + .encrypt = ablkcipher_encrypt,
17561 + .decrypt = ablkcipher_decrypt,
17562 + .geniv = "eseqiv",
17563 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
17564 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
17565 + .ivsize = AES_BLOCK_SIZE,
17566 + },
17567 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17568 + }
17569 +};
17570 +
17571 +static struct caam_aead_alg driver_aeads[] = {
17572 + {
17573 + .aead = {
17574 + .base = {
17575 + .cra_name = "rfc4106(gcm(aes))",
17576 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17577 + .cra_blocksize = 1,
17578 + },
17579 + .setkey = rfc4106_setkey,
17580 + .setauthsize = rfc4106_setauthsize,
17581 + .encrypt = ipsec_gcm_encrypt,
17582 + .decrypt = ipsec_gcm_decrypt,
17583 + .ivsize = 8,
17584 + .maxauthsize = AES_BLOCK_SIZE,
17585 + },
17586 + .caam = {
17587 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17588 + },
17589 + },
17590 + {
17591 + .aead = {
17592 + .base = {
17593 + .cra_name = "rfc4543(gcm(aes))",
17594 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17595 + .cra_blocksize = 1,
17596 + },
17597 + .setkey = rfc4543_setkey,
17598 + .setauthsize = rfc4543_setauthsize,
17599 + .encrypt = ipsec_gcm_encrypt,
17600 + .decrypt = ipsec_gcm_decrypt,
17601 + .ivsize = 8,
17602 + .maxauthsize = AES_BLOCK_SIZE,
17603 + },
17604 + .caam = {
17605 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17606 + },
17607 + },
17608 + /* Galois Counter Mode */
17609 + {
17610 + .aead = {
17611 + .base = {
17612 + .cra_name = "gcm(aes)",
17613 + .cra_driver_name = "gcm-aes-caam-qi2",
17614 + .cra_blocksize = 1,
17615 + },
17616 + .setkey = gcm_setkey,
17617 + .setauthsize = gcm_setauthsize,
17618 + .encrypt = aead_encrypt,
17619 + .decrypt = aead_decrypt,
17620 + .ivsize = 12,
17621 + .maxauthsize = AES_BLOCK_SIZE,
17622 + },
17623 + .caam = {
17624 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17625 + }
17626 + },
17627 + /* single-pass ipsec_esp descriptor */
17628 + {
17629 + .aead = {
17630 + .base = {
17631 + .cra_name = "authenc(hmac(md5),cbc(aes))",
17632 + .cra_driver_name = "authenc-hmac-md5-"
17633 + "cbc-aes-caam-qi2",
17634 + .cra_blocksize = AES_BLOCK_SIZE,
17635 + },
17636 + .setkey = aead_setkey,
17637 + .setauthsize = aead_setauthsize,
17638 + .encrypt = aead_encrypt,
17639 + .decrypt = aead_decrypt,
17640 + .ivsize = AES_BLOCK_SIZE,
17641 + .maxauthsize = MD5_DIGEST_SIZE,
17642 + },
17643 + .caam = {
17644 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17645 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17646 + OP_ALG_AAI_HMAC_PRECOMP,
17647 + }
17648 + },
17649 + {
17650 + .aead = {
17651 + .base = {
17652 + .cra_name = "echainiv(authenc(hmac(md5),"
17653 + "cbc(aes)))",
17654 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17655 + "cbc-aes-caam-qi2",
17656 + .cra_blocksize = AES_BLOCK_SIZE,
17657 + },
17658 + .setkey = aead_setkey,
17659 + .setauthsize = aead_setauthsize,
17660 + .encrypt = aead_encrypt,
17661 + .decrypt = aead_decrypt,
17662 + .ivsize = AES_BLOCK_SIZE,
17663 + .maxauthsize = MD5_DIGEST_SIZE,
17664 + },
17665 + .caam = {
17666 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17667 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17668 + OP_ALG_AAI_HMAC_PRECOMP,
17669 + .geniv = true,
17670 + }
17671 + },
17672 + {
17673 + .aead = {
17674 + .base = {
17675 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
17676 + .cra_driver_name = "authenc-hmac-sha1-"
17677 + "cbc-aes-caam-qi2",
17678 + .cra_blocksize = AES_BLOCK_SIZE,
17679 + },
17680 + .setkey = aead_setkey,
17681 + .setauthsize = aead_setauthsize,
17682 + .encrypt = aead_encrypt,
17683 + .decrypt = aead_decrypt,
17684 + .ivsize = AES_BLOCK_SIZE,
17685 + .maxauthsize = SHA1_DIGEST_SIZE,
17686 + },
17687 + .caam = {
17688 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17689 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17690 + OP_ALG_AAI_HMAC_PRECOMP,
17691 + }
17692 + },
17693 + {
17694 + .aead = {
17695 + .base = {
17696 + .cra_name = "echainiv(authenc(hmac(sha1),"
17697 + "cbc(aes)))",
17698 + .cra_driver_name = "echainiv-authenc-"
17699 + "hmac-sha1-cbc-aes-caam-qi2",
17700 + .cra_blocksize = AES_BLOCK_SIZE,
17701 + },
17702 + .setkey = aead_setkey,
17703 + .setauthsize = aead_setauthsize,
17704 + .encrypt = aead_encrypt,
17705 + .decrypt = aead_decrypt,
17706 + .ivsize = AES_BLOCK_SIZE,
17707 + .maxauthsize = SHA1_DIGEST_SIZE,
17708 + },
17709 + .caam = {
17710 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17711 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17712 + OP_ALG_AAI_HMAC_PRECOMP,
17713 + .geniv = true,
17714 + },
17715 + },
17716 + {
17717 + .aead = {
17718 + .base = {
17719 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
17720 + .cra_driver_name = "authenc-hmac-sha224-"
17721 + "cbc-aes-caam-qi2",
17722 + .cra_blocksize = AES_BLOCK_SIZE,
17723 + },
17724 + .setkey = aead_setkey,
17725 + .setauthsize = aead_setauthsize,
17726 + .encrypt = aead_encrypt,
17727 + .decrypt = aead_decrypt,
17728 + .ivsize = AES_BLOCK_SIZE,
17729 + .maxauthsize = SHA224_DIGEST_SIZE,
17730 + },
17731 + .caam = {
17732 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17733 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17734 + OP_ALG_AAI_HMAC_PRECOMP,
17735 + }
17736 + },
17737 + {
17738 + .aead = {
17739 + .base = {
17740 + .cra_name = "echainiv(authenc(hmac(sha224),"
17741 + "cbc(aes)))",
17742 + .cra_driver_name = "echainiv-authenc-"
17743 + "hmac-sha224-cbc-aes-caam-qi2",
17744 + .cra_blocksize = AES_BLOCK_SIZE,
17745 + },
17746 + .setkey = aead_setkey,
17747 + .setauthsize = aead_setauthsize,
17748 + .encrypt = aead_encrypt,
17749 + .decrypt = aead_decrypt,
17750 + .ivsize = AES_BLOCK_SIZE,
17751 + .maxauthsize = SHA224_DIGEST_SIZE,
17752 + },
17753 + .caam = {
17754 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17755 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17756 + OP_ALG_AAI_HMAC_PRECOMP,
17757 + .geniv = true,
17758 + }
17759 + },
17760 + {
17761 + .aead = {
17762 + .base = {
17763 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
17764 + .cra_driver_name = "authenc-hmac-sha256-"
17765 + "cbc-aes-caam-qi2",
17766 + .cra_blocksize = AES_BLOCK_SIZE,
17767 + },
17768 + .setkey = aead_setkey,
17769 + .setauthsize = aead_setauthsize,
17770 + .encrypt = aead_encrypt,
17771 + .decrypt = aead_decrypt,
17772 + .ivsize = AES_BLOCK_SIZE,
17773 + .maxauthsize = SHA256_DIGEST_SIZE,
17774 + },
17775 + .caam = {
17776 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17777 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17778 + OP_ALG_AAI_HMAC_PRECOMP,
17779 + }
17780 + },
17781 + {
17782 + .aead = {
17783 + .base = {
17784 + .cra_name = "echainiv(authenc(hmac(sha256),"
17785 + "cbc(aes)))",
17786 + .cra_driver_name = "echainiv-authenc-"
17787 + "hmac-sha256-cbc-aes-"
17788 + "caam-qi2",
17789 + .cra_blocksize = AES_BLOCK_SIZE,
17790 + },
17791 + .setkey = aead_setkey,
17792 + .setauthsize = aead_setauthsize,
17793 + .encrypt = aead_encrypt,
17794 + .decrypt = aead_decrypt,
17795 + .ivsize = AES_BLOCK_SIZE,
17796 + .maxauthsize = SHA256_DIGEST_SIZE,
17797 + },
17798 + .caam = {
17799 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17800 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17801 + OP_ALG_AAI_HMAC_PRECOMP,
17802 + .geniv = true,
17803 + }
17804 + },
17805 + {
17806 + .aead = {
17807 + .base = {
17808 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
17809 + .cra_driver_name = "authenc-hmac-sha384-"
17810 + "cbc-aes-caam-qi2",
17811 + .cra_blocksize = AES_BLOCK_SIZE,
17812 + },
17813 + .setkey = aead_setkey,
17814 + .setauthsize = aead_setauthsize,
17815 + .encrypt = aead_encrypt,
17816 + .decrypt = aead_decrypt,
17817 + .ivsize = AES_BLOCK_SIZE,
17818 + .maxauthsize = SHA384_DIGEST_SIZE,
17819 + },
17820 + .caam = {
17821 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17822 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17823 + OP_ALG_AAI_HMAC_PRECOMP,
17824 + }
17825 + },
17826 + {
17827 + .aead = {
17828 + .base = {
17829 + .cra_name = "echainiv(authenc(hmac(sha384),"
17830 + "cbc(aes)))",
17831 + .cra_driver_name = "echainiv-authenc-"
17832 + "hmac-sha384-cbc-aes-"
17833 + "caam-qi2",
17834 + .cra_blocksize = AES_BLOCK_SIZE,
17835 + },
17836 + .setkey = aead_setkey,
17837 + .setauthsize = aead_setauthsize,
17838 + .encrypt = aead_encrypt,
17839 + .decrypt = aead_decrypt,
17840 + .ivsize = AES_BLOCK_SIZE,
17841 + .maxauthsize = SHA384_DIGEST_SIZE,
17842 + },
17843 + .caam = {
17844 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17845 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17846 + OP_ALG_AAI_HMAC_PRECOMP,
17847 + .geniv = true,
17848 + }
17849 + },
17850 + {
17851 + .aead = {
17852 + .base = {
17853 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
17854 + .cra_driver_name = "authenc-hmac-sha512-"
17855 + "cbc-aes-caam-qi2",
17856 + .cra_blocksize = AES_BLOCK_SIZE,
17857 + },
17858 + .setkey = aead_setkey,
17859 + .setauthsize = aead_setauthsize,
17860 + .encrypt = aead_encrypt,
17861 + .decrypt = aead_decrypt,
17862 + .ivsize = AES_BLOCK_SIZE,
17863 + .maxauthsize = SHA512_DIGEST_SIZE,
17864 + },
17865 + .caam = {
17866 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17867 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17868 + OP_ALG_AAI_HMAC_PRECOMP,
17869 + }
17870 + },
17871 + {
17872 + .aead = {
17873 + .base = {
17874 + .cra_name = "echainiv(authenc(hmac(sha512),"
17875 + "cbc(aes)))",
17876 + .cra_driver_name = "echainiv-authenc-"
17877 + "hmac-sha512-cbc-aes-"
17878 + "caam-qi2",
17879 + .cra_blocksize = AES_BLOCK_SIZE,
17880 + },
17881 + .setkey = aead_setkey,
17882 + .setauthsize = aead_setauthsize,
17883 + .encrypt = aead_encrypt,
17884 + .decrypt = aead_decrypt,
17885 + .ivsize = AES_BLOCK_SIZE,
17886 + .maxauthsize = SHA512_DIGEST_SIZE,
17887 + },
17888 + .caam = {
17889 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17890 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17891 + OP_ALG_AAI_HMAC_PRECOMP,
17892 + .geniv = true,
17893 + }
17894 + },
17895 + {
17896 + .aead = {
17897 + .base = {
17898 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17899 + .cra_driver_name = "authenc-hmac-md5-"
17900 + "cbc-des3_ede-caam-qi2",
17901 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17902 + },
17903 + .setkey = aead_setkey,
17904 + .setauthsize = aead_setauthsize,
17905 + .encrypt = aead_encrypt,
17906 + .decrypt = aead_decrypt,
17907 + .ivsize = DES3_EDE_BLOCK_SIZE,
17908 + .maxauthsize = MD5_DIGEST_SIZE,
17909 + },
17910 + .caam = {
17911 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17912 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17913 + OP_ALG_AAI_HMAC_PRECOMP,
17914 + }
17915 + },
17916 + {
17917 + .aead = {
17918 + .base = {
17919 + .cra_name = "echainiv(authenc(hmac(md5),"
17920 + "cbc(des3_ede)))",
17921 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17922 + "cbc-des3_ede-caam-qi2",
17923 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17924 + },
17925 + .setkey = aead_setkey,
17926 + .setauthsize = aead_setauthsize,
17927 + .encrypt = aead_encrypt,
17928 + .decrypt = aead_decrypt,
17929 + .ivsize = DES3_EDE_BLOCK_SIZE,
17930 + .maxauthsize = MD5_DIGEST_SIZE,
17931 + },
17932 + .caam = {
17933 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17934 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17935 + OP_ALG_AAI_HMAC_PRECOMP,
17936 + .geniv = true,
17937 + }
17938 + },
17939 + {
17940 + .aead = {
17941 + .base = {
17942 + .cra_name = "authenc(hmac(sha1),"
17943 + "cbc(des3_ede))",
17944 + .cra_driver_name = "authenc-hmac-sha1-"
17945 + "cbc-des3_ede-caam-qi2",
17946 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17947 + },
17948 + .setkey = aead_setkey,
17949 + .setauthsize = aead_setauthsize,
17950 + .encrypt = aead_encrypt,
17951 + .decrypt = aead_decrypt,
17952 + .ivsize = DES3_EDE_BLOCK_SIZE,
17953 + .maxauthsize = SHA1_DIGEST_SIZE,
17954 + },
17955 + .caam = {
17956 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17957 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17958 + OP_ALG_AAI_HMAC_PRECOMP,
17959 + },
17960 + },
17961 + {
17962 + .aead = {
17963 + .base = {
17964 + .cra_name = "echainiv(authenc(hmac(sha1),"
17965 + "cbc(des3_ede)))",
17966 + .cra_driver_name = "echainiv-authenc-"
17967 + "hmac-sha1-"
17968 + "cbc-des3_ede-caam-qi2",
17969 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17970 + },
17971 + .setkey = aead_setkey,
17972 + .setauthsize = aead_setauthsize,
17973 + .encrypt = aead_encrypt,
17974 + .decrypt = aead_decrypt,
17975 + .ivsize = DES3_EDE_BLOCK_SIZE,
17976 + .maxauthsize = SHA1_DIGEST_SIZE,
17977 + },
17978 + .caam = {
17979 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17980 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17981 + OP_ALG_AAI_HMAC_PRECOMP,
17982 + .geniv = true,
17983 + }
17984 + },
17985 + {
17986 + .aead = {
17987 + .base = {
17988 + .cra_name = "authenc(hmac(sha224),"
17989 + "cbc(des3_ede))",
17990 + .cra_driver_name = "authenc-hmac-sha224-"
17991 + "cbc-des3_ede-caam-qi2",
17992 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17993 + },
17994 + .setkey = aead_setkey,
17995 + .setauthsize = aead_setauthsize,
17996 + .encrypt = aead_encrypt,
17997 + .decrypt = aead_decrypt,
17998 + .ivsize = DES3_EDE_BLOCK_SIZE,
17999 + .maxauthsize = SHA224_DIGEST_SIZE,
18000 + },
18001 + .caam = {
18002 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18003 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18004 + OP_ALG_AAI_HMAC_PRECOMP,
18005 + },
18006 + },
18007 + {
18008 + .aead = {
18009 + .base = {
18010 + .cra_name = "echainiv(authenc(hmac(sha224),"
18011 + "cbc(des3_ede)))",
18012 + .cra_driver_name = "echainiv-authenc-"
18013 + "hmac-sha224-"
18014 + "cbc-des3_ede-caam-qi2",
18015 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18016 + },
18017 + .setkey = aead_setkey,
18018 + .setauthsize = aead_setauthsize,
18019 + .encrypt = aead_encrypt,
18020 + .decrypt = aead_decrypt,
18021 + .ivsize = DES3_EDE_BLOCK_SIZE,
18022 + .maxauthsize = SHA224_DIGEST_SIZE,
18023 + },
18024 + .caam = {
18025 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18026 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18027 + OP_ALG_AAI_HMAC_PRECOMP,
18028 + .geniv = true,
18029 + }
18030 + },
18031 + {
18032 + .aead = {
18033 + .base = {
18034 + .cra_name = "authenc(hmac(sha256),"
18035 + "cbc(des3_ede))",
18036 + .cra_driver_name = "authenc-hmac-sha256-"
18037 + "cbc-des3_ede-caam-qi2",
18038 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18039 + },
18040 + .setkey = aead_setkey,
18041 + .setauthsize = aead_setauthsize,
18042 + .encrypt = aead_encrypt,
18043 + .decrypt = aead_decrypt,
18044 + .ivsize = DES3_EDE_BLOCK_SIZE,
18045 + .maxauthsize = SHA256_DIGEST_SIZE,
18046 + },
18047 + .caam = {
18048 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18049 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18050 + OP_ALG_AAI_HMAC_PRECOMP,
18051 + },
18052 + },
18053 + {
18054 + .aead = {
18055 + .base = {
18056 + .cra_name = "echainiv(authenc(hmac(sha256),"
18057 + "cbc(des3_ede)))",
18058 + .cra_driver_name = "echainiv-authenc-"
18059 + "hmac-sha256-"
18060 + "cbc-des3_ede-caam-qi2",
18061 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18062 + },
18063 + .setkey = aead_setkey,
18064 + .setauthsize = aead_setauthsize,
18065 + .encrypt = aead_encrypt,
18066 + .decrypt = aead_decrypt,
18067 + .ivsize = DES3_EDE_BLOCK_SIZE,
18068 + .maxauthsize = SHA256_DIGEST_SIZE,
18069 + },
18070 + .caam = {
18071 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18072 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18073 + OP_ALG_AAI_HMAC_PRECOMP,
18074 + .geniv = true,
18075 + }
18076 + },
18077 + {
18078 + .aead = {
18079 + .base = {
18080 + .cra_name = "authenc(hmac(sha384),"
18081 + "cbc(des3_ede))",
18082 + .cra_driver_name = "authenc-hmac-sha384-"
18083 + "cbc-des3_ede-caam-qi2",
18084 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18085 + },
18086 + .setkey = aead_setkey,
18087 + .setauthsize = aead_setauthsize,
18088 + .encrypt = aead_encrypt,
18089 + .decrypt = aead_decrypt,
18090 + .ivsize = DES3_EDE_BLOCK_SIZE,
18091 + .maxauthsize = SHA384_DIGEST_SIZE,
18092 + },
18093 + .caam = {
18094 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18095 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18096 + OP_ALG_AAI_HMAC_PRECOMP,
18097 + },
18098 + },
18099 + {
18100 + .aead = {
18101 + .base = {
18102 + .cra_name = "echainiv(authenc(hmac(sha384),"
18103 + "cbc(des3_ede)))",
18104 + .cra_driver_name = "echainiv-authenc-"
18105 + "hmac-sha384-"
18106 + "cbc-des3_ede-caam-qi2",
18107 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18108 + },
18109 + .setkey = aead_setkey,
18110 + .setauthsize = aead_setauthsize,
18111 + .encrypt = aead_encrypt,
18112 + .decrypt = aead_decrypt,
18113 + .ivsize = DES3_EDE_BLOCK_SIZE,
18114 + .maxauthsize = SHA384_DIGEST_SIZE,
18115 + },
18116 + .caam = {
18117 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18118 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18119 + OP_ALG_AAI_HMAC_PRECOMP,
18120 + .geniv = true,
18121 + }
18122 + },
18123 + {
18124 + .aead = {
18125 + .base = {
18126 + .cra_name = "authenc(hmac(sha512),"
18127 + "cbc(des3_ede))",
18128 + .cra_driver_name = "authenc-hmac-sha512-"
18129 + "cbc-des3_ede-caam-qi2",
18130 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18131 + },
18132 + .setkey = aead_setkey,
18133 + .setauthsize = aead_setauthsize,
18134 + .encrypt = aead_encrypt,
18135 + .decrypt = aead_decrypt,
18136 + .ivsize = DES3_EDE_BLOCK_SIZE,
18137 + .maxauthsize = SHA512_DIGEST_SIZE,
18138 + },
18139 + .caam = {
18140 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18141 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18142 + OP_ALG_AAI_HMAC_PRECOMP,
18143 + },
18144 + },
18145 + {
18146 + .aead = {
18147 + .base = {
18148 + .cra_name = "echainiv(authenc(hmac(sha512),"
18149 + "cbc(des3_ede)))",
18150 + .cra_driver_name = "echainiv-authenc-"
18151 + "hmac-sha512-"
18152 + "cbc-des3_ede-caam-qi2",
18153 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18154 + },
18155 + .setkey = aead_setkey,
18156 + .setauthsize = aead_setauthsize,
18157 + .encrypt = aead_encrypt,
18158 + .decrypt = aead_decrypt,
18159 + .ivsize = DES3_EDE_BLOCK_SIZE,
18160 + .maxauthsize = SHA512_DIGEST_SIZE,
18161 + },
18162 + .caam = {
18163 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18164 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18165 + OP_ALG_AAI_HMAC_PRECOMP,
18166 + .geniv = true,
18167 + }
18168 + },
18169 + {
18170 + .aead = {
18171 + .base = {
18172 + .cra_name = "authenc(hmac(md5),cbc(des))",
18173 + .cra_driver_name = "authenc-hmac-md5-"
18174 + "cbc-des-caam-qi2",
18175 + .cra_blocksize = DES_BLOCK_SIZE,
18176 + },
18177 + .setkey = aead_setkey,
18178 + .setauthsize = aead_setauthsize,
18179 + .encrypt = aead_encrypt,
18180 + .decrypt = aead_decrypt,
18181 + .ivsize = DES_BLOCK_SIZE,
18182 + .maxauthsize = MD5_DIGEST_SIZE,
18183 + },
18184 + .caam = {
18185 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18186 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18187 + OP_ALG_AAI_HMAC_PRECOMP,
18188 + },
18189 + },
18190 + {
18191 + .aead = {
18192 + .base = {
18193 + .cra_name = "echainiv(authenc(hmac(md5),"
18194 + "cbc(des)))",
18195 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18196 + "cbc-des-caam-qi2",
18197 + .cra_blocksize = DES_BLOCK_SIZE,
18198 + },
18199 + .setkey = aead_setkey,
18200 + .setauthsize = aead_setauthsize,
18201 + .encrypt = aead_encrypt,
18202 + .decrypt = aead_decrypt,
18203 + .ivsize = DES_BLOCK_SIZE,
18204 + .maxauthsize = MD5_DIGEST_SIZE,
18205 + },
18206 + .caam = {
18207 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18208 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18209 + OP_ALG_AAI_HMAC_PRECOMP,
18210 + .geniv = true,
18211 + }
18212 + },
18213 + {
18214 + .aead = {
18215 + .base = {
18216 + .cra_name = "authenc(hmac(sha1),cbc(des))",
18217 + .cra_driver_name = "authenc-hmac-sha1-"
18218 + "cbc-des-caam-qi2",
18219 + .cra_blocksize = DES_BLOCK_SIZE,
18220 + },
18221 + .setkey = aead_setkey,
18222 + .setauthsize = aead_setauthsize,
18223 + .encrypt = aead_encrypt,
18224 + .decrypt = aead_decrypt,
18225 + .ivsize = DES_BLOCK_SIZE,
18226 + .maxauthsize = SHA1_DIGEST_SIZE,
18227 + },
18228 + .caam = {
18229 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18230 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18231 + OP_ALG_AAI_HMAC_PRECOMP,
18232 + },
18233 + },
18234 + {
18235 + .aead = {
18236 + .base = {
18237 + .cra_name = "echainiv(authenc(hmac(sha1),"
18238 + "cbc(des)))",
18239 + .cra_driver_name = "echainiv-authenc-"
18240 + "hmac-sha1-cbc-des-caam-qi2",
18241 + .cra_blocksize = DES_BLOCK_SIZE,
18242 + },
18243 + .setkey = aead_setkey,
18244 + .setauthsize = aead_setauthsize,
18245 + .encrypt = aead_encrypt,
18246 + .decrypt = aead_decrypt,
18247 + .ivsize = DES_BLOCK_SIZE,
18248 + .maxauthsize = SHA1_DIGEST_SIZE,
18249 + },
18250 + .caam = {
18251 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18252 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18253 + OP_ALG_AAI_HMAC_PRECOMP,
18254 + .geniv = true,
18255 + }
18256 + },
18257 + {
18258 + .aead = {
18259 + .base = {
18260 + .cra_name = "authenc(hmac(sha224),cbc(des))",
18261 + .cra_driver_name = "authenc-hmac-sha224-"
18262 + "cbc-des-caam-qi2",
18263 + .cra_blocksize = DES_BLOCK_SIZE,
18264 + },
18265 + .setkey = aead_setkey,
18266 + .setauthsize = aead_setauthsize,
18267 + .encrypt = aead_encrypt,
18268 + .decrypt = aead_decrypt,
18269 + .ivsize = DES_BLOCK_SIZE,
18270 + .maxauthsize = SHA224_DIGEST_SIZE,
18271 + },
18272 + .caam = {
18273 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18274 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18275 + OP_ALG_AAI_HMAC_PRECOMP,
18276 + },
18277 + },
18278 + {
18279 + .aead = {
18280 + .base = {
18281 + .cra_name = "echainiv(authenc(hmac(sha224),"
18282 + "cbc(des)))",
18283 + .cra_driver_name = "echainiv-authenc-"
18284 + "hmac-sha224-cbc-des-"
18285 + "caam-qi2",
18286 + .cra_blocksize = DES_BLOCK_SIZE,
18287 + },
18288 + .setkey = aead_setkey,
18289 + .setauthsize = aead_setauthsize,
18290 + .encrypt = aead_encrypt,
18291 + .decrypt = aead_decrypt,
18292 + .ivsize = DES_BLOCK_SIZE,
18293 + .maxauthsize = SHA224_DIGEST_SIZE,
18294 + },
18295 + .caam = {
18296 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18297 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18298 + OP_ALG_AAI_HMAC_PRECOMP,
18299 + .geniv = true,
18300 + }
18301 + },
18302 + {
18303 + .aead = {
18304 + .base = {
18305 + .cra_name = "authenc(hmac(sha256),cbc(des))",
18306 + .cra_driver_name = "authenc-hmac-sha256-"
18307 + "cbc-des-caam-qi2",
18308 + .cra_blocksize = DES_BLOCK_SIZE,
18309 + },
18310 + .setkey = aead_setkey,
18311 + .setauthsize = aead_setauthsize,
18312 + .encrypt = aead_encrypt,
18313 + .decrypt = aead_decrypt,
18314 + .ivsize = DES_BLOCK_SIZE,
18315 + .maxauthsize = SHA256_DIGEST_SIZE,
18316 + },
18317 + .caam = {
18318 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18319 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18320 + OP_ALG_AAI_HMAC_PRECOMP,
18321 + },
18322 + },
18323 + {
18324 + .aead = {
18325 + .base = {
18326 + .cra_name = "echainiv(authenc(hmac(sha256),"
18327 + "cbc(des)))",
18328 + .cra_driver_name = "echainiv-authenc-"
18329 +						   "hmac-sha256-cbc-des-"
18330 + "caam-qi2",
18331 + .cra_blocksize = DES_BLOCK_SIZE,
18332 + },
18333 + .setkey = aead_setkey,
18334 + .setauthsize = aead_setauthsize,
18335 + .encrypt = aead_encrypt,
18336 + .decrypt = aead_decrypt,
18337 + .ivsize = DES_BLOCK_SIZE,
18338 + .maxauthsize = SHA256_DIGEST_SIZE,
18339 + },
18340 + .caam = {
18341 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18342 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18343 + OP_ALG_AAI_HMAC_PRECOMP,
18344 + .geniv = true,
18345 + },
18346 + },
18347 + {
18348 + .aead = {
18349 + .base = {
18350 + .cra_name = "authenc(hmac(sha384),cbc(des))",
18351 + .cra_driver_name = "authenc-hmac-sha384-"
18352 + "cbc-des-caam-qi2",
18353 + .cra_blocksize = DES_BLOCK_SIZE,
18354 + },
18355 + .setkey = aead_setkey,
18356 + .setauthsize = aead_setauthsize,
18357 + .encrypt = aead_encrypt,
18358 + .decrypt = aead_decrypt,
18359 + .ivsize = DES_BLOCK_SIZE,
18360 + .maxauthsize = SHA384_DIGEST_SIZE,
18361 + },
18362 + .caam = {
18363 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18364 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18365 + OP_ALG_AAI_HMAC_PRECOMP,
18366 + },
18367 + },
18368 + {
18369 + .aead = {
18370 + .base = {
18371 + .cra_name = "echainiv(authenc(hmac(sha384),"
18372 + "cbc(des)))",
18373 + .cra_driver_name = "echainiv-authenc-"
18374 + "hmac-sha384-cbc-des-"
18375 + "caam-qi2",
18376 + .cra_blocksize = DES_BLOCK_SIZE,
18377 + },
18378 + .setkey = aead_setkey,
18379 + .setauthsize = aead_setauthsize,
18380 + .encrypt = aead_encrypt,
18381 + .decrypt = aead_decrypt,
18382 + .ivsize = DES_BLOCK_SIZE,
18383 + .maxauthsize = SHA384_DIGEST_SIZE,
18384 + },
18385 + .caam = {
18386 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18387 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18388 + OP_ALG_AAI_HMAC_PRECOMP,
18389 + .geniv = true,
18390 + }
18391 + },
18392 + {
18393 + .aead = {
18394 + .base = {
18395 + .cra_name = "authenc(hmac(sha512),cbc(des))",
18396 + .cra_driver_name = "authenc-hmac-sha512-"
18397 + "cbc-des-caam-qi2",
18398 + .cra_blocksize = DES_BLOCK_SIZE,
18399 + },
18400 + .setkey = aead_setkey,
18401 + .setauthsize = aead_setauthsize,
18402 + .encrypt = aead_encrypt,
18403 + .decrypt = aead_decrypt,
18404 + .ivsize = DES_BLOCK_SIZE,
18405 + .maxauthsize = SHA512_DIGEST_SIZE,
18406 + },
18407 + .caam = {
18408 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18409 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18410 + OP_ALG_AAI_HMAC_PRECOMP,
18411 + }
18412 + },
18413 + {
18414 + .aead = {
18415 + .base = {
18416 + .cra_name = "echainiv(authenc(hmac(sha512),"
18417 + "cbc(des)))",
18418 + .cra_driver_name = "echainiv-authenc-"
18419 + "hmac-sha512-cbc-des-"
18420 + "caam-qi2",
18421 + .cra_blocksize = DES_BLOCK_SIZE,
18422 + },
18423 + .setkey = aead_setkey,
18424 + .setauthsize = aead_setauthsize,
18425 + .encrypt = aead_encrypt,
18426 + .decrypt = aead_decrypt,
18427 + .ivsize = DES_BLOCK_SIZE,
18428 + .maxauthsize = SHA512_DIGEST_SIZE,
18429 + },
18430 + .caam = {
18431 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18432 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18433 + OP_ALG_AAI_HMAC_PRECOMP,
18434 + .geniv = true,
18435 + }
18436 + },
18437 + {
18438 + .aead = {
18439 + .base = {
18440 + .cra_name = "authenc(hmac(md5),"
18441 + "rfc3686(ctr(aes)))",
18442 + .cra_driver_name = "authenc-hmac-md5-"
18443 + "rfc3686-ctr-aes-caam-qi2",
18444 + .cra_blocksize = 1,
18445 + },
18446 + .setkey = aead_setkey,
18447 + .setauthsize = aead_setauthsize,
18448 + .encrypt = aead_encrypt,
18449 + .decrypt = aead_decrypt,
18450 + .ivsize = CTR_RFC3686_IV_SIZE,
18451 + .maxauthsize = MD5_DIGEST_SIZE,
18452 + },
18453 + .caam = {
18454 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18455 + OP_ALG_AAI_CTR_MOD128,
18456 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18457 + OP_ALG_AAI_HMAC_PRECOMP,
18458 + .rfc3686 = true,
18459 + },
18460 + },
18461 + {
18462 + .aead = {
18463 + .base = {
18464 + .cra_name = "seqiv(authenc("
18465 + "hmac(md5),rfc3686(ctr(aes))))",
18466 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
18467 + "rfc3686-ctr-aes-caam-qi2",
18468 + .cra_blocksize = 1,
18469 + },
18470 + .setkey = aead_setkey,
18471 + .setauthsize = aead_setauthsize,
18472 + .encrypt = aead_encrypt,
18473 + .decrypt = aead_decrypt,
18474 + .ivsize = CTR_RFC3686_IV_SIZE,
18475 + .maxauthsize = MD5_DIGEST_SIZE,
18476 + },
18477 + .caam = {
18478 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18479 + OP_ALG_AAI_CTR_MOD128,
18480 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18481 + OP_ALG_AAI_HMAC_PRECOMP,
18482 + .rfc3686 = true,
18483 + .geniv = true,
18484 + },
18485 + },
18486 + {
18487 + .aead = {
18488 + .base = {
18489 + .cra_name = "authenc(hmac(sha1),"
18490 + "rfc3686(ctr(aes)))",
18491 + .cra_driver_name = "authenc-hmac-sha1-"
18492 + "rfc3686-ctr-aes-caam-qi2",
18493 + .cra_blocksize = 1,
18494 + },
18495 + .setkey = aead_setkey,
18496 + .setauthsize = aead_setauthsize,
18497 + .encrypt = aead_encrypt,
18498 + .decrypt = aead_decrypt,
18499 + .ivsize = CTR_RFC3686_IV_SIZE,
18500 + .maxauthsize = SHA1_DIGEST_SIZE,
18501 + },
18502 + .caam = {
18503 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18504 + OP_ALG_AAI_CTR_MOD128,
18505 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18506 + OP_ALG_AAI_HMAC_PRECOMP,
18507 + .rfc3686 = true,
18508 + },
18509 + },
18510 + {
18511 + .aead = {
18512 + .base = {
18513 + .cra_name = "seqiv(authenc("
18514 + "hmac(sha1),rfc3686(ctr(aes))))",
18515 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18516 + "rfc3686-ctr-aes-caam-qi2",
18517 + .cra_blocksize = 1,
18518 + },
18519 + .setkey = aead_setkey,
18520 + .setauthsize = aead_setauthsize,
18521 + .encrypt = aead_encrypt,
18522 + .decrypt = aead_decrypt,
18523 + .ivsize = CTR_RFC3686_IV_SIZE,
18524 + .maxauthsize = SHA1_DIGEST_SIZE,
18525 + },
18526 + .caam = {
18527 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18528 + OP_ALG_AAI_CTR_MOD128,
18529 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18530 + OP_ALG_AAI_HMAC_PRECOMP,
18531 + .rfc3686 = true,
18532 + .geniv = true,
18533 + },
18534 + },
18535 + {
18536 + .aead = {
18537 + .base = {
18538 + .cra_name = "authenc(hmac(sha224),"
18539 + "rfc3686(ctr(aes)))",
18540 + .cra_driver_name = "authenc-hmac-sha224-"
18541 + "rfc3686-ctr-aes-caam-qi2",
18542 + .cra_blocksize = 1,
18543 + },
18544 + .setkey = aead_setkey,
18545 + .setauthsize = aead_setauthsize,
18546 + .encrypt = aead_encrypt,
18547 + .decrypt = aead_decrypt,
18548 + .ivsize = CTR_RFC3686_IV_SIZE,
18549 + .maxauthsize = SHA224_DIGEST_SIZE,
18550 + },
18551 + .caam = {
18552 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18553 + OP_ALG_AAI_CTR_MOD128,
18554 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18555 + OP_ALG_AAI_HMAC_PRECOMP,
18556 + .rfc3686 = true,
18557 + },
18558 + },
18559 + {
18560 + .aead = {
18561 + .base = {
18562 + .cra_name = "seqiv(authenc("
18563 + "hmac(sha224),rfc3686(ctr(aes))))",
18564 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18565 + "rfc3686-ctr-aes-caam-qi2",
18566 + .cra_blocksize = 1,
18567 + },
18568 + .setkey = aead_setkey,
18569 + .setauthsize = aead_setauthsize,
18570 + .encrypt = aead_encrypt,
18571 + .decrypt = aead_decrypt,
18572 + .ivsize = CTR_RFC3686_IV_SIZE,
18573 + .maxauthsize = SHA224_DIGEST_SIZE,
18574 + },
18575 + .caam = {
18576 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18577 + OP_ALG_AAI_CTR_MOD128,
18578 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18579 + OP_ALG_AAI_HMAC_PRECOMP,
18580 + .rfc3686 = true,
18581 + .geniv = true,
18582 + },
18583 + },
18584 + {
18585 + .aead = {
18586 + .base = {
18587 + .cra_name = "authenc(hmac(sha256),"
18588 + "rfc3686(ctr(aes)))",
18589 + .cra_driver_name = "authenc-hmac-sha256-"
18590 + "rfc3686-ctr-aes-caam-qi2",
18591 + .cra_blocksize = 1,
18592 + },
18593 + .setkey = aead_setkey,
18594 + .setauthsize = aead_setauthsize,
18595 + .encrypt = aead_encrypt,
18596 + .decrypt = aead_decrypt,
18597 + .ivsize = CTR_RFC3686_IV_SIZE,
18598 + .maxauthsize = SHA256_DIGEST_SIZE,
18599 + },
18600 + .caam = {
18601 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18602 + OP_ALG_AAI_CTR_MOD128,
18603 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18604 + OP_ALG_AAI_HMAC_PRECOMP,
18605 + .rfc3686 = true,
18606 + },
18607 + },
18608 + {
18609 + .aead = {
18610 + .base = {
18611 + .cra_name = "seqiv(authenc(hmac(sha256),"
18612 + "rfc3686(ctr(aes))))",
18613 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18614 + "rfc3686-ctr-aes-caam-qi2",
18615 + .cra_blocksize = 1,
18616 + },
18617 + .setkey = aead_setkey,
18618 + .setauthsize = aead_setauthsize,
18619 + .encrypt = aead_encrypt,
18620 + .decrypt = aead_decrypt,
18621 + .ivsize = CTR_RFC3686_IV_SIZE,
18622 + .maxauthsize = SHA256_DIGEST_SIZE,
18623 + },
18624 + .caam = {
18625 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18626 + OP_ALG_AAI_CTR_MOD128,
18627 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18628 + OP_ALG_AAI_HMAC_PRECOMP,
18629 + .rfc3686 = true,
18630 + .geniv = true,
18631 + },
18632 + },
18633 + {
18634 + .aead = {
18635 + .base = {
18636 + .cra_name = "authenc(hmac(sha384),"
18637 + "rfc3686(ctr(aes)))",
18638 + .cra_driver_name = "authenc-hmac-sha384-"
18639 + "rfc3686-ctr-aes-caam-qi2",
18640 + .cra_blocksize = 1,
18641 + },
18642 + .setkey = aead_setkey,
18643 + .setauthsize = aead_setauthsize,
18644 + .encrypt = aead_encrypt,
18645 + .decrypt = aead_decrypt,
18646 + .ivsize = CTR_RFC3686_IV_SIZE,
18647 + .maxauthsize = SHA384_DIGEST_SIZE,
18648 + },
18649 + .caam = {
18650 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18651 + OP_ALG_AAI_CTR_MOD128,
18652 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18653 + OP_ALG_AAI_HMAC_PRECOMP,
18654 + .rfc3686 = true,
18655 + },
18656 + },
18657 + {
18658 + .aead = {
18659 + .base = {
18660 + .cra_name = "seqiv(authenc(hmac(sha384),"
18661 + "rfc3686(ctr(aes))))",
18662 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18663 + "rfc3686-ctr-aes-caam-qi2",
18664 + .cra_blocksize = 1,
18665 + },
18666 + .setkey = aead_setkey,
18667 + .setauthsize = aead_setauthsize,
18668 + .encrypt = aead_encrypt,
18669 + .decrypt = aead_decrypt,
18670 + .ivsize = CTR_RFC3686_IV_SIZE,
18671 + .maxauthsize = SHA384_DIGEST_SIZE,
18672 + },
18673 + .caam = {
18674 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18675 + OP_ALG_AAI_CTR_MOD128,
18676 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18677 + OP_ALG_AAI_HMAC_PRECOMP,
18678 + .rfc3686 = true,
18679 + .geniv = true,
18680 + },
18681 + },
18682 + {
18683 + .aead = {
18684 + .base = {
18685 + .cra_name = "authenc(hmac(sha512),"
18686 + "rfc3686(ctr(aes)))",
18687 + .cra_driver_name = "authenc-hmac-sha512-"
18688 + "rfc3686-ctr-aes-caam-qi2",
18689 + .cra_blocksize = 1,
18690 + },
18691 + .setkey = aead_setkey,
18692 + .setauthsize = aead_setauthsize,
18693 + .encrypt = aead_encrypt,
18694 + .decrypt = aead_decrypt,
18695 + .ivsize = CTR_RFC3686_IV_SIZE,
18696 + .maxauthsize = SHA512_DIGEST_SIZE,
18697 + },
18698 + .caam = {
18699 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18700 + OP_ALG_AAI_CTR_MOD128,
18701 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18702 + OP_ALG_AAI_HMAC_PRECOMP,
18703 + .rfc3686 = true,
18704 + },
18705 + },
18706 + {
18707 + .aead = {
18708 + .base = {
18709 + .cra_name = "seqiv(authenc(hmac(sha512),"
18710 + "rfc3686(ctr(aes))))",
18711 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18712 + "rfc3686-ctr-aes-caam-qi2",
18713 + .cra_blocksize = 1,
18714 + },
18715 + .setkey = aead_setkey,
18716 + .setauthsize = aead_setauthsize,
18717 + .encrypt = aead_encrypt,
18718 + .decrypt = aead_decrypt,
18719 + .ivsize = CTR_RFC3686_IV_SIZE,
18720 + .maxauthsize = SHA512_DIGEST_SIZE,
18721 + },
18722 + .caam = {
18723 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18724 + OP_ALG_AAI_CTR_MOD128,
18725 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18726 + OP_ALG_AAI_HMAC_PRECOMP,
18727 + .rfc3686 = true,
18728 + .geniv = true,
18729 + },
18730 + },
18731 + {
18732 + .aead = {
18733 + .base = {
18734 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
18735 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18736 + .cra_blocksize = AES_BLOCK_SIZE,
18737 + },
18738 + .setkey = tls_setkey,
18739 + .setauthsize = tls_setauthsize,
18740 + .encrypt = tls_encrypt,
18741 + .decrypt = tls_decrypt,
18742 + .ivsize = AES_BLOCK_SIZE,
18743 + .maxauthsize = SHA1_DIGEST_SIZE,
18744 + },
18745 + .caam = {
18746 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18747 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18748 + OP_ALG_AAI_HMAC_PRECOMP,
18749 + },
18750 + },
18751 +};
18752 +
18753 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18754 + *template)
18755 +{
18756 + struct caam_crypto_alg *t_alg;
18757 + struct crypto_alg *alg;
18758 +
18759 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18760 + if (!t_alg)
18761 + return ERR_PTR(-ENOMEM);
18762 +
18763 + alg = &t_alg->crypto_alg;
18764 +
18765 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18766 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18767 + template->driver_name);
18768 + alg->cra_module = THIS_MODULE;
18769 + alg->cra_exit = caam_cra_exit;
18770 + alg->cra_priority = CAAM_CRA_PRIORITY;
18771 + alg->cra_blocksize = template->blocksize;
18772 + alg->cra_alignmask = 0;
18773 + alg->cra_ctxsize = sizeof(struct caam_ctx);
18774 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18775 + template->type;
18776 + switch (template->type) {
18777 + case CRYPTO_ALG_TYPE_GIVCIPHER:
18778 + alg->cra_init = caam_cra_init_ablkcipher;
18779 + alg->cra_type = &crypto_givcipher_type;
18780 + alg->cra_ablkcipher = template->template_ablkcipher;
18781 + break;
18782 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
18783 + alg->cra_init = caam_cra_init_ablkcipher;
18784 + alg->cra_type = &crypto_ablkcipher_type;
18785 + alg->cra_ablkcipher = template->template_ablkcipher;
18786 + break;
18787 + }
18788 +
18789 + t_alg->caam.class1_alg_type = template->class1_alg_type;
18790 + t_alg->caam.class2_alg_type = template->class2_alg_type;
18791 +
18792 + return t_alg;
18793 +}
18794 +
18795 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18796 +{
18797 + struct aead_alg *alg = &t_alg->aead;
18798 +
18799 + alg->base.cra_module = THIS_MODULE;
18800 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
18801 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18802 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18803 +
18804 + alg->init = caam_cra_init_aead;
18805 + alg->exit = caam_cra_exit_aead;
18806 +}
18807 +
18808 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
18809 +{
18810 + struct dpaa2_caam_priv_per_cpu *ppriv;
18811 +
18812 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
18813 + napi_schedule_irqoff(&ppriv->napi);
18814 +}
18815 +
18816 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
18817 +{
18818 + struct device *dev = priv->dev;
18819 + struct dpaa2_io_notification_ctx *nctx;
18820 + struct dpaa2_caam_priv_per_cpu *ppriv;
18821 + int err, i = 0, cpu;
18822 +
18823 + for_each_online_cpu(cpu) {
18824 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18825 + ppriv->priv = priv;
18826 + nctx = &ppriv->nctx;
18827 + nctx->is_cdan = 0;
18828 + nctx->id = ppriv->rsp_fqid;
18829 + nctx->desired_cpu = cpu;
18830 + nctx->cb = dpaa2_caam_fqdan_cb;
18831 +
18832 + /* Register notification callbacks */
18833 + err = dpaa2_io_service_register(NULL, nctx);
18834 + if (unlikely(err)) {
18835 + dev_err(dev, "notification register failed\n");
18836 + nctx->cb = NULL;
18837 + goto err;
18838 + }
18839 +
18840 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
18841 + dev);
18842 + if (unlikely(!ppriv->store)) {
18843 + dev_err(dev, "dpaa2_io_store_create() failed\n");
18844 + goto err;
18845 + }
18846 +
18847 + if (++i == priv->num_pairs)
18848 + break;
18849 + }
18850 +
18851 + return 0;
18852 +
18853 +err:
18854 + for_each_online_cpu(cpu) {
18855 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18856 + if (!ppriv->nctx.cb)
18857 + break;
18858 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18859 + }
18860 +
18861 + for_each_online_cpu(cpu) {
18862 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18863 + if (!ppriv->store)
18864 + break;
18865 + dpaa2_io_store_destroy(ppriv->store);
18866 + }
18867 +
18868 + return err;
18869 +}
18870 +
18871 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
18872 +{
18873 + struct dpaa2_caam_priv_per_cpu *ppriv;
18874 + int i = 0, cpu;
18875 +
18876 + for_each_online_cpu(cpu) {
18877 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18878 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18879 + dpaa2_io_store_destroy(ppriv->store);
18880 +
18881 + if (++i == priv->num_pairs)
18882 + return;
18883 + }
18884 +}
18885 +
18886 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
18887 +{
18888 + struct dpseci_rx_queue_cfg rx_queue_cfg;
18889 + struct device *dev = priv->dev;
18890 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18891 + struct dpaa2_caam_priv_per_cpu *ppriv;
18892 + int err = 0, i = 0, cpu;
18893 +
18894 + /* Configure Rx queues */
18895 + for_each_online_cpu(cpu) {
18896 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18897 +
18898 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
18899 + DPSECI_QUEUE_OPT_USER_CTX;
18900 + rx_queue_cfg.order_preservation_en = 0;
18901 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
18902 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
18903 + /*
18904 + * Rx priority (WQ) doesn't really matter, since we use
18905 + * pull mode, i.e. volatile dequeues from specific FQs
18906 + */
18907 + rx_queue_cfg.dest_cfg.priority = 0;
18908 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
18909 +
18910 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
18911 + &rx_queue_cfg);
18912 + if (err) {
18913 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
18914 + err);
18915 + return err;
18916 + }
18917 +
18918 + if (++i == priv->num_pairs)
18919 + break;
18920 + }
18921 +
18922 + return err;
18923 +}
18924 +
18925 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
18926 +{
18927 + struct device *dev = priv->dev;
18928 +
18929 + if (!priv->cscn_mem)
18930 + return;
18931 +
18932 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
18933 + kfree(priv->cscn_mem);
18934 +}
18935 +
18936 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
18937 +{
18938 + struct device *dev = priv->dev;
18939 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18940 +
18941 + dpaa2_dpseci_congestion_free(priv);
18942 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
18943 +}
18944 +
18945 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
18946 + const struct dpaa2_fd *fd)
18947 +{
18948 + struct caam_request *req;
18949 + u32 fd_err;
18950 +
18951 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
18952 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
18953 + return;
18954 + }
18955 +
18956 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
18957 + if (unlikely(fd_err))
18958 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
18959 +
18960 + /*
18961 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
18962 + * in FD[ERR] or FD[FRC].
18963 + */
18964 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
18965 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
18966 + DMA_BIDIRECTIONAL);
18967 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
18968 +}
18969 +
18970 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
18971 +{
18972 + int err;
18973 +
18974 + /* Retry while portal is busy */
18975 + do {
18976 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
18977 + ppriv->store);
18978 + } while (err == -EBUSY);
18979 +
18980 + if (unlikely(err))
18981 +		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
18982 +
18983 + return err;
18984 +}
18985 +
18986 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
18987 +{
18988 + struct dpaa2_dq *dq;
18989 + int cleaned = 0, is_last;
18990 +
18991 + do {
18992 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
18993 + if (unlikely(!dq)) {
18994 + if (unlikely(!is_last)) {
18995 + dev_dbg(ppriv->priv->dev,
18996 + "FQ %d returned no valid frames\n",
18997 + ppriv->rsp_fqid);
18998 + /*
18999 + * MUST retry until we get some sort of
19000 + * valid response token (be it "empty dequeue"
19001 + * or a valid frame).
19002 + */
19003 + continue;
19004 + }
19005 + break;
19006 + }
19007 +
19008 + /* Process FD */
19009 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
19010 + cleaned++;
19011 + } while (!is_last);
19012 +
19013 + return cleaned;
19014 +}
19015 +
19016 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
19017 +{
19018 + struct dpaa2_caam_priv_per_cpu *ppriv;
19019 + struct dpaa2_caam_priv *priv;
19020 + int err, cleaned = 0, store_cleaned;
19021 +
19022 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
19023 + priv = ppriv->priv;
19024 +
19025 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
19026 + return 0;
19027 +
19028 + do {
19029 + store_cleaned = dpaa2_caam_store_consume(ppriv);
19030 + cleaned += store_cleaned;
19031 +
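+		/*
+		 * Stop if the FQ was drained (empty store) or if consuming
+		 * another full store could exceed the NAPI budget.
+		 */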
19032 + if (store_cleaned == 0 ||
19033 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
19034 + break;
19035 +
19036 + /* Try to dequeue some more */
19037 + err = dpaa2_caam_pull_fq(ppriv);
19038 + if (unlikely(err))
19039 + break;
19040 + } while (1);
19041 +
19042 + if (cleaned < budget) {
19043 + napi_complete_done(napi, cleaned);
19044 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
19045 + if (unlikely(err))
19046 + dev_err(priv->dev, "Notification rearm failed: %d\n",
19047 + err);
19048 + }
19049 +
19050 + return cleaned;
19051 +}
19052 +
19053 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
19054 + u16 token)
19055 +{
19056 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
19057 + struct device *dev = priv->dev;
19058 + int err;
19059 +
19060 + /*
19061 + * Congestion group feature supported starting with DPSECI API v5.1
19062 +	 * and only when the object has been created with this capability.
19063 + */
19064 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
19065 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
19066 + return 0;
19067 +
19068 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
19069 + GFP_KERNEL | GFP_DMA);
19070 + if (!priv->cscn_mem)
19071 + return -ENOMEM;
19072 +
19073 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
19074 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
19075 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19076 + if (dma_mapping_error(dev, priv->cscn_dma)) {
19077 + dev_err(dev, "Error mapping CSCN memory area\n");
19078 + err = -ENOMEM;
19079 + goto err_dma_map;
19080 + }
19081 +
19082 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
19083 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
19084 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
19085 + cong_notif_cfg.message_ctx = (u64)priv;
19086 + cong_notif_cfg.message_iova = priv->cscn_dma;
19087 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
19088 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
19089 + DPSECI_CGN_MODE_COHERENT_WRITE;
19090 +
19091 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
19092 + &cong_notif_cfg);
19093 + if (err) {
19094 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
19095 + goto err_set_cong;
19096 + }
19097 +
19098 + return 0;
19099 +
19100 +err_set_cong:
19101 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19102 +err_dma_map:
19103 + kfree(priv->cscn_mem);
19104 +
19105 + return err;
19106 +}
19107 +
19108 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
19109 +{
19110 + struct device *dev = &ls_dev->dev;
19111 + struct dpaa2_caam_priv *priv;
19112 + struct dpaa2_caam_priv_per_cpu *ppriv;
19113 + int err, cpu;
19114 + u8 i;
19115 +
19116 + priv = dev_get_drvdata(dev);
19117 +
19118 + priv->dev = dev;
19119 + priv->dpsec_id = ls_dev->obj_desc.id;
19120 +
19121 +	/* Get a handle for the DPSECI this interface is associated with */
19122 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
19123 + if (err) {
19124 + dev_err(dev, "dpsec_open() failed: %d\n", err);
19125 + goto err_open;
19126 + }
19127 +
19128 + dev_info(dev, "Opened dpseci object successfully\n");
19129 +
19130 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
19131 + &priv->minor_ver);
19132 + if (err) {
19133 + dev_err(dev, "dpseci_get_api_version() failed\n");
19134 + goto err_get_vers;
19135 + }
19136 +
19137 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
19138 + &priv->dpseci_attr);
19139 + if (err) {
19140 + dev_err(dev, "dpseci_get_attributes() failed\n");
19141 + goto err_get_vers;
19142 + }
19143 +
19144 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
19145 + &priv->sec_attr);
19146 + if (err) {
19147 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
19148 + goto err_get_vers;
19149 + }
19150 +
19151 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
19152 + if (err) {
19153 + dev_err(dev, "setup_congestion() failed\n");
19154 + goto err_get_vers;
19155 + }
19156 +
19157 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
19158 + priv->dpseci_attr.num_tx_queues);
19159 + if (priv->num_pairs > num_online_cpus()) {
19160 + dev_warn(dev, "%d queues won't be used\n",
19161 + priv->num_pairs - num_online_cpus());
19162 + priv->num_pairs = num_online_cpus();
19163 + }
19164 +
19165 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
19166 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19167 + &priv->rx_queue_attr[i]);
19168 + if (err) {
19169 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
19170 + goto err_get_rx_queue;
19171 + }
19172 + }
19173 +
19174 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
19175 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19176 + &priv->tx_queue_attr[i]);
19177 + if (err) {
19178 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
19179 + goto err_get_rx_queue;
19180 + }
19181 + }
19182 +
19183 + i = 0;
19184 + for_each_online_cpu(cpu) {
19185 + dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
19186 + priv->rx_queue_attr[i].fqid,
19187 + priv->tx_queue_attr[i].fqid);
19188 +
19189 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
19190 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
19191 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
19192 + ppriv->prio = i;
19193 +
19194 + ppriv->net_dev.dev = *dev;
19195 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
19196 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
19197 + DPAA2_CAAM_NAPI_WEIGHT);
19198 + if (++i == priv->num_pairs)
19199 + break;
19200 + }
19201 +
19202 + return 0;
19203 +
19204 +err_get_rx_queue:
19205 + dpaa2_dpseci_congestion_free(priv);
19206 +err_get_vers:
19207 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
19208 +err_open:
19209 + return err;
19210 +}
19211 +
19212 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
19213 +{
19214 + struct device *dev = priv->dev;
19215 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19216 + struct dpaa2_caam_priv_per_cpu *ppriv;
19217 + int err, i;
19218 +
19219 + for (i = 0; i < priv->num_pairs; i++) {
19220 + ppriv = per_cpu_ptr(priv->ppriv, i);
19221 + napi_enable(&ppriv->napi);
19222 + }
19223 +
19224 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
19225 + if (err) {
19226 + dev_err(dev, "dpseci_enable() failed\n");
19227 + return err;
19228 + }
19229 +
19230 + dev_info(dev, "DPSECI version %d.%d\n",
19231 + priv->major_ver,
19232 + priv->minor_ver);
19233 +
19234 + return 0;
19235 +}
19236 +
19237 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
19238 +{
19239 + struct device *dev = priv->dev;
19240 + struct dpaa2_caam_priv_per_cpu *ppriv;
19241 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19242 + int i, err = 0, enabled;
19243 +
19244 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
19245 + if (err) {
19246 + dev_err(dev, "dpseci_disable() failed\n");
19247 + return err;
19248 + }
19249 +
19250 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
19251 + if (err) {
19252 + dev_err(dev, "dpseci_is_enabled() failed\n");
19253 + return err;
19254 + }
19255 +
19256 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
19257 +
19258 + for (i = 0; i < priv->num_pairs; i++) {
19259 + ppriv = per_cpu_ptr(priv->ppriv, i);
19260 + napi_disable(&ppriv->napi);
19261 + netif_napi_del(&ppriv->napi);
19262 + }
19263 +
19264 + return 0;
19265 +}
19266 +
19267 +static struct list_head alg_list;
19268 +
19269 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
19270 +{
19271 + struct device *dev;
19272 + struct dpaa2_caam_priv *priv;
19273 + int i, err = 0;
19274 + bool registered = false;
19275 +
19276 + /*
19277 + * There is no way to get CAAM endianness - there is no direct register
19278 + * space access and MC f/w does not provide this attribute.
19279 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
19280 + * property.
19281 + */
19282 + caam_little_end = true;
19283 +
19284 + caam_imx = false;
19285 +
19286 + dev = &dpseci_dev->dev;
19287 +
19288 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
19289 + if (!priv)
19290 + return -ENOMEM;
19291 +
19292 + dev_set_drvdata(dev, priv);
19293 +
19294 + priv->domain = iommu_get_domain_for_dev(dev);
19295 +
19296 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
19297 + 0, SLAB_CACHE_DMA, NULL);
19298 + if (!qi_cache) {
19299 + dev_err(dev, "Can't allocate SEC cache\n");
19300 + err = -ENOMEM;
19301 + goto err_qicache;
19302 + }
19303 +
19304 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
19305 + if (err) {
19306 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
19307 + goto err_dma_mask;
19308 + }
19309 +
19310 + /* Obtain a MC portal */
19311 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
19312 + if (err) {
19313 + dev_err(dev, "MC portal allocation failed\n");
19314 + goto err_dma_mask;
19315 + }
19316 +
19317 + priv->ppriv = alloc_percpu(*priv->ppriv);
19318 + if (!priv->ppriv) {
19319 + dev_err(dev, "alloc_percpu() failed\n");
19320 + goto err_alloc_ppriv;
19321 + }
19322 +
19323 + /* DPSECI initialization */
19324 + err = dpaa2_dpseci_setup(dpseci_dev);
19325 + if (err < 0) {
19326 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
19327 + goto err_dpseci_setup;
19328 + }
19329 +
19330 + /* DPIO */
19331 + err = dpaa2_dpseci_dpio_setup(priv);
19332 + if (err) {
19333 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
19334 + goto err_dpio_setup;
19335 + }
19336 +
19337 + /* DPSECI binding to DPIO */
19338 + err = dpaa2_dpseci_bind(priv);
19339 + if (err) {
19340 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
19341 + goto err_bind;
19342 + }
19343 +
19344 + /* DPSECI enable */
19345 + err = dpaa2_dpseci_enable(priv);
19346 + if (err) {
19347 +		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
19348 + goto err_bind;
19349 + }
19350 +
19351 + /* register crypto algorithms the device supports */
19352 + INIT_LIST_HEAD(&alg_list);
19353 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
19354 + struct caam_crypto_alg *t_alg;
19355 + struct caam_alg_template *alg = driver_algs + i;
19356 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
19357 +
19358 + /* Skip DES algorithms if not supported by device */
19359 + if (!priv->sec_attr.des_acc_num &&
19360 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
19361 + (alg_sel == OP_ALG_ALGSEL_DES)))
19362 + continue;
19363 +
19364 + /* Skip AES algorithms if not supported by device */
19365 + if (!priv->sec_attr.aes_acc_num &&
19366 + (alg_sel == OP_ALG_ALGSEL_AES))
19367 + continue;
19368 +
19369 + t_alg = caam_alg_alloc(alg);
19370 + if (IS_ERR(t_alg)) {
19371 + err = PTR_ERR(t_alg);
19372 + dev_warn(dev, "%s alg allocation failed: %d\n",
19373 + alg->driver_name, err);
19374 + continue;
19375 + }
19376 + t_alg->caam.dev = dev;
19377 +
19378 + err = crypto_register_alg(&t_alg->crypto_alg);
19379 + if (err) {
19380 + dev_warn(dev, "%s alg registration failed: %d\n",
19381 + t_alg->crypto_alg.cra_driver_name, err);
19382 + kfree(t_alg);
19383 + continue;
19384 + }
19385 +
19386 + list_add_tail(&t_alg->entry, &alg_list);
19387 + registered = true;
19388 + }
19389 +
19390 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19391 + struct caam_aead_alg *t_alg = driver_aeads + i;
19392 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
19393 + OP_ALG_ALGSEL_MASK;
19394 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
19395 + OP_ALG_ALGSEL_MASK;
19396 +
19397 + /* Skip DES algorithms if not supported by device */
19398 + if (!priv->sec_attr.des_acc_num &&
19399 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
19400 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
19401 + continue;
19402 +
19403 + /* Skip AES algorithms if not supported by device */
19404 + if (!priv->sec_attr.aes_acc_num &&
19405 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
19406 + continue;
19407 +
19408 + /*
19409 + * Skip algorithms requiring message digests
19410 + * if MD not supported by device.
19411 + */
19412 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
19413 + continue;
19414 +
19415 + t_alg->caam.dev = dev;
19416 + caam_aead_alg_init(t_alg);
19417 +
19418 + err = crypto_register_aead(&t_alg->aead);
19419 + if (err) {
19420 + dev_warn(dev, "%s alg registration failed: %d\n",
19421 + t_alg->aead.base.cra_driver_name, err);
19422 + continue;
19423 + }
19424 +
19425 + t_alg->registered = true;
19426 + registered = true;
19427 + }
19428 + if (registered)
19429 + dev_info(dev, "algorithms registered in /proc/crypto\n");
19430 +
19431 + return err;
19432 +
19433 +err_bind:
19434 + dpaa2_dpseci_dpio_free(priv);
19435 +err_dpio_setup:
19436 + dpaa2_dpseci_free(priv);
19437 +err_dpseci_setup:
19438 + free_percpu(priv->ppriv);
19439 +err_alloc_ppriv:
19440 + fsl_mc_portal_free(priv->mc_io);
19441 +err_dma_mask:
19442 + kmem_cache_destroy(qi_cache);
19443 +err_qicache:
19444 + dev_set_drvdata(dev, NULL);
19445 +
19446 + return err;
19447 +}
19448 +
19449 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
19450 +{
19451 + struct device *dev;
19452 + struct dpaa2_caam_priv *priv;
19453 + int i;
19454 +
19455 + dev = &ls_dev->dev;
19456 + priv = dev_get_drvdata(dev);
19457 +
19458 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19459 + struct caam_aead_alg *t_alg = driver_aeads + i;
19460 +
19461 + if (t_alg->registered)
19462 + crypto_unregister_aead(&t_alg->aead);
19463 + }
19464 +
19465 + if (alg_list.next) {
19466 + struct caam_crypto_alg *t_alg, *n;
19467 +
19468 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
19469 + crypto_unregister_alg(&t_alg->crypto_alg);
19470 + list_del(&t_alg->entry);
19471 + kfree(t_alg);
19472 + }
19473 + }
19474 +
19475 + dpaa2_dpseci_disable(priv);
19476 + dpaa2_dpseci_dpio_free(priv);
19477 + dpaa2_dpseci_free(priv);
19478 + free_percpu(priv->ppriv);
19479 + fsl_mc_portal_free(priv->mc_io);
19480 + dev_set_drvdata(dev, NULL);
19481 + kmem_cache_destroy(qi_cache);
19482 +
19483 + return 0;
19484 +}
19485 +
19486 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
19487 +{
19488 + struct dpaa2_fd fd;
19489 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
19490 + int err = 0, i, id;
19491 +
19492 + if (IS_ERR(req))
19493 + return PTR_ERR(req);
19494 +
19495 + if (priv->cscn_mem) {
19496 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
19497 + DPAA2_CSCN_SIZE,
19498 + DMA_FROM_DEVICE);
19499 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
19500 + dev_dbg_ratelimited(dev, "Dropping request\n");
19501 + return -EBUSY;
19502 + }
19503 + }
19504 +
19505 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
19506 +
19507 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
19508 + DMA_BIDIRECTIONAL);
19509 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
19510 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
19511 + goto err_out;
19512 + }
19513 +
19514 + memset(&fd, 0, sizeof(fd));
19515 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
19516 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
19517 + dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
19518 + dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
19519 +
19520 + /*
19521 + * There is no guarantee that preemption is disabled here,
19522 + * thus take action.
19523 + */
19524 + preempt_disable();
19525 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
19526 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
19527 + err = dpaa2_io_service_enqueue_fq(NULL,
19528 + priv->tx_queue_attr[id].fqid,
19529 + &fd);
19530 + if (err != -EBUSY)
19531 + break;
19532 + }
19533 + preempt_enable();
19534 +
19535 + if (unlikely(err < 0)) {
19536 + dev_err(dev, "Error enqueuing frame: %d\n", err);
19537 + goto err_out;
19538 + }
19539 +
19540 + return -EINPROGRESS;
19541 +
19542 +err_out:
19543 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
19544 + DMA_BIDIRECTIONAL);
19545 + return -EIO;
19546 +}
19547 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
19548 +
19549 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
19550 + {
19551 + .vendor = FSL_MC_VENDOR_FREESCALE,
19552 + .obj_type = "dpseci",
19553 + },
19554 + { .vendor = 0x0 }
19555 +};
19556 +
19557 +static struct fsl_mc_driver dpaa2_caam_driver = {
19558 + .driver = {
19559 + .name = KBUILD_MODNAME,
19560 + .owner = THIS_MODULE,
19561 + },
19562 + .probe = dpaa2_caam_probe,
19563 + .remove = dpaa2_caam_remove,
19564 + .match_id_table = dpaa2_caam_match_id_table
19565 +};
19566 +
19567 +MODULE_LICENSE("Dual BSD/GPL");
19568 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
19569 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
19570 +
19571 +module_fsl_mc_driver(dpaa2_caam_driver);
19572 --- /dev/null
19573 +++ b/drivers/crypto/caam/caamalg_qi2.h
19574 @@ -0,0 +1,265 @@
19575 +/*
19576 + * Copyright 2015-2016 Freescale Semiconductor Inc.
19577 + * Copyright 2017 NXP
19578 + *
19579 + * Redistribution and use in source and binary forms, with or without
19580 + * modification, are permitted provided that the following conditions are met:
19581 + * * Redistributions of source code must retain the above copyright
19582 + * notice, this list of conditions and the following disclaimer.
19583 + * * Redistributions in binary form must reproduce the above copyright
19584 + * notice, this list of conditions and the following disclaimer in the
19585 + * documentation and/or other materials provided with the distribution.
19586 + * * Neither the names of the above-listed copyright holders nor the
19587 + * names of any contributors may be used to endorse or promote products
19588 + * derived from this software without specific prior written permission.
19589 + *
19590 + *
19591 + * ALTERNATIVELY, this software may be distributed under the terms of the
19592 + * GNU General Public License ("GPL") as published by the Free Software
19593 + * Foundation, either version 2 of that License or (at your option) any
19594 + * later version.
19595 + *
19596 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19597 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19598 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19599 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19600 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19601 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19602 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19603 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19604 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19605 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19606 + * POSSIBILITY OF SUCH DAMAGE.
19607 + */
19608 +
19609 +#ifndef _CAAMALG_QI2_H_
19610 +#define _CAAMALG_QI2_H_
19611 +
19612 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
19613 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
19614 +#include <linux/threads.h>
19615 +#include "dpseci.h"
19616 +#include "desc_constr.h"
19617 +
19618 +#define DPAA2_CAAM_STORE_SIZE 16
19619 +/* NAPI weight *must* be a multiple of the store size. */
19620 +#define DPAA2_CAAM_NAPI_WEIGHT 64
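+/*
+ * With a 16-entry store, a weight of 64 means at most four full stores are
+ * consumed in a single NAPI poll before the budget check stops the loop.
+ */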
19621 +
19622 +/* The congestion entrance threshold was chosen so that on LS2088
19623 + * we support the maximum throughput for the available memory
19624 + */
19625 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
19626 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
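+/*
+ * The exit threshold is 90% of the entry threshold, which gives some
+ * hysteresis so the congestion state does not flip on every small change
+ * in queue occupancy.
+ */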
19627 +
19628 +/**
19629 + * dpaa2_caam_priv - driver private data
19630 + * @dpsec_id: DPSECI object unique ID
19631 + * @major_ver: DPSECI major version
19632 + * @minor_ver: DPSECI minor version
19633 + * @dpseci_attr: DPSECI attributes
19634 + * @sec_attr: SEC engine attributes
19635 + * @rx_queue_attr: array of Rx queue attributes
19636 + * @tx_queue_attr: array of Tx queue attributes
19637 + * @cscn_mem: pointer to memory region containing the
19638 + *	dpaa2_cscn struct; its size is larger than
19639 + * sizeof(struct dpaa2_cscn) to accommodate alignment
19640 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
19641 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
19642 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
19643 + * @dev: device associated with the DPSECI object
19644 + * @mc_io: pointer to MC portal's I/O object
19645 + * @domain: IOMMU domain
19646 + * @ppriv: per CPU pointers to private data
19647 + */
19648 +struct dpaa2_caam_priv {
19649 + int dpsec_id;
19650 +
19651 + u16 major_ver;
19652 + u16 minor_ver;
19653 +
19654 + struct dpseci_attr dpseci_attr;
19655 + struct dpseci_sec_attr sec_attr;
19656 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
19657 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
19658 + int num_pairs;
19659 +
19660 + /* congestion */
19661 + void *cscn_mem;
19662 + void *cscn_mem_aligned;
19663 + dma_addr_t cscn_dma;
19664 +
19665 + struct device *dev;
19666 + struct fsl_mc_io *mc_io;
19667 + struct iommu_domain *domain;
19668 +
19669 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
19670 +};
19671 +
19672 +/**
19673 + * dpaa2_caam_priv_per_cpu - per CPU private data
19674 + * @napi: napi structure
19675 + * @net_dev: netdev used by napi
19676 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
19677 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
19678 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
19679 + * @nctx: notification context of response FQ
19680 + * @store: where dequeued frames are stored
19681 + * @priv: backpointer to dpaa2_caam_priv
19682 + */
19683 +struct dpaa2_caam_priv_per_cpu {
19684 + struct napi_struct napi;
19685 + struct net_device net_dev;
19686 + int req_fqid;
19687 + int rsp_fqid;
19688 + int prio;
19689 + struct dpaa2_io_notification_ctx nctx;
19690 + struct dpaa2_io_store *store;
19691 + struct dpaa2_caam_priv *priv;
19692 +};
19693 +
19694 +/*
19695 + * The CAAM QI hardware constructs a job descriptor which points
19696 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
19697 + * When the job descriptor is executed by deco, the whole job
19698 + * descriptor together with shared descriptor gets loaded in
19699 + * deco buffer which is 64 words long (each 32-bit).
19700 + *
19701 + * The job descriptor constructed by QI hardware has layout:
19702 + *
19703 + * HEADER (1 word)
19704 + * Shdesc ptr (1 or 2 words)
19705 + * SEQ_OUT_PTR (1 word)
19706 + * Out ptr (1 or 2 words)
19707 + * Out length (1 word)
19708 + * SEQ_IN_PTR (1 word)
19709 + * In ptr (1 or 2 words)
19710 + * In length (1 word)
19711 + *
19712 + * The shdesc ptr is used to fetch shared descriptor contents
19713 + * into deco buffer.
19714 + *
19715 + * Apart from shdesc contents, the total number of words that
19716 + * get loaded in deco buffer are '8' or '11'. The remaining words
19717 + * in deco buffer can be used for storing shared descriptor.
19718 + */
19719 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
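+/*
+ * Worked example (a sketch, assuming 64-bit DMA addresses and that
+ * CAAM_DESC_BYTES_MAX matches the 64-word deco buffer): the QI-built job
+ * descriptor above takes 11 of the 64 words, so MAX_SDLEN evaluates to
+ * roughly 53 words available for the shared descriptor.
+ */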
19720 +
19721 +/* Length of a single buffer in the QI driver memory cache */
19722 +#define CAAM_QI_MEMCACHE_SIZE 512
19723 +
19724 +/*
19725 + * aead_edesc - s/w-extended aead descriptor
19726 + * @src_nents: number of segments in input scatterlist
19727 + * @dst_nents: number of segments in output scatterlist
19728 + * @iv_dma: dma address of iv for checking continuity and link table
19729 + * @qm_sg_bytes: length of dma mapped h/w link table
19730 + * @qm_sg_dma: bus physical mapped address of h/w link table
19731 + * @assoclen_dma: bus physical mapped address of req->assoclen
19732 + * @sgt: the h/w link table
19733 + */
19734 +struct aead_edesc {
19735 + int src_nents;
19736 + int dst_nents;
19737 + dma_addr_t iv_dma;
19738 + int qm_sg_bytes;
19739 + dma_addr_t qm_sg_dma;
19740 + dma_addr_t assoclen_dma;
19741 +#define CAAM_QI_MAX_AEAD_SG \
19742 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
19743 + sizeof(struct dpaa2_sg_entry))
19744 + struct dpaa2_sg_entry sgt[0];
19745 +};
19746 +
19747 +/*
19748 + * tls_edesc - s/w-extended tls descriptor
19749 + * @src_nents: number of segments in input scatterlist
19750 + * @dst_nents: number of segments in output scatterlist
19751 + * @iv_dma: dma address of iv for checking continuity and link table
19752 + * @qm_sg_bytes: length of dma mapped h/w link table
19753 + * @qm_sg_dma: bus physical mapped address of h/w link table
19754 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
19755 + * @dst: pointer to output scatterlist, useful for unmapping
19756 + * @sgt: the h/w link table
19757 + */
19758 +struct tls_edesc {
19759 + int src_nents;
19760 + int dst_nents;
19761 + dma_addr_t iv_dma;
19762 + int qm_sg_bytes;
19763 + dma_addr_t qm_sg_dma;
19764 + struct scatterlist tmp[2];
19765 + struct scatterlist *dst;
19766 + struct dpaa2_sg_entry sgt[0];
19767 +};
19768 +
19769 +/*
19770 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
19771 + * @src_nents: number of segments in input scatterlist
19772 + * @dst_nents: number of segments in output scatterlist
19773 + * @iv_dma: dma address of iv for checking continuity and link table
19774 + * @qm_sg_bytes: length of dma mapped qm_sg space
19775 + * @qm_sg_dma: I/O virtual address of h/w link table
19776 + * @sgt: the h/w link table
19777 + */
19778 +struct ablkcipher_edesc {
19779 + int src_nents;
19780 + int dst_nents;
19781 + dma_addr_t iv_dma;
19782 + int qm_sg_bytes;
19783 + dma_addr_t qm_sg_dma;
19784 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
19785 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
19786 + sizeof(struct dpaa2_sg_entry))
19787 + struct dpaa2_sg_entry sgt[0];
19788 +};
19789 +
19790 +/**
19791 + * caam_flc - Flow Context (FLC)
19792 + * @flc: Flow Context options
19793 + * @sh_desc: Shared Descriptor
19794 + * @flc_dma: DMA address of the Flow Context
19795 + */
19796 +struct caam_flc {
19797 + u32 flc[16];
19798 + u32 sh_desc[MAX_SDLEN];
19799 + dma_addr_t flc_dma;
19800 +} ____cacheline_aligned;
19801 +
19802 +enum optype {
19803 + ENCRYPT = 0,
19804 + DECRYPT,
19805 + GIVENCRYPT,
19806 + NUM_OP
19807 +};
19808 +
19809 +/**
19810 + * caam_request - the request structure the driver application should fill while
19811 + * submitting a job to the driver.
19812 + * @fd_flt: Frame list table defining input and output
19813 + * fd_flt[0] - FLE pointing to output buffer
19814 + * fd_flt[1] - FLE pointing to input buffer
19815 + * @fd_flt_dma: DMA address for the frame list table
19816 + * @flc: Flow Context
19817 + * @op_type: operation type
19818 + * @cbk: Callback function to invoke when job is completed
19819 + * @ctx: arbitrary context attached to the request by the application
19820 + * @edesc: extended descriptor; points to one of {ablkcipher,tls,aead}_edesc
19821 + */
19822 +struct caam_request {
19823 + struct dpaa2_fl_entry fd_flt[2];
19824 + dma_addr_t fd_flt_dma;
19825 + struct caam_flc *flc;
19826 + enum optype op_type;
19827 + void (*cbk)(void *ctx, u32 err);
19828 + void *ctx;
19829 + void *edesc;
19830 +};
19831 +
19832 +/**
19833 + * dpaa2_caam_enqueue() - enqueue a crypto request
19834 + * @dev: device associated with the DPSECI object
19835 + * @req: pointer to caam_request
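+ *
+ * Returns: -EINPROGRESS if the request was enqueued, -EBUSY when the SEC is
+ * congested, or -EIO on DMA mapping / enqueue failure.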
19836 + */
19837 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
19838 +
19839 +#endif /* _CAAMALG_QI2_H_ */
19840 --- a/drivers/crypto/caam/caamhash.c
19841 +++ b/drivers/crypto/caam/caamhash.c
19842 @@ -72,7 +72,7 @@
19843 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
19844
19845 /* length of descriptors text */
19846 -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
19847 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
19848 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
19849 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
19850 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
19851 @@ -103,20 +103,14 @@ struct caam_hash_ctx {
19852 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19853 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19854 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19855 - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19856 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
19857 dma_addr_t sh_desc_update_first_dma;
19858 dma_addr_t sh_desc_fin_dma;
19859 dma_addr_t sh_desc_digest_dma;
19860 - dma_addr_t sh_desc_finup_dma;
19861 struct device *jrdev;
19862 - u32 alg_type;
19863 - u32 alg_op;
19864 u8 key[CAAM_MAX_HASH_KEY_SIZE];
19865 - dma_addr_t key_dma;
19866 int ctx_len;
19867 - unsigned int split_key_len;
19868 - unsigned int split_key_pad_len;
19869 + struct alginfo adata;
19870 };
19871
19872 /* ahash state */
19873 @@ -143,6 +137,31 @@ struct caam_export_state {
19874 int (*finup)(struct ahash_request *req);
19875 };
19876
19877 +static inline void switch_buf(struct caam_hash_state *state)
19878 +{
19879 + state->current_buf ^= 1;
19880 +}
19881 +
19882 +static inline u8 *current_buf(struct caam_hash_state *state)
19883 +{
19884 + return state->current_buf ? state->buf_1 : state->buf_0;
19885 +}
19886 +
19887 +static inline u8 *alt_buf(struct caam_hash_state *state)
19888 +{
19889 + return state->current_buf ? state->buf_0 : state->buf_1;
19890 +}
19891 +
19892 +static inline int *current_buflen(struct caam_hash_state *state)
19893 +{
19894 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
19895 +}
19896 +
19897 +static inline int *alt_buflen(struct caam_hash_state *state)
19898 +{
19899 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
19900 +}
19901 +
19902 /* Common job descriptor seq in/out ptr routines */
19903
19904 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
19905 @@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr
19906 return dst_dma;
19907 }
19908
19909 -/* Map current buffer in state and put it in link table */
19910 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
19911 - struct sec4_sg_entry *sec4_sg,
19912 - u8 *buf, int buflen)
19913 +/* Map current buffer in state (if length > 0) and put it in link table */
19914 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
19915 + struct sec4_sg_entry *sec4_sg,
19916 + struct caam_hash_state *state)
19917 {
19918 - dma_addr_t buf_dma;
19919 + int buflen = *current_buflen(state);
19920
19921 - buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
19922 - dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
19923 + if (!buflen)
19924 + return 0;
19925
19926 - return buf_dma;
19927 -}
19928 + state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
19929 + DMA_TO_DEVICE);
19930 + if (dma_mapping_error(jrdev, state->buf_dma)) {
19931 + dev_err(jrdev, "unable to map buf\n");
19932 + state->buf_dma = 0;
19933 + return -ENOMEM;
19934 + }
19935
19936 -/*
19937 - * Only put buffer in link table if it contains data, which is possible,
19938 - * since a buffer has previously been used, and needs to be unmapped,
19939 - */
19940 -static inline dma_addr_t
19941 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
19942 - u8 *buf, dma_addr_t buf_dma, int buflen,
19943 - int last_buflen)
19944 -{
19945 - if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
19946 - dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
19947 - if (buflen)
19948 - buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
19949 - else
19950 - buf_dma = 0;
19951 + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
19952
19953 - return buf_dma;
19954 + return 0;
19955 }
19956
19957 /* Map state->caam_ctx, and add it to link table */
19958 @@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32
19959 return 0;
19960 }
19961
19962 -/* Common shared descriptor commands */
19963 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
19964 -{
19965 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
19966 - ctx->split_key_len, CLASS_2 |
19967 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
19968 -}
19969 -
19970 -/* Append key if it has been set */
19971 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
19972 -{
19973 - u32 *key_jump_cmd;
19974 -
19975 - init_sh_desc(desc, HDR_SHARE_SERIAL);
19976 -
19977 - if (ctx->split_key_len) {
19978 - /* Skip if already shared */
19979 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
19980 - JUMP_COND_SHRD);
19981 -
19982 - append_key_ahash(desc, ctx);
19983 -
19984 - set_jump_tgt_here(desc, key_jump_cmd);
19985 - }
19986 -
19987 - /* Propagate errors from shared to job descriptor */
19988 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
19989 -}
19990 -
19991 /*
19992 - * For ahash read data from seqin following state->caam_ctx,
19993 - * and write resulting class2 context to seqout, which may be state->caam_ctx
19994 - * or req->result
19995 + * For ahash update, final and finup (import_ctx = true)
19996 + * import context, read and write to seqout
19997 + * For ahash firsts and digest (import_ctx = false)
19998 + * read and write to seqout
19999 */
20000 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
20001 +static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
20002 + struct caam_hash_ctx *ctx, bool import_ctx)
20003 {
20004 - /* Calculate remaining bytes to read */
20005 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20006 + u32 op = ctx->adata.algtype;
20007 + u32 *skip_key_load;
20008
20009 - /* Read remaining bytes */
20010 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20011 - FIFOLD_TYPE_MSG | KEY_VLF);
20012 + init_sh_desc(desc, HDR_SHARE_SERIAL);
20013
20014 - /* Store class2 context bytes */
20015 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20016 - LDST_SRCDST_BYTE_CONTEXT);
20017 -}
20018 + /* Append key if it has been set; ahash update excluded */
20019 + if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
20020 + /* Skip key loading if already shared */
20021 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20022 + JUMP_COND_SHRD);
20023
20024 -/*
20025 - * For ahash update, final and finup, import context, read and write to seqout
20026 - */
20027 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
20028 - int digestsize,
20029 - struct caam_hash_ctx *ctx)
20030 -{
20031 - init_sh_desc_key_ahash(desc, ctx);
20032 -
20033 - /* Import context from software */
20034 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20035 - LDST_CLASS_2_CCB | ctx->ctx_len);
20036 + append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
20037 + ctx->adata.keylen, CLASS_2 |
20038 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
20039
20040 - /* Class 2 operation */
20041 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
20042 + set_jump_tgt_here(desc, skip_key_load);
20043
20044 - /*
20045 - * Load from buf and/or src and write to req->result or state->context
20046 - */
20047 - ahash_append_load_str(desc, digestsize);
20048 -}
20049 + op |= OP_ALG_AAI_HMAC_PRECOMP;
20050 + }
20051
20052 -/* For ahash firsts and digest, read and write to seqout */
20053 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
20054 - int digestsize, struct caam_hash_ctx *ctx)
20055 -{
20056 - init_sh_desc_key_ahash(desc, ctx);
20057 + /* If needed, import context from software */
20058 + if (import_ctx)
20059 + append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
20060 + LDST_SRCDST_BYTE_CONTEXT);
20061
20062 /* Class 2 operation */
20063 append_operation(desc, op | state | OP_ALG_ENCRYPT);
20064
20065 /*
20066 * Load from buf and/or src and write to req->result or state->context
20067 + * Calculate remaining bytes to read
20068 */
20069 - ahash_append_load_str(desc, digestsize);
20070 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20071 + /* Read remaining bytes */
20072 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20073 + FIFOLD_TYPE_MSG | KEY_VLF);
20074 + /* Store class2 context bytes */
20075 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20076 + LDST_SRCDST_BYTE_CONTEXT);
20077 }
20078
20079 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20080 @@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp
20081 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20082 int digestsize = crypto_ahash_digestsize(ahash);
20083 struct device *jrdev = ctx->jrdev;
20084 - u32 have_key = 0;
20085 u32 *desc;
20086
20087 - if (ctx->split_key_len)
20088 - have_key = OP_ALG_AAI_HMAC_PRECOMP;
20089 -
20090 /* ahash_update shared descriptor */
20091 desc = ctx->sh_desc_update;
20092 -
20093 - init_sh_desc(desc, HDR_SHARE_SERIAL);
20094 -
20095 - /* Import context from software */
20096 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20097 - LDST_CLASS_2_CCB | ctx->ctx_len);
20098 -
20099 - /* Class 2 operation */
20100 - append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
20101 - OP_ALG_ENCRYPT);
20102 -
20103 - /* Load data and write to result or context */
20104 - ahash_append_load_str(desc, ctx->ctx_len);
20105 -
20106 - ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20107 - DMA_TO_DEVICE);
20108 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
20109 - dev_err(jrdev, "unable to map shared descriptor\n");
20110 - return -ENOMEM;
20111 - }
20112 + ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
20113 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
20114 + desc_bytes(desc), DMA_TO_DEVICE);
20115 #ifdef DEBUG
20116 print_hex_dump(KERN_ERR,
20117 "ahash update shdesc@"__stringify(__LINE__)": ",
20118 @@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp
20119
20120 /* ahash_update_first shared descriptor */
20121 desc = ctx->sh_desc_update_first;
20122 -
20123 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
20124 - ctx->ctx_len, ctx);
20125 -
20126 - ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
20127 - desc_bytes(desc),
20128 - DMA_TO_DEVICE);
20129 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
20130 - dev_err(jrdev, "unable to map shared descriptor\n");
20131 - return -ENOMEM;
20132 - }
20133 + ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
20134 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
20135 + desc_bytes(desc), DMA_TO_DEVICE);
20136 #ifdef DEBUG
20137 print_hex_dump(KERN_ERR,
20138 "ahash update first shdesc@"__stringify(__LINE__)": ",
20139 @@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp
20140
20141 /* ahash_final shared descriptor */
20142 desc = ctx->sh_desc_fin;
20143 -
20144 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20145 - OP_ALG_AS_FINALIZE, digestsize, ctx);
20146 -
20147 - ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20148 - DMA_TO_DEVICE);
20149 - if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
20150 - dev_err(jrdev, "unable to map shared descriptor\n");
20151 - return -ENOMEM;
20152 - }
20153 + ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
20154 + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
20155 + desc_bytes(desc), DMA_TO_DEVICE);
20156 #ifdef DEBUG
20157 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
20158 DUMP_PREFIX_ADDRESS, 16, 4, desc,
20159 desc_bytes(desc), 1);
20160 #endif
20161
20162 - /* ahash_finup shared descriptor */
20163 - desc = ctx->sh_desc_finup;
20164 -
20165 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20166 - OP_ALG_AS_FINALIZE, digestsize, ctx);
20167 -
20168 - ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20169 - DMA_TO_DEVICE);
20170 - if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
20171 - dev_err(jrdev, "unable to map shared descriptor\n");
20172 - return -ENOMEM;
20173 - }
20174 -#ifdef DEBUG
20175 - print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
20176 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
20177 - desc_bytes(desc), 1);
20178 -#endif
20179 -
20180 /* ahash_digest shared descriptor */
20181 desc = ctx->sh_desc_digest;
20182 -
20183 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
20184 - digestsize, ctx);
20185 -
20186 - ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
20187 - desc_bytes(desc),
20188 - DMA_TO_DEVICE);
20189 - if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
20190 - dev_err(jrdev, "unable to map shared descriptor\n");
20191 - return -ENOMEM;
20192 - }
20193 + ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
20194 + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
20195 + desc_bytes(desc), DMA_TO_DEVICE);
20196 #ifdef DEBUG
20197 print_hex_dump(KERN_ERR,
20198 "ahash digest shdesc@"__stringify(__LINE__)": ",
20199 @@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp
20200 return 0;
20201 }
20202
20203 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20204 - u32 keylen)
20205 -{
20206 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
20207 - ctx->split_key_pad_len, key_in, keylen,
20208 - ctx->alg_op);
20209 -}
20210 -
20211 /* Digest hash size if it is too large */
20212 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20213 u32 *keylen, u8 *key_out, u32 digestsize)
20214 @@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h
20215 }
20216
20217 /* Job descriptor to perform unkeyed hash on key_in */
20218 - append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
20219 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
20220 OP_ALG_AS_INITFINAL);
20221 append_seq_in_ptr(desc, src_dma, *keylen, 0);
20222 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
20223 @@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h
20224 static int ahash_setkey(struct crypto_ahash *ahash,
20225 const u8 *key, unsigned int keylen)
20226 {
20227 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
20228 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
20229 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20230 - struct device *jrdev = ctx->jrdev;
20231 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
20232 int digestsize = crypto_ahash_digestsize(ahash);
20233 int ret;
20234 @@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah
20235 key = hashed_key;
20236 }
20237
20238 - /* Pick class 2 key length from algorithm submask */
20239 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20240 - OP_ALG_ALGSEL_SHIFT] * 2;
20241 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
20242 -
20243 -#ifdef DEBUG
20244 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
20245 - ctx->split_key_len, ctx->split_key_pad_len);
20246 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
20247 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
20248 -#endif
20249 -
20250 - ret = gen_split_hash_key(ctx, key, keylen);
20251 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
20252 + CAAM_MAX_HASH_KEY_SIZE);
20253 if (ret)
20254 goto bad_free_key;
20255
20256 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
20257 - DMA_TO_DEVICE);
20258 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
20259 - dev_err(jrdev, "unable to map key i/o memory\n");
20260 - ret = -ENOMEM;
20261 - goto error_free_key;
20262 - }
20263 #ifdef DEBUG
20264 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
20265 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
20266 - ctx->split_key_pad_len, 1);
20267 + ctx->adata.keylen_pad, 1);
20268 #endif
20269
20270 - ret = ahash_set_sh_desc(ahash);
20271 - if (ret) {
20272 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
20273 - DMA_TO_DEVICE);
20274 - }
20275 - error_free_key:
20276 kfree(hashed_key);
20277 - return ret;
20278 + return ahash_set_sh_desc(ahash);
20279 bad_free_key:
20280 kfree(hashed_key);
20281 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
20282 @@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de
20283 struct ahash_edesc *edesc,
20284 struct ahash_request *req, int dst_len)
20285 {
20286 + struct caam_hash_state *state = ahash_request_ctx(req);
20287 +
20288 if (edesc->src_nents)
20289 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
20290 if (edesc->dst_dma)
20291 @@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de
20292 if (edesc->sec4_sg_bytes)
20293 dma_unmap_single(dev, edesc->sec4_sg_dma,
20294 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
20295 +
20296 + if (state->buf_dma) {
20297 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
20298 + DMA_TO_DEVICE);
20299 + state->buf_dma = 0;
20300 + }
20301 }
20302
20303 static inline void ahash_unmap_ctx(struct device *dev,
20304 @@ -643,8 +529,7 @@ static void ahash_done(struct device *jr
20305 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20306 #endif
20307
20308 - edesc = (struct ahash_edesc *)((char *)desc -
20309 - offsetof(struct ahash_edesc, hw_desc));
20310 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20311 if (err)
20312 caam_jr_strstatus(jrdev, err);
20313
20314 @@ -671,19 +556,19 @@ static void ahash_done_bi(struct device
20315 struct ahash_edesc *edesc;
20316 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20317 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20318 -#ifdef DEBUG
20319 struct caam_hash_state *state = ahash_request_ctx(req);
20320 +#ifdef DEBUG
20321 int digestsize = crypto_ahash_digestsize(ahash);
20322
20323 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20324 #endif
20325
20326 - edesc = (struct ahash_edesc *)((char *)desc -
20327 - offsetof(struct ahash_edesc, hw_desc));
20328 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20329 if (err)
20330 caam_jr_strstatus(jrdev, err);
20331
20332 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
20333 + switch_buf(state);
20334 kfree(edesc);
20335
20336 #ifdef DEBUG
20337 @@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de
20338 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20339 #endif
20340
20341 - edesc = (struct ahash_edesc *)((char *)desc -
20342 - offsetof(struct ahash_edesc, hw_desc));
20343 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20344 if (err)
20345 caam_jr_strstatus(jrdev, err);
20346
20347 @@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de
20348 struct ahash_edesc *edesc;
20349 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20350 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20351 -#ifdef DEBUG
20352 struct caam_hash_state *state = ahash_request_ctx(req);
20353 +#ifdef DEBUG
20354 int digestsize = crypto_ahash_digestsize(ahash);
20355
20356 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20357 #endif
20358
20359 - edesc = (struct ahash_edesc *)((char *)desc -
20360 - offsetof(struct ahash_edesc, hw_desc));
20361 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20362 if (err)
20363 caam_jr_strstatus(jrdev, err);
20364
20365 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
20366 + switch_buf(state);
20367 kfree(edesc);
20368
20369 #ifdef DEBUG
20370 @@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash
20371 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20372 struct caam_hash_state *state = ahash_request_ctx(req);
20373 struct device *jrdev = ctx->jrdev;
20374 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20375 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20376 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20377 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20378 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20379 - int *next_buflen = state->current_buf ? &state->buflen_0 :
20380 - &state->buflen_1, last_buflen;
20381 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20382 + GFP_KERNEL : GFP_ATOMIC;
20383 + u8 *buf = current_buf(state);
20384 + int *buflen = current_buflen(state);
20385 + u8 *next_buf = alt_buf(state);
20386 + int *next_buflen = alt_buflen(state), last_buflen;
20387 int in_len = *buflen + req->nbytes, to_hash;
20388 u32 *desc;
20389 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
20390 @@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash
20391 if (ret)
20392 goto unmap_ctx;
20393
20394 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
20395 - edesc->sec4_sg + 1,
20396 - buf, state->buf_dma,
20397 - *buflen, last_buflen);
20398 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20399 + if (ret)
20400 + goto unmap_ctx;
20401
20402 if (mapped_nents) {
20403 sg_to_sec4_sg_last(req->src, mapped_nents,
20404 @@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash
20405 to_hash - *buflen,
20406 *next_buflen, 0);
20407 } else {
20408 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20409 - cpu_to_caam32(SEC4_SG_LEN_FIN);
20410 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
20411 + 1);
20412 }
20413
20414 - state->current_buf = !state->current_buf;
20415 -
20416 desc = edesc->hw_desc;
20417
20418 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20419 @@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_
20420 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20421 struct caam_hash_state *state = ahash_request_ctx(req);
20422 struct device *jrdev = ctx->jrdev;
20423 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20424 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20425 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20426 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20427 - int last_buflen = state->current_buf ? state->buflen_0 :
20428 - state->buflen_1;
20429 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20430 + GFP_KERNEL : GFP_ATOMIC;
20431 + int buflen = *current_buflen(state);
20432 u32 *desc;
20433 int sec4_sg_bytes, sec4_sg_src_index;
20434 int digestsize = crypto_ahash_digestsize(ahash);
20435 @@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_
20436 if (ret)
20437 goto unmap_ctx;
20438
20439 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20440 - buf, state->buf_dma, buflen,
20441 - last_buflen);
20442 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20443 - cpu_to_caam32(SEC4_SG_LEN_FIN);
20444 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20445 + if (ret)
20446 + goto unmap_ctx;
20447 +
20448 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
20449
20450 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20451 sec4_sg_bytes, DMA_TO_DEVICE);
20452 @@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_
20453 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20454 struct caam_hash_state *state = ahash_request_ctx(req);
20455 struct device *jrdev = ctx->jrdev;
20456 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20457 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20458 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20459 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20460 - int last_buflen = state->current_buf ? state->buflen_0 :
20461 - state->buflen_1;
20462 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20463 + GFP_KERNEL : GFP_ATOMIC;
20464 + int buflen = *current_buflen(state);
20465 u32 *desc;
20466 int sec4_sg_src_index;
20467 int src_nents, mapped_nents;
20468 @@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_
20469
20470 /* allocate space for base edesc and hw desc commands, link tables */
20471 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
20472 - ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
20473 + ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
20474 flags);
20475 if (!edesc) {
20476 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
20477 @@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_
20478 if (ret)
20479 goto unmap_ctx;
20480
20481 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20482 - buf, state->buf_dma, buflen,
20483 - last_buflen);
20484 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20485 + if (ret)
20486 + goto unmap_ctx;
20487
20488 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
20489 sec4_sg_src_index, ctx->ctx_len + buflen,
20490 @@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req
20491 {
20492 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20493 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20494 + struct caam_hash_state *state = ahash_request_ctx(req);
20495 struct device *jrdev = ctx->jrdev;
20496 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20497 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20498 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20499 + GFP_KERNEL : GFP_ATOMIC;
20500 u32 *desc;
20501 int digestsize = crypto_ahash_digestsize(ahash);
20502 int src_nents, mapped_nents;
20503 struct ahash_edesc *edesc;
20504 int ret;
20505
20506 + state->buf_dma = 0;
20507 +
20508 src_nents = sg_nents_for_len(req->src, req->nbytes);
20509 if (src_nents < 0) {
20510 dev_err(jrdev, "Invalid number of src SG.\n");
20511 @@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha
20512 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20513 struct caam_hash_state *state = ahash_request_ctx(req);
20514 struct device *jrdev = ctx->jrdev;
20515 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20516 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20517 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20518 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20519 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20520 + GFP_KERNEL : GFP_ATOMIC;
20521 + u8 *buf = current_buf(state);
20522 + int buflen = *current_buflen(state);
20523 u32 *desc;
20524 int digestsize = crypto_ahash_digestsize(ahash);
20525 struct ahash_edesc *edesc;
20526 @@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah
20527 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20528 struct caam_hash_state *state = ahash_request_ctx(req);
20529 struct device *jrdev = ctx->jrdev;
20530 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20531 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20532 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20533 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20534 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20535 - int *next_buflen = state->current_buf ? &state->buflen_0 :
20536 - &state->buflen_1;
20537 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20538 + GFP_KERNEL : GFP_ATOMIC;
20539 + u8 *buf = current_buf(state);
20540 + int *buflen = current_buflen(state);
20541 + u8 *next_buf = alt_buf(state);
20542 + int *next_buflen = alt_buflen(state);
20543 int in_len = *buflen + req->nbytes, to_hash;
20544 int sec4_sg_bytes, src_nents, mapped_nents;
20545 struct ahash_edesc *edesc;
20546 @@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah
20547 edesc->sec4_sg_bytes = sec4_sg_bytes;
20548 edesc->dst_dma = 0;
20549
20550 - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
20551 - buf, *buflen);
20552 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20553 + if (ret)
20554 + goto unmap_ctx;
20555 +
20556 sg_to_sec4_sg_last(req->src, mapped_nents,
20557 edesc->sec4_sg + 1, 0);
20558
20559 @@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah
20560 *next_buflen, 0);
20561 }
20562
20563 - state->current_buf = !state->current_buf;
20564 -
20565 desc = edesc->hw_desc;
20566
20567 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20568 @@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha
20569 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20570 struct caam_hash_state *state = ahash_request_ctx(req);
20571 struct device *jrdev = ctx->jrdev;
20572 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20573 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20574 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20575 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20576 - int last_buflen = state->current_buf ? state->buflen_0 :
20577 - state->buflen_1;
20578 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20579 + GFP_KERNEL : GFP_ATOMIC;
20580 + int buflen = *current_buflen(state);
20581 u32 *desc;
20582 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
20583 int digestsize = crypto_ahash_digestsize(ahash);
20584 @@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha
20585 edesc->src_nents = src_nents;
20586 edesc->sec4_sg_bytes = sec4_sg_bytes;
20587
20588 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
20589 - state->buf_dma, buflen,
20590 - last_buflen);
20591 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20592 + if (ret)
20593 + goto unmap;
20594
20595 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
20596 req->nbytes);
20597 @@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha
20598 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20599 struct caam_hash_state *state = ahash_request_ctx(req);
20600 struct device *jrdev = ctx->jrdev;
20601 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20602 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20603 - u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
20604 - int *next_buflen = state->current_buf ?
20605 - &state->buflen_1 : &state->buflen_0;
20606 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20607 + GFP_KERNEL : GFP_ATOMIC;
20608 + u8 *next_buf = alt_buf(state);
20609 + int *next_buflen = alt_buflen(state);
20610 int to_hash;
20611 u32 *desc;
20612 int src_nents, mapped_nents;
20613 @@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha
20614 state->final = ahash_final_no_ctx;
20615 scatterwalk_map_and_copy(next_buf, req->src, 0,
20616 req->nbytes, 0);
20617 + switch_buf(state);
20618 }
20619 #ifdef DEBUG
20620 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
20621 @@ -1688,7 +1561,6 @@ struct caam_hash_template {
20622 unsigned int blocksize;
20623 struct ahash_alg template_ahash;
20624 u32 alg_type;
20625 - u32 alg_op;
20626 };
20627
20628 /* ahash descriptors */
20629 @@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_
20630 },
20631 },
20632 .alg_type = OP_ALG_ALGSEL_SHA1,
20633 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
20634 }, {
20635 .name = "sha224",
20636 .driver_name = "sha224-caam",
20637 @@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_
20638 },
20639 },
20640 .alg_type = OP_ALG_ALGSEL_SHA224,
20641 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
20642 }, {
20643 .name = "sha256",
20644 .driver_name = "sha256-caam",
20645 @@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_
20646 },
20647 },
20648 .alg_type = OP_ALG_ALGSEL_SHA256,
20649 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
20650 }, {
20651 .name = "sha384",
20652 .driver_name = "sha384-caam",
20653 @@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_
20654 },
20655 },
20656 .alg_type = OP_ALG_ALGSEL_SHA384,
20657 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
20658 }, {
20659 .name = "sha512",
20660 .driver_name = "sha512-caam",
20661 @@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_
20662 },
20663 },
20664 .alg_type = OP_ALG_ALGSEL_SHA512,
20665 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
20666 }, {
20667 .name = "md5",
20668 .driver_name = "md5-caam",
20669 @@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_
20670 },
20671 },
20672 .alg_type = OP_ALG_ALGSEL_MD5,
20673 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
20674 },
20675 };
20676
20677 struct caam_hash_alg {
20678 struct list_head entry;
20679 int alg_type;
20680 - int alg_op;
20681 struct ahash_alg ahash_alg;
20682 };
20683
20684 @@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry
20685 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20686 HASH_MSG_LEN + 64,
20687 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20688 + dma_addr_t dma_addr;
20689
20690 /*
20691 * Get a Job ring from Job Ring driver to ensure in-order
20692 @@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry
20693 pr_err("Job Ring Device allocation for transform failed\n");
20694 return PTR_ERR(ctx->jrdev);
20695 }
20696 +
20697 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
20698 + offsetof(struct caam_hash_ctx,
20699 + sh_desc_update_dma),
20700 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20701 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
20702 + dev_err(ctx->jrdev, "unable to map shared descriptors\n");
20703 + caam_jr_free(ctx->jrdev);
20704 + return -ENOMEM;
20705 + }
20706 +
20707 + ctx->sh_desc_update_dma = dma_addr;
20708 + ctx->sh_desc_update_first_dma = dma_addr +
20709 + offsetof(struct caam_hash_ctx,
20710 + sh_desc_update_first);
20711 + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
20712 + sh_desc_fin);
20713 + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
20714 + sh_desc_digest);
20715 +
20716 /* copy descriptor header template value */
20717 - ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20718 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
20719 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20720
20721 - ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20722 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
20723 + OP_ALG_ALGSEL_SUBMASK) >>
20724 OP_ALG_ALGSEL_SHIFT];
20725
20726 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20727 @@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr
20728 {
20729 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20730
20731 - if (ctx->sh_desc_update_dma &&
20732 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
20733 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
20734 - desc_bytes(ctx->sh_desc_update),
20735 - DMA_TO_DEVICE);
20736 - if (ctx->sh_desc_update_first_dma &&
20737 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
20738 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
20739 - desc_bytes(ctx->sh_desc_update_first),
20740 - DMA_TO_DEVICE);
20741 - if (ctx->sh_desc_fin_dma &&
20742 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
20743 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
20744 - desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
20745 - if (ctx->sh_desc_digest_dma &&
20746 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
20747 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
20748 - desc_bytes(ctx->sh_desc_digest),
20749 - DMA_TO_DEVICE);
20750 - if (ctx->sh_desc_finup_dma &&
20751 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
20752 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
20753 - desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
20754 -
20755 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
20756 + offsetof(struct caam_hash_ctx,
20757 + sh_desc_update_dma),
20758 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20759 caam_jr_free(ctx->jrdev);
20760 }
20761
20762 @@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat
20763 alg->cra_type = &crypto_ahash_type;
20764
20765 t_alg->alg_type = template->alg_type;
20766 - t_alg->alg_op = template->alg_op;
20767
20768 return t_alg;
20769 }
20770 --- a/drivers/crypto/caam/caampkc.c
20771 +++ b/drivers/crypto/caam/caampkc.c
20772 @@ -18,6 +18,10 @@
20773 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
20774 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
20775 sizeof(struct rsa_priv_f1_pdb))
20776 +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
20777 + sizeof(struct rsa_priv_f2_pdb))
20778 +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
20779 + sizeof(struct rsa_priv_f3_pdb))
20780
20781 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
20782 struct akcipher_request *req)
20783 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
20784 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20785 }
20786
20787 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
20788 + struct akcipher_request *req)
20789 +{
20790 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20791 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20792 + struct caam_rsa_key *key = &ctx->key;
20793 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20794 + size_t p_sz = key->p_sz;
20795 + size_t q_sz = key->q_sz;
20796 +
20797 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20798 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20799 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20800 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20801 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20802 +}
20803 +
20804 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
20805 + struct akcipher_request *req)
20806 +{
20807 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20808 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20809 + struct caam_rsa_key *key = &ctx->key;
20810 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20811 + size_t p_sz = key->p_sz;
20812 + size_t q_sz = key->q_sz;
20813 +
20814 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20815 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20816 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
20817 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
20818 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
20819 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20820 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20821 +}
20822 +
20823 /* RSA Job Completion handler */
20824 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
20825 {
20826 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
20827 akcipher_request_complete(req, err);
20828 }
20829
20830 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
20831 + void *context)
20832 +{
20833 + struct akcipher_request *req = context;
20834 + struct rsa_edesc *edesc;
20835 +
20836 + if (err)
20837 + caam_jr_strstatus(dev, err);
20838 +
20839 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20840 +
20841 + rsa_priv_f2_unmap(dev, edesc, req);
20842 + rsa_io_unmap(dev, edesc, req);
20843 + kfree(edesc);
20844 +
20845 + akcipher_request_complete(req, err);
20846 +}
20847 +
20848 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
20849 + void *context)
20850 +{
20851 + struct akcipher_request *req = context;
20852 + struct rsa_edesc *edesc;
20853 +
20854 + if (err)
20855 + caam_jr_strstatus(dev, err);
20856 +
20857 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20858 +
20859 + rsa_priv_f3_unmap(dev, edesc, req);
20860 + rsa_io_unmap(dev, edesc, req);
20861 + kfree(edesc);
20862 +
20863 + akcipher_request_complete(req, err);
20864 +}
20865 +
20866 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20867 size_t desclen)
20868 {
20869 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
20870 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20871 struct device *dev = ctx->dev;
20872 struct rsa_edesc *edesc;
20873 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20874 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20875 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20876 + GFP_KERNEL : GFP_ATOMIC;
20877 int sgc;
20878 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
20879 int src_nents, dst_nents;
20880 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
20881 return 0;
20882 }
20883
20884 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
20885 + struct rsa_edesc *edesc)
20886 +{
20887 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20888 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20889 + struct caam_rsa_key *key = &ctx->key;
20890 + struct device *dev = ctx->dev;
20891 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20892 + int sec4_sg_index = 0;
20893 + size_t p_sz = key->p_sz;
20894 + size_t q_sz = key->q_sz;
20895 +
20896 + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
20897 + if (dma_mapping_error(dev, pdb->d_dma)) {
20898 + dev_err(dev, "Unable to map RSA private exponent memory\n");
20899 + return -ENOMEM;
20900 + }
20901 +
20902 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20903 + if (dma_mapping_error(dev, pdb->p_dma)) {
20904 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
20905 + goto unmap_d;
20906 + }
20907 +
20908 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20909 + if (dma_mapping_error(dev, pdb->q_dma)) {
20910 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
20911 + goto unmap_p;
20912 + }
20913 +
20914 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
20915 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
20916 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
20917 + goto unmap_q;
20918 + }
20919 +
20920 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
20921 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
20922 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
20923 + goto unmap_tmp1;
20924 + }
20925 +
20926 + if (edesc->src_nents > 1) {
20927 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
20928 + pdb->g_dma = edesc->sec4_sg_dma;
20929 + sec4_sg_index += edesc->src_nents;
20930 + } else {
20931 + pdb->g_dma = sg_dma_address(req->src);
20932 + }
20933 +
20934 + if (edesc->dst_nents > 1) {
20935 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
20936 + pdb->f_dma = edesc->sec4_sg_dma +
20937 + sec4_sg_index * sizeof(struct sec4_sg_entry);
20938 + } else {
20939 + pdb->f_dma = sg_dma_address(req->dst);
20940 + }
20941 +
20942 + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
20943 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
20944 +
20945 + return 0;
20946 +
20947 +unmap_tmp1:
20948 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20949 +unmap_q:
20950 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20951 +unmap_p:
20952 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20953 +unmap_d:
20954 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20955 +
20956 + return -ENOMEM;
20957 +}
20958 +
20959 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
20960 + struct rsa_edesc *edesc)
20961 +{
20962 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20963 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20964 + struct caam_rsa_key *key = &ctx->key;
20965 + struct device *dev = ctx->dev;
20966 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20967 + int sec4_sg_index = 0;
20968 + size_t p_sz = key->p_sz;
20969 + size_t q_sz = key->q_sz;
20970 +
20971 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20972 + if (dma_mapping_error(dev, pdb->p_dma)) {
20973 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
20974 + return -ENOMEM;
20975 + }
20976 +
20977 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20978 + if (dma_mapping_error(dev, pdb->q_dma)) {
20979 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
20980 + goto unmap_p;
20981 + }
20982 +
20983 + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
20984 + if (dma_mapping_error(dev, pdb->dp_dma)) {
20985 + dev_err(dev, "Unable to map RSA exponent dp memory\n");
20986 + goto unmap_q;
20987 + }
20988 +
20989 + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
20990 + if (dma_mapping_error(dev, pdb->dq_dma)) {
20991 + dev_err(dev, "Unable to map RSA exponent dq memory\n");
20992 + goto unmap_dp;
20993 + }
20994 +
20995 + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
20996 + if (dma_mapping_error(dev, pdb->c_dma)) {
20997 + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
20998 + goto unmap_dq;
20999 + }
21000 +
21001 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
21002 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
21003 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
21004 + goto unmap_qinv;
21005 + }
21006 +
21007 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
21008 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
21009 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
21010 + goto unmap_tmp1;
21011 + }
21012 +
21013 + if (edesc->src_nents > 1) {
21014 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
21015 + pdb->g_dma = edesc->sec4_sg_dma;
21016 + sec4_sg_index += edesc->src_nents;
21017 + } else {
21018 + pdb->g_dma = sg_dma_address(req->src);
21019 + }
21020 +
21021 + if (edesc->dst_nents > 1) {
21022 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
21023 + pdb->f_dma = edesc->sec4_sg_dma +
21024 + sec4_sg_index * sizeof(struct sec4_sg_entry);
21025 + } else {
21026 + pdb->f_dma = sg_dma_address(req->dst);
21027 + }
21028 +
21029 + pdb->sgf |= key->n_sz;
21030 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
21031 +
21032 + return 0;
21033 +
21034 +unmap_tmp1:
21035 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21036 +unmap_qinv:
21037 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
21038 +unmap_dq:
21039 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
21040 +unmap_dp:
21041 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
21042 +unmap_q:
21043 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21044 +unmap_p:
21045 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21046 +
21047 + return -ENOMEM;
21048 +}
21049 +
21050 static int caam_rsa_enc(struct akcipher_request *req)
21051 {
21052 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21053 @@ -301,24 +543,14 @@ init_fail:
21054 return ret;
21055 }
21056
21057 -static int caam_rsa_dec(struct akcipher_request *req)
21058 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
21059 {
21060 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21061 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21062 - struct caam_rsa_key *key = &ctx->key;
21063 struct device *jrdev = ctx->dev;
21064 struct rsa_edesc *edesc;
21065 int ret;
21066
21067 - if (unlikely(!key->n || !key->d))
21068 - return -EINVAL;
21069 -
21070 - if (req->dst_len < key->n_sz) {
21071 - req->dst_len = key->n_sz;
21072 - dev_err(jrdev, "Output buffer length less than parameter n\n");
21073 - return -EOVERFLOW;
21074 - }
21075 -
21076 /* Allocate extended descriptor */
21077 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
21078 if (IS_ERR(edesc))
21079 @@ -344,17 +576,147 @@ init_fail:
21080 return ret;
21081 }
21082
21083 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
21084 +{
21085 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21086 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21087 + struct device *jrdev = ctx->dev;
21088 + struct rsa_edesc *edesc;
21089 + int ret;
21090 +
21091 + /* Allocate extended descriptor */
21092 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
21093 + if (IS_ERR(edesc))
21094 + return PTR_ERR(edesc);
21095 +
21096 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
21097 + ret = set_rsa_priv_f2_pdb(req, edesc);
21098 + if (ret)
21099 + goto init_fail;
21100 +
21101 + /* Initialize Job Descriptor */
21102 + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
21103 +
21104 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
21105 + if (!ret)
21106 + return -EINPROGRESS;
21107 +
21108 + rsa_priv_f2_unmap(jrdev, edesc, req);
21109 +
21110 +init_fail:
21111 + rsa_io_unmap(jrdev, edesc, req);
21112 + kfree(edesc);
21113 + return ret;
21114 +}
21115 +
21116 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
21117 +{
21118 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21119 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21120 + struct device *jrdev = ctx->dev;
21121 + struct rsa_edesc *edesc;
21122 + int ret;
21123 +
21124 + /* Allocate extended descriptor */
21125 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
21126 + if (IS_ERR(edesc))
21127 + return PTR_ERR(edesc);
21128 +
21129 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
21130 + ret = set_rsa_priv_f3_pdb(req, edesc);
21131 + if (ret)
21132 + goto init_fail;
21133 +
21134 + /* Initialize Job Descriptor */
21135 + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
21136 +
21137 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
21138 + if (!ret)
21139 + return -EINPROGRESS;
21140 +
21141 + rsa_priv_f3_unmap(jrdev, edesc, req);
21142 +
21143 +init_fail:
21144 + rsa_io_unmap(jrdev, edesc, req);
21145 + kfree(edesc);
21146 + return ret;
21147 +}
21148 +
21149 +static int caam_rsa_dec(struct akcipher_request *req)
21150 +{
21151 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21152 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21153 + struct caam_rsa_key *key = &ctx->key;
21154 + int ret;
21155 +
21156 + if (unlikely(!key->n || !key->d))
21157 + return -EINVAL;
21158 +
21159 + if (req->dst_len < key->n_sz) {
21160 + req->dst_len = key->n_sz;
21161 + dev_err(ctx->dev, "Output buffer length less than parameter n\n");
21162 + return -EOVERFLOW;
21163 + }
21164 +
21165 + if (key->priv_form == FORM3)
21166 + ret = caam_rsa_dec_priv_f3(req);
21167 + else if (key->priv_form == FORM2)
21168 + ret = caam_rsa_dec_priv_f2(req);
21169 + else
21170 + ret = caam_rsa_dec_priv_f1(req);
21171 +
21172 + return ret;
21173 +}
21174 +
21175 static void caam_rsa_free_key(struct caam_rsa_key *key)
21176 {
21177 kzfree(key->d);
21178 + kzfree(key->p);
21179 + kzfree(key->q);
21180 + kzfree(key->dp);
21181 + kzfree(key->dq);
21182 + kzfree(key->qinv);
21183 + kzfree(key->tmp1);
21184 + kzfree(key->tmp2);
21185 kfree(key->e);
21186 kfree(key->n);
21187 - key->d = NULL;
21188 - key->e = NULL;
21189 - key->n = NULL;
21190 - key->d_sz = 0;
21191 - key->e_sz = 0;
21192 - key->n_sz = 0;
21193 + memset(key, 0, sizeof(*key));
21194 +}
21195 +
21196 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
21197 +{
21198 + while (!**ptr && *nbytes) {
21199 + (*ptr)++;
21200 + (*nbytes)--;
21201 + }
21202 +}
21203 +
21204 +/**
21205 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
21206 + * dP, dQ and qInv could decode to less than corresponding p, q length, as the
21207 + * BER-encoding requires that the minimum number of bytes be used to encode the
21208 + * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
21209 + * length.
21210 + *
21211 + * @ptr : pointer to {dP, dQ, qInv} CRT member
21212 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
21213 + * @dstlen: length in bytes of corresponding p or q prime factor
21214 + */
21215 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
21216 +{
21217 + u8 *dst;
21218 +
21219 + caam_rsa_drop_leading_zeros(&ptr, &nbytes);
21220 + if (!nbytes)
21221 + return NULL;
21222 +
21223 + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
21224 + if (!dst)
21225 + return NULL;
21226 +
21227 + memcpy(dst + (dstlen - nbytes), ptr, nbytes);
21228 +
21229 + return dst;
21230 }
21231
21232 /**
21233 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
21234 {
21235 u8 *val;
21236
21237 - while (!*buf && *nbytes) {
21238 - buf++;
21239 - (*nbytes)--;
21240 - }
21241 + caam_rsa_drop_leading_zeros(&buf, nbytes);
21242 + if (!*nbytes)
21243 + return NULL;
21244
21245 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
21246 if (!val)
21247 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
21248 unsigned int keylen)
21249 {
21250 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21251 - struct rsa_key raw_key = {0};
21252 + struct rsa_key raw_key = {NULL};
21253 struct caam_rsa_key *rsa_key = &ctx->key;
21254 int ret;
21255
21256 @@ -437,11 +798,69 @@ err:
21257 return -ENOMEM;
21258 }
21259
21260 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
21261 + struct rsa_key *raw_key)
21262 +{
21263 + struct caam_rsa_key *rsa_key = &ctx->key;
21264 + size_t p_sz = raw_key->p_sz;
21265 + size_t q_sz = raw_key->q_sz;
21266 +
21267 + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
21268 + if (!rsa_key->p)
21269 + return;
21270 + rsa_key->p_sz = p_sz;
21271 +
21272 + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
21273 + if (!rsa_key->q)
21274 + goto free_p;
21275 + rsa_key->q_sz = q_sz;
21276 +
21277 + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
21278 + if (!rsa_key->tmp1)
21279 + goto free_q;
21280 +
21281 + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
21282 + if (!rsa_key->tmp2)
21283 + goto free_tmp1;
21284 +
21285 + rsa_key->priv_form = FORM2;
21286 +
21287 + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
21288 + if (!rsa_key->dp)
21289 + goto free_tmp2;
21290 +
21291 + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
21292 + if (!rsa_key->dq)
21293 + goto free_dp;
21294 +
21295 + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
21296 + q_sz);
21297 + if (!rsa_key->qinv)
21298 + goto free_dq;
21299 +
21300 + rsa_key->priv_form = FORM3;
21301 +
21302 + return;
21303 +
21304 +free_dq:
21305 + kzfree(rsa_key->dq);
21306 +free_dp:
21307 + kzfree(rsa_key->dp);
21308 +free_tmp2:
21309 + kzfree(rsa_key->tmp2);
21310 +free_tmp1:
21311 + kzfree(rsa_key->tmp1);
21312 +free_q:
21313 + kzfree(rsa_key->q);
21314 +free_p:
21315 + kzfree(rsa_key->p);
21316 +}
21317 +
21318 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21319 unsigned int keylen)
21320 {
21321 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21322 - struct rsa_key raw_key = {0};
21323 + struct rsa_key raw_key = {NULL};
21324 struct caam_rsa_key *rsa_key = &ctx->key;
21325 int ret;
21326
21327 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
21328 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
21329 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
21330
21331 + caam_rsa_set_priv_key_form(ctx, &raw_key);
21332 +
21333 return 0;
21334
21335 err:
21336 --- a/drivers/crypto/caam/caampkc.h
21337 +++ b/drivers/crypto/caam/caampkc.h
21338 @@ -13,21 +13,75 @@
21339 #include "pdb.h"
21340
21341 /**
21342 + * caam_priv_key_form - CAAM RSA private key representation
21343 + * CAAM RSA private key may have any of three forms.
21344 + *
21345 + * 1. The first representation consists of the pair (n, d), where the
21346 + * components have the following meanings:
21347 + * n the RSA modulus
21348 + * d the RSA private exponent
21349 + *
21350 + * 2. The second representation consists of the triplet (p, q, d), where the
21351 + * components have the following meanings:
21352 + * p the first prime factor of the RSA modulus n
21353 + * q the second prime factor of the RSA modulus n
21354 + * d the RSA private exponent
21355 + *
21356 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
21357 + * where the components have the following meanings:
21358 + * p the first prime factor of the RSA modulus n
21359 + * q the second prime factor of the RSA modulus n
21360 + * dP the first factor's CRT exponent
21361 + * dQ the second factor's CRT exponent
21362 + * qInv the (first) CRT coefficient
21363 + *
21364 + * The benefit of using the third or the second key form is lower computational
21365 + * cost for the decryption and signature operations.
21366 + */
21367 +enum caam_priv_key_form {
21368 + FORM1,
21369 + FORM2,
21370 + FORM3
21371 +};
21372 +
21373 +/**
21374 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
21375 * @n : RSA modulus raw byte stream
21376 * @e : RSA public exponent raw byte stream
21377 * @d : RSA private exponent raw byte stream
21378 + * @p : RSA prime factor p of RSA modulus n
21379 + * @q : RSA prime factor q of RSA modulus n
21380 + * @dp : RSA CRT exponent of p
21381 + * @dq : RSA CRT exponent of q
21382 + * @qinv : RSA CRT coefficient
21383 + * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
21384 + * It is assumed to be as long as p.
21385 + * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
21386 + * It is assumed to be as long as q.
21387 * @n_sz : length in bytes of RSA modulus n
21388 * @e_sz : length in bytes of RSA public exponent
21389 * @d_sz : length in bytes of RSA private exponent
21390 + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
21391 + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
21392 + * @priv_form : CAAM RSA private key representation
21393 */
21394 struct caam_rsa_key {
21395 u8 *n;
21396 u8 *e;
21397 u8 *d;
21398 + u8 *p;
21399 + u8 *q;
21400 + u8 *dp;
21401 + u8 *dq;
21402 + u8 *qinv;
21403 + u8 *tmp1;
21404 + u8 *tmp2;
21405 size_t n_sz;
21406 size_t e_sz;
21407 size_t d_sz;
21408 + size_t p_sz;
21409 + size_t q_sz;
21410 + enum caam_priv_key_form priv_form;
21411 };
21412
21413 /**
21414 @@ -59,6 +113,8 @@ struct rsa_edesc {
21415 union {
21416 struct rsa_pub_pdb pub;
21417 struct rsa_priv_f1_pdb priv_f1;
21418 + struct rsa_priv_f2_pdb priv_f2;
21419 + struct rsa_priv_f3_pdb priv_f3;
21420 } pdb;
21421 u32 hw_desc[];
21422 };
21423 @@ -66,5 +122,7 @@ struct rsa_edesc {
21424 /* Descriptor construction primitives. */
21425 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
21426 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
21427 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
21428 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
21429
21430 #endif
21431 --- a/drivers/crypto/caam/caamrng.c
21432 +++ b/drivers/crypto/caam/caamrng.c
21433 @@ -52,7 +52,7 @@
21434
21435 /* length of descriptors */
21436 #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
21437 -#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
21438 +#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
21439
21440 /* Buffer, its dma address and lock */
21441 struct buf_data {
21442 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
21443 {
21444 struct buf_data *bd;
21445
21446 - bd = (struct buf_data *)((char *)desc -
21447 - offsetof(struct buf_data, hw_desc));
21448 + bd = container_of(desc, struct buf_data, hw_desc[0]);
21449
21450 if (err)
21451 caam_jr_strstatus(jrdev, err);
21452 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
21453
21454 init_sh_desc(desc, HDR_SHARE_SERIAL);
21455
21456 - /* Propagate errors from shared to job descriptor */
21457 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21458 -
21459 /* Generate random bytes */
21460 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
21461
21462 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
21463 if (err)
21464 return err;
21465
21466 - err = caam_init_buf(ctx, 1);
21467 - if (err)
21468 - return err;
21469 -
21470 - return 0;
21471 + return caam_init_buf(ctx, 1);
21472 }
21473
21474 static struct hwrng caam_rng = {
21475 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
21476 pr_err("Job Ring Device allocation for transform failed\n");
21477 return PTR_ERR(dev);
21478 }
21479 - rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
21480 + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
21481 if (!rng_ctx) {
21482 err = -ENOMEM;
21483 goto free_caam_alloc;
21484 --- a/drivers/crypto/caam/compat.h
21485 +++ b/drivers/crypto/caam/compat.h
21486 @@ -16,6 +16,7 @@
21487 #include <linux/of_platform.h>
21488 #include <linux/dma-mapping.h>
21489 #include <linux/io.h>
21490 +#include <linux/iommu.h>
21491 #include <linux/spinlock.h>
21492 #include <linux/rtnetlink.h>
21493 #include <linux/in.h>
21494 --- a/drivers/crypto/caam/ctrl.c
21495 +++ b/drivers/crypto/caam/ctrl.c
21496 @@ -2,40 +2,41 @@
21497 * Controller-level driver, kernel property detection, initialization
21498 *
21499 * Copyright 2008-2012 Freescale Semiconductor, Inc.
21500 + * Copyright 2017 NXP
21501 */
21502
21503 #include <linux/device.h>
21504 #include <linux/of_address.h>
21505 #include <linux/of_irq.h>
21506 +#include <linux/sys_soc.h>
21507
21508 #include "compat.h"
21509 #include "regs.h"
21510 #include "intern.h"
21511 #include "jr.h"
21512 #include "desc_constr.h"
21513 -#include "error.h"
21514 #include "ctrl.h"
21515
21516 bool caam_little_end;
21517 EXPORT_SYMBOL(caam_little_end);
21518 +bool caam_imx;
21519 +EXPORT_SYMBOL(caam_imx);
21520 +bool caam_dpaa2;
21521 +EXPORT_SYMBOL(caam_dpaa2);
21522 +
21523 +#ifdef CONFIG_CAAM_QI
21524 +#include "qi.h"
21525 +#endif
21526
21527 /*
21528 * i.MX targets tend to have clock control subsystems that can
21529 * enable/disable clocking to our device.
21530 */
21531 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
21532 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
21533 - char *clk_name)
21534 -{
21535 - return devm_clk_get(dev, clk_name);
21536 -}
21537 -#else
21538 static inline struct clk *caam_drv_identify_clk(struct device *dev,
21539 char *clk_name)
21540 {
21541 - return NULL;
21542 + return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
21543 }
21544 -#endif
21545
21546 /*
21547 * Descriptor to instantiate RNG State Handle 0 in normal mode and
21548 @@ -270,7 +271,7 @@ static int deinstantiate_rng(struct devi
21549 /*
21550 * If the corresponding bit is set, then it means the state
21551 * handle was initialized by us, and thus it needs to be
21552 - * deintialized as well
21553 + * deinitialized as well
21554 */
21555 if ((1 << sh_idx) & state_handle_mask) {
21556 /*
21557 @@ -303,20 +304,24 @@ static int caam_remove(struct platform_d
21558 struct device *ctrldev;
21559 struct caam_drv_private *ctrlpriv;
21560 struct caam_ctrl __iomem *ctrl;
21561 - int ring;
21562
21563 ctrldev = &pdev->dev;
21564 ctrlpriv = dev_get_drvdata(ctrldev);
21565 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
21566
21567 - /* Remove platform devices for JobRs */
21568 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
21569 - if (ctrlpriv->jrpdev[ring])
21570 - of_device_unregister(ctrlpriv->jrpdev[ring]);
21571 - }
21572 + /* Remove platform devices under the crypto node */
21573 + of_platform_depopulate(ctrldev);
21574 +
21575 +#ifdef CONFIG_CAAM_QI
21576 + if (ctrlpriv->qidev)
21577 + caam_qi_shutdown(ctrlpriv->qidev);
21578 +#endif
21579
21580 - /* De-initialize RNG state handles initialized by this driver. */
21581 - if (ctrlpriv->rng4_sh_init)
21582 + /*
21583 + * De-initialize RNG state handles initialized by this driver.
21584 + * In case of DPAA 2.x, RNG is managed by MC firmware.
21585 + */
21586 + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
21587 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
21588
21589 /* Shut down debug views */
21590 @@ -331,8 +336,8 @@ static int caam_remove(struct platform_d
21591 clk_disable_unprepare(ctrlpriv->caam_ipg);
21592 clk_disable_unprepare(ctrlpriv->caam_mem);
21593 clk_disable_unprepare(ctrlpriv->caam_aclk);
21594 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21595 -
21596 + if (ctrlpriv->caam_emi_slow)
21597 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21598 return 0;
21599 }
21600
21601 @@ -366,11 +371,8 @@ static void kick_trng(struct platform_de
21602 */
21603 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
21604 >> RTSDCTL_ENT_DLY_SHIFT;
21605 - if (ent_delay <= val) {
21606 - /* put RNG4 into run mode */
21607 - clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
21608 - return;
21609 - }
21610 + if (ent_delay <= val)
21611 + goto start_rng;
21612
21613 val = rd_reg32(&r4tst->rtsdctl);
21614 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
21615 @@ -382,15 +384,12 @@ static void kick_trng(struct platform_de
21616 wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
21617 /* read the control register */
21618 val = rd_reg32(&r4tst->rtmctl);
21619 +start_rng:
21620 /*
21621 * select raw sampling in both entropy shifter
21622 - * and statistical checker
21623 +	 * and statistical checker; put RNG4 into run mode
21624 */
21625 - clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
21626 - /* put RNG4 into run mode */
21627 - clrsetbits_32(&val, RTMCTL_PRGM, 0);
21628 - /* write back the control register */
21629 - wr_reg32(&r4tst->rtmctl, val);
21630 + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
21631 }
21632
21633 /**
21634 @@ -411,28 +410,26 @@ int caam_get_era(void)
21635 }
21636 EXPORT_SYMBOL(caam_get_era);
21637
21638 -#ifdef CONFIG_DEBUG_FS
21639 -static int caam_debugfs_u64_get(void *data, u64 *val)
21640 -{
21641 - *val = caam64_to_cpu(*(u64 *)data);
21642 - return 0;
21643 -}
21644 -
21645 -static int caam_debugfs_u32_get(void *data, u64 *val)
21646 -{
21647 - *val = caam32_to_cpu(*(u32 *)data);
21648 - return 0;
21649 -}
21650 -
21651 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
21652 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
21653 -#endif
21654 +static const struct of_device_id caam_match[] = {
21655 + {
21656 + .compatible = "fsl,sec-v4.0",
21657 + },
21658 + {
21659 + .compatible = "fsl,sec4.0",
21660 + },
21661 + {},
21662 +};
21663 +MODULE_DEVICE_TABLE(of, caam_match);
21664
21665 /* Probe routine for CAAM top (controller) level */
21666 static int caam_probe(struct platform_device *pdev)
21667 {
21668 - int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21669 + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21670 u64 caam_id;
21671 + static const struct soc_device_attribute imx_soc[] = {
21672 + {.family = "Freescale i.MX"},
21673 + {},
21674 + };
21675 struct device *dev;
21676 struct device_node *nprop, *np;
21677 struct caam_ctrl __iomem *ctrl;
21678 @@ -452,9 +449,10 @@ static int caam_probe(struct platform_de
21679
21680 dev = &pdev->dev;
21681 dev_set_drvdata(dev, ctrlpriv);
21682 - ctrlpriv->pdev = pdev;
21683 nprop = pdev->dev.of_node;
21684
21685 + caam_imx = (bool)soc_device_match(imx_soc);
21686 +
21687 /* Enable clocking */
21688 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
21689 if (IS_ERR(clk)) {
21690 @@ -483,14 +481,16 @@ static int caam_probe(struct platform_de
21691 }
21692 ctrlpriv->caam_aclk = clk;
21693
21694 - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21695 - if (IS_ERR(clk)) {
21696 - ret = PTR_ERR(clk);
21697 - dev_err(&pdev->dev,
21698 - "can't identify CAAM emi_slow clk: %d\n", ret);
21699 - return ret;
21700 + if (!of_machine_is_compatible("fsl,imx6ul")) {
21701 + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21702 + if (IS_ERR(clk)) {
21703 + ret = PTR_ERR(clk);
21704 + dev_err(&pdev->dev,
21705 + "can't identify CAAM emi_slow clk: %d\n", ret);
21706 + return ret;
21707 + }
21708 + ctrlpriv->caam_emi_slow = clk;
21709 }
21710 - ctrlpriv->caam_emi_slow = clk;
21711
21712 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
21713 if (ret < 0) {
21714 @@ -511,11 +511,13 @@ static int caam_probe(struct platform_de
21715 goto disable_caam_mem;
21716 }
21717
21718 - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21719 - if (ret < 0) {
21720 - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21721 - ret);
21722 - goto disable_caam_aclk;
21723 + if (ctrlpriv->caam_emi_slow) {
21724 + ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21725 + if (ret < 0) {
21726 + dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21727 + ret);
21728 + goto disable_caam_aclk;
21729 + }
21730 }
21731
21732 /* Get configuration properties from device tree */
21733 @@ -542,13 +544,13 @@ static int caam_probe(struct platform_de
21734 else
21735 BLOCK_OFFSET = PG_SIZE_64K;
21736
21737 - ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
21738 - ctrlpriv->assure = (struct caam_assurance __force *)
21739 - ((uint8_t *)ctrl +
21740 + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
21741 + ctrlpriv->assure = (struct caam_assurance __iomem __force *)
21742 + ((__force uint8_t *)ctrl +
21743 BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
21744 );
21745 - ctrlpriv->deco = (struct caam_deco __force *)
21746 - ((uint8_t *)ctrl +
21747 + ctrlpriv->deco = (struct caam_deco __iomem __force *)
21748 + ((__force uint8_t *)ctrl +
21749 BLOCK_OFFSET * DECO_BLOCK_NUMBER
21750 );
21751
21752 @@ -557,12 +559,17 @@ static int caam_probe(struct platform_de
21753
21754 /*
21755 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
21756 - * long pointers in master configuration register
21757 + * long pointers in master configuration register.
21758 + * In case of DPAA 2.x, Management Complex firmware performs
21759 + * the configuration.
21760 */
21761 - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21762 - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21763 - MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21764 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
21765 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
21766 + if (!caam_dpaa2)
21767 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21768 + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21769 + MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21770 + (sizeof(dma_addr_t) == sizeof(u64) ?
21771 + MCFGR_LONG_PTR : 0));
21772
21773 /*
21774 * Read the Compile Time paramters and SCFGR to determine
21775 @@ -590,64 +597,67 @@ static int caam_probe(struct platform_de
21776 JRSTART_JR1_START | JRSTART_JR2_START |
21777 JRSTART_JR3_START);
21778
21779 - if (sizeof(dma_addr_t) == sizeof(u64))
21780 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21781 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21782 + if (sizeof(dma_addr_t) == sizeof(u64)) {
21783 + if (caam_dpaa2)
21784 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
21785 + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21786 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21787 else
21788 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21789 - else
21790 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21791 -
21792 - /*
21793 - * Detect and enable JobRs
21794 - * First, find out how many ring spec'ed, allocate references
21795 - * for all, then go probe each one.
21796 - */
21797 - rspec = 0;
21798 - for_each_available_child_of_node(nprop, np)
21799 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21800 - of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
21801 - rspec++;
21802 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21803 + } else {
21804 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21805 + }
21806 + if (ret) {
21807 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
21808 + goto iounmap_ctrl;
21809 + }
21810
21811 - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
21812 - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
21813 - if (ctrlpriv->jrpdev == NULL) {
21814 - ret = -ENOMEM;
21815 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
21816 + if (ret) {
21817 + dev_err(dev, "JR platform devices creation error\n");
21818 goto iounmap_ctrl;
21819 }
21820
21821 +#ifdef CONFIG_DEBUG_FS
21822 + /*
21823 + * FIXME: needs better naming distinction, as some amalgamation of
21824 + * "caam" and nprop->full_name. The OF name isn't distinctive,
21825 + * but does separate instances
21826 + */
21827 + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21828 +
21829 + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21830 + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21831 +#endif
21832 ring = 0;
21833 - ctrlpriv->total_jobrs = 0;
21834 for_each_available_child_of_node(nprop, np)
21835 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21836 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
21837 - ctrlpriv->jrpdev[ring] =
21838 - of_platform_device_create(np, NULL, dev);
21839 - if (!ctrlpriv->jrpdev[ring]) {
21840 - pr_warn("JR%d Platform device creation error\n",
21841 - ring);
21842 - continue;
21843 - }
21844 - ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
21845 - ((uint8_t *)ctrl +
21846 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
21847 + ((__force uint8_t *)ctrl +
21848 (ring + JR_BLOCK_NUMBER) *
21849 BLOCK_OFFSET
21850 );
21851 ctrlpriv->total_jobrs++;
21852 ring++;
21853 - }
21854 + }
21855
21856 - /* Check to see if QI present. If so, enable */
21857 - ctrlpriv->qi_present =
21858 - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
21859 - CTPR_MS_QI_MASK);
21860 - if (ctrlpriv->qi_present) {
21861 - ctrlpriv->qi = (struct caam_queue_if __force *)
21862 - ((uint8_t *)ctrl +
21863 + /* Check to see if (DPAA 1.x) QI present. If so, enable */
21864 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
21865 + if (ctrlpriv->qi_present && !caam_dpaa2) {
21866 + ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
21867 + ((__force uint8_t *)ctrl +
21868 BLOCK_OFFSET * QI_BLOCK_NUMBER
21869 );
21870 /* This is all that's required to physically enable QI */
21871 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
21872 +
21873 + /* If QMAN driver is present, init CAAM-QI backend */
21874 +#ifdef CONFIG_CAAM_QI
21875 + ret = caam_qi_init(pdev);
21876 + if (ret)
21877 + dev_err(dev, "caam qi i/f init failed: %d\n", ret);
21878 +#endif
21879 }
21880
21881 /* If no QI and no rings specified, quit and go home */
21882 @@ -662,8 +672,10 @@ static int caam_probe(struct platform_de
21883 /*
21884 * If SEC has RNG version >= 4 and RNG state handle has not been
21885 * already instantiated, do RNG instantiation
21886 + * In case of DPAA 2.x, RNG is managed by MC firmware.
21887 */
21888 - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21889 + if (!caam_dpaa2 &&
21890 + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21891 ctrlpriv->rng4_sh_init =
21892 rd_reg32(&ctrl->r4tst[0].rdsta);
21893 /*
21894 @@ -731,77 +743,46 @@ static int caam_probe(struct platform_de
21895 /* Report "alive" for developer to see */
21896 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
21897 caam_get_era());
21898 - dev_info(dev, "job rings = %d, qi = %d\n",
21899 - ctrlpriv->total_jobrs, ctrlpriv->qi_present);
21900 + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
21901 + ctrlpriv->total_jobrs, ctrlpriv->qi_present,
21902 + caam_dpaa2 ? "yes" : "no");
21903
21904 #ifdef CONFIG_DEBUG_FS
21905 - /*
21906 - * FIXME: needs better naming distinction, as some amalgamation of
21907 - * "caam" and nprop->full_name. The OF name isn't distinctive,
21908 - * but does separate instances
21909 - */
21910 - perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21911 -
21912 - ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21913 - ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21914 -
21915 - /* Controller-level - performance monitor counters */
21916 -
21917 - ctrlpriv->ctl_rq_dequeued =
21918 - debugfs_create_file("rq_dequeued",
21919 - S_IRUSR | S_IRGRP | S_IROTH,
21920 - ctrlpriv->ctl, &perfmon->req_dequeued,
21921 - &caam_fops_u64_ro);
21922 - ctrlpriv->ctl_ob_enc_req =
21923 - debugfs_create_file("ob_rq_encrypted",
21924 - S_IRUSR | S_IRGRP | S_IROTH,
21925 - ctrlpriv->ctl, &perfmon->ob_enc_req,
21926 - &caam_fops_u64_ro);
21927 - ctrlpriv->ctl_ib_dec_req =
21928 - debugfs_create_file("ib_rq_decrypted",
21929 - S_IRUSR | S_IRGRP | S_IROTH,
21930 - ctrlpriv->ctl, &perfmon->ib_dec_req,
21931 - &caam_fops_u64_ro);
21932 - ctrlpriv->ctl_ob_enc_bytes =
21933 - debugfs_create_file("ob_bytes_encrypted",
21934 - S_IRUSR | S_IRGRP | S_IROTH,
21935 - ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21936 - &caam_fops_u64_ro);
21937 - ctrlpriv->ctl_ob_prot_bytes =
21938 - debugfs_create_file("ob_bytes_protected",
21939 - S_IRUSR | S_IRGRP | S_IROTH,
21940 - ctrlpriv->ctl, &perfmon->ob_prot_bytes,
21941 - &caam_fops_u64_ro);
21942 - ctrlpriv->ctl_ib_dec_bytes =
21943 - debugfs_create_file("ib_bytes_decrypted",
21944 - S_IRUSR | S_IRGRP | S_IROTH,
21945 - ctrlpriv->ctl, &perfmon->ib_dec_bytes,
21946 - &caam_fops_u64_ro);
21947 - ctrlpriv->ctl_ib_valid_bytes =
21948 - debugfs_create_file("ib_bytes_validated",
21949 - S_IRUSR | S_IRGRP | S_IROTH,
21950 - ctrlpriv->ctl, &perfmon->ib_valid_bytes,
21951 - &caam_fops_u64_ro);
21952 + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
21953 + ctrlpriv->ctl, &perfmon->req_dequeued,
21954 + &caam_fops_u64_ro);
21955 + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
21956 + ctrlpriv->ctl, &perfmon->ob_enc_req,
21957 + &caam_fops_u64_ro);
21958 + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
21959 + ctrlpriv->ctl, &perfmon->ib_dec_req,
21960 + &caam_fops_u64_ro);
21961 + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
21962 + ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21963 + &caam_fops_u64_ro);
21964 + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
21965 + ctrlpriv->ctl, &perfmon->ob_prot_bytes,
21966 + &caam_fops_u64_ro);
21967 + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
21968 + ctrlpriv->ctl, &perfmon->ib_dec_bytes,
21969 + &caam_fops_u64_ro);
21970 + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
21971 + ctrlpriv->ctl, &perfmon->ib_valid_bytes,
21972 + &caam_fops_u64_ro);
21973
21974 /* Controller level - global status values */
21975 - ctrlpriv->ctl_faultaddr =
21976 - debugfs_create_file("fault_addr",
21977 - S_IRUSR | S_IRGRP | S_IROTH,
21978 - ctrlpriv->ctl, &perfmon->faultaddr,
21979 - &caam_fops_u32_ro);
21980 - ctrlpriv->ctl_faultdetail =
21981 - debugfs_create_file("fault_detail",
21982 - S_IRUSR | S_IRGRP | S_IROTH,
21983 - ctrlpriv->ctl, &perfmon->faultdetail,
21984 - &caam_fops_u32_ro);
21985 - ctrlpriv->ctl_faultstatus =
21986 - debugfs_create_file("fault_status",
21987 - S_IRUSR | S_IRGRP | S_IROTH,
21988 - ctrlpriv->ctl, &perfmon->status,
21989 - &caam_fops_u32_ro);
21990 + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
21991 + ctrlpriv->ctl, &perfmon->faultaddr,
21992 + &caam_fops_u32_ro);
21993 + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
21994 + ctrlpriv->ctl, &perfmon->faultdetail,
21995 + &caam_fops_u32_ro);
21996 + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
21997 + ctrlpriv->ctl, &perfmon->status,
21998 + &caam_fops_u32_ro);
21999
22000 /* Internal covering keys (useful in non-secure mode only) */
22001 - ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
22002 + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
22003 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22004 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
22005 S_IRUSR |
22006 @@ -809,7 +790,7 @@ static int caam_probe(struct platform_de
22007 ctrlpriv->ctl,
22008 &ctrlpriv->ctl_kek_wrap);
22009
22010 - ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
22011 + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
22012 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22013 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
22014 S_IRUSR |
22015 @@ -817,7 +798,7 @@ static int caam_probe(struct platform_de
22016 ctrlpriv->ctl,
22017 &ctrlpriv->ctl_tkek_wrap);
22018
22019 - ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
22020 + ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
22021 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22022 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
22023 S_IRUSR |
22024 @@ -828,13 +809,17 @@ static int caam_probe(struct platform_de
22025 return 0;
22026
22027 caam_remove:
22028 +#ifdef CONFIG_DEBUG_FS
22029 + debugfs_remove_recursive(ctrlpriv->dfs_root);
22030 +#endif
22031 caam_remove(pdev);
22032 return ret;
22033
22034 iounmap_ctrl:
22035 iounmap(ctrl);
22036 disable_caam_emi_slow:
22037 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22038 + if (ctrlpriv->caam_emi_slow)
22039 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22040 disable_caam_aclk:
22041 clk_disable_unprepare(ctrlpriv->caam_aclk);
22042 disable_caam_mem:
22043 @@ -844,17 +829,6 @@ disable_caam_ipg:
22044 return ret;
22045 }
22046
22047 -static struct of_device_id caam_match[] = {
22048 - {
22049 - .compatible = "fsl,sec-v4.0",
22050 - },
22051 - {
22052 - .compatible = "fsl,sec4.0",
22053 - },
22054 - {},
22055 -};
22056 -MODULE_DEVICE_TABLE(of, caam_match);
22057 -
22058 static struct platform_driver caam_driver = {
22059 .driver = {
22060 .name = "caam",
22061 --- a/drivers/crypto/caam/ctrl.h
22062 +++ b/drivers/crypto/caam/ctrl.h
22063 @@ -10,4 +10,6 @@
22064 /* Prototypes for backend-level services exposed to APIs */
22065 int caam_get_era(void);
22066
22067 +extern bool caam_dpaa2;
22068 +
22069 #endif /* CTRL_H */
22070 --- a/drivers/crypto/caam/desc.h
22071 +++ b/drivers/crypto/caam/desc.h
22072 @@ -22,12 +22,6 @@
22073 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
22074 #define SEC4_SG_OFFSET_MASK 0x00001fff
22075
22076 -struct sec4_sg_entry {
22077 - u64 ptr;
22078 - u32 len;
22079 - u32 bpid_offset;
22080 -};
22081 -
22082 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
22083 #define MAX_CAAM_DESCSIZE 64
22084
22085 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
22086 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
22087 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
22088 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
22089 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
22090 #define CMD_STORE (0x0a << CMD_SHIFT)
22091 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
22092 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
22093 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
22094 #define HDR_ZRO 0x00008000
22095
22096 /* Start Index or SharedDesc Length */
22097 -#define HDR_START_IDX_MASK 0x3f
22098 #define HDR_START_IDX_SHIFT 16
22099 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
22100
22101 /* If shared descriptor header, 6-bit length */
22102 #define HDR_DESCLEN_SHR_MASK 0x3f
22103 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
22104 #define HDR_PROP_DNR 0x00000800
22105
22106 /* JobDesc/SharedDesc share property */
22107 -#define HDR_SD_SHARE_MASK 0x03
22108 #define HDR_SD_SHARE_SHIFT 8
22109 -#define HDR_JD_SHARE_MASK 0x07
22110 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
22111 #define HDR_JD_SHARE_SHIFT 8
22112 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
22113
22114 #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
22115 #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
22116 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
22117 #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
22118 #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
22119 #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
22120 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
22121 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
22122 #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
22123 #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
22124 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
22125 @@ -400,7 +395,7 @@ struct sec4_sg_entry {
22126 #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
22127 #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
22128 #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
22129 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
22130 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
22131 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
22132 #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
22133 #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
22134 @@ -1107,8 +1102,8 @@ struct sec4_sg_entry {
22135 /* For non-protocol/alg-only op commands */
22136 #define OP_ALG_TYPE_SHIFT 24
22137 #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
22138 -#define OP_ALG_TYPE_CLASS1 2
22139 -#define OP_ALG_TYPE_CLASS2 4
22140 +#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
22141 +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
22142
22143 #define OP_ALG_ALGSEL_SHIFT 16
22144 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
22145 @@ -1249,7 +1244,7 @@ struct sec4_sg_entry {
22146 #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
22147
22148 /* PKHA mode copy-memory functions */
22149 -#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
22150 +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
22151 #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
22152 #define OP_ALG_PKMODE_DST_REG_SHIFT 10
22153 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
22154 @@ -1445,7 +1440,7 @@ struct sec4_sg_entry {
22155 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
22156 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
22157 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
22158 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
22159 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
22160 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
22161 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
22162 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
22163 @@ -1629,4 +1624,31 @@ struct sec4_sg_entry {
22164 /* Frame Descriptor Command for Replacement Job Descriptor */
22165 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
22166
22167 +/* CHA Control Register bits */
22168 +#define CCTRL_RESET_CHA_ALL 0x1
22169 +#define CCTRL_RESET_CHA_AESA 0x2
22170 +#define CCTRL_RESET_CHA_DESA 0x4
22171 +#define CCTRL_RESET_CHA_AFHA 0x8
22172 +#define CCTRL_RESET_CHA_KFHA 0x10
22173 +#define CCTRL_RESET_CHA_SF8A 0x20
22174 +#define CCTRL_RESET_CHA_PKHA 0x40
22175 +#define CCTRL_RESET_CHA_MDHA 0x80
22176 +#define CCTRL_RESET_CHA_CRCA 0x100
22177 +#define CCTRL_RESET_CHA_RNG 0x200
22178 +#define CCTRL_RESET_CHA_SF9A 0x400
22179 +#define CCTRL_RESET_CHA_ZUCE 0x800
22180 +#define CCTRL_RESET_CHA_ZUCA 0x1000
22181 +#define CCTRL_UNLOAD_PK_A0 0x10000
22182 +#define CCTRL_UNLOAD_PK_A1 0x20000
22183 +#define CCTRL_UNLOAD_PK_A2 0x40000
22184 +#define CCTRL_UNLOAD_PK_A3 0x80000
22185 +#define CCTRL_UNLOAD_PK_B0 0x100000
22186 +#define CCTRL_UNLOAD_PK_B1 0x200000
22187 +#define CCTRL_UNLOAD_PK_B2 0x400000
22188 +#define CCTRL_UNLOAD_PK_B3 0x800000
22189 +#define CCTRL_UNLOAD_PK_N 0x1000000
22190 +#define CCTRL_UNLOAD_PK_A 0x4000000
22191 +#define CCTRL_UNLOAD_PK_B 0x8000000
22192 +#define CCTRL_UNLOAD_SBOX 0x10000000
22193 +
22194 #endif /* DESC_H */
22195 --- a/drivers/crypto/caam/desc_constr.h
22196 +++ b/drivers/crypto/caam/desc_constr.h
22197 @@ -4,6 +4,9 @@
22198 * Copyright 2008-2012 Freescale Semiconductor, Inc.
22199 */
22200
22201 +#ifndef DESC_CONSTR_H
22202 +#define DESC_CONSTR_H
22203 +
22204 #include "desc.h"
22205 #include "regs.h"
22206
22207 @@ -33,38 +36,39 @@
22208
22209 extern bool caam_little_end;
22210
22211 -static inline int desc_len(u32 *desc)
22212 +static inline int desc_len(u32 * const desc)
22213 {
22214 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
22215 }
22216
22217 -static inline int desc_bytes(void *desc)
22218 +static inline int desc_bytes(void * const desc)
22219 {
22220 return desc_len(desc) * CAAM_CMD_SZ;
22221 }
22222
22223 -static inline u32 *desc_end(u32 *desc)
22224 +static inline u32 *desc_end(u32 * const desc)
22225 {
22226 return desc + desc_len(desc);
22227 }
22228
22229 -static inline void *sh_desc_pdb(u32 *desc)
22230 +static inline void *sh_desc_pdb(u32 * const desc)
22231 {
22232 return desc + 1;
22233 }
22234
22235 -static inline void init_desc(u32 *desc, u32 options)
22236 +static inline void init_desc(u32 * const desc, u32 options)
22237 {
22238 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
22239 }
22240
22241 -static inline void init_sh_desc(u32 *desc, u32 options)
22242 +static inline void init_sh_desc(u32 * const desc, u32 options)
22243 {
22244 PRINT_POS;
22245 init_desc(desc, CMD_SHARED_DESC_HDR | options);
22246 }
22247
22248 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22249 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
22250 + size_t pdb_bytes)
22251 {
22252 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22253
22254 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
22255 options);
22256 }
22257
22258 -static inline void init_job_desc(u32 *desc, u32 options)
22259 +static inline void init_job_desc(u32 * const desc, u32 options)
22260 {
22261 init_desc(desc, CMD_DESC_HDR | options);
22262 }
22263
22264 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22265 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
22266 + size_t pdb_bytes)
22267 {
22268 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22269
22270 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
22271 }
22272
22273 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22274 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
22275 {
22276 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
22277
22278 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
22279 CAAM_PTR_SZ / CAAM_CMD_SZ);
22280 }
22281
22282 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22283 - u32 options)
22284 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
22285 + int len, u32 options)
22286 {
22287 PRINT_POS;
22288 init_job_desc(desc, HDR_SHARED | options |
22289 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
22290 append_ptr(desc, ptr);
22291 }
22292
22293 -static inline void append_data(u32 *desc, void *data, int len)
22294 +static inline void append_data(u32 * const desc, void *data, int len)
22295 {
22296 u32 *offset = desc_end(desc);
22297
22298 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
22299 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
22300 }
22301
22302 -static inline void append_cmd(u32 *desc, u32 command)
22303 +static inline void append_cmd(u32 * const desc, u32 command)
22304 {
22305 u32 *cmd = desc_end(desc);
22306
22307 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
22308
22309 #define append_u32 append_cmd
22310
22311 -static inline void append_u64(u32 *desc, u64 data)
22312 +static inline void append_u64(u32 * const desc, u64 data)
22313 {
22314 u32 *offset = desc_end(desc);
22315
22316 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
22317 }
22318
22319 /* Write command without affecting header, and return pointer to next word */
22320 -static inline u32 *write_cmd(u32 *desc, u32 command)
22321 +static inline u32 *write_cmd(u32 * const desc, u32 command)
22322 {
22323 *desc = cpu_to_caam32(command);
22324
22325 return desc + 1;
22326 }
22327
22328 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22329 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
22330 u32 command)
22331 {
22332 append_cmd(desc, command | len);
22333 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
22334 }
22335
22336 /* Write length after pointer, rather than inside command */
22337 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22338 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
22339 unsigned int len, u32 command)
22340 {
22341 append_cmd(desc, command);
22342 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
22343 append_cmd(desc, len);
22344 }
22345
22346 -static inline void append_cmd_data(u32 *desc, void *data, int len,
22347 +static inline void append_cmd_data(u32 * const desc, void *data, int len,
22348 u32 command)
22349 {
22350 append_cmd(desc, command | IMMEDIATE | len);
22351 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
22352 }
22353
22354 #define APPEND_CMD_RET(cmd, op) \
22355 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
22356 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
22357 { \
22358 u32 *cmd = desc_end(desc); \
22359 PRINT_POS; \
22360 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
22361 }
22362 APPEND_CMD_RET(jump, JUMP)
22363 APPEND_CMD_RET(move, MOVE)
22364 +APPEND_CMD_RET(moveb, MOVEB)
22365
22366 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
22367 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
22368 {
22369 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
22370 (desc_len(desc) - (jump_cmd - desc)));
22371 }
22372
22373 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22374 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
22375 {
22376 u32 val = caam32_to_cpu(*move_cmd);
22377
22378 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
22379 }
22380
22381 #define APPEND_CMD(cmd, op) \
22382 -static inline void append_##cmd(u32 *desc, u32 options) \
22383 +static inline void append_##cmd(u32 * const desc, u32 options) \
22384 { \
22385 PRINT_POS; \
22386 append_cmd(desc, CMD_##op | options); \
22387 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
22388 APPEND_CMD(operation, OPERATION)
22389
22390 #define APPEND_CMD_LEN(cmd, op) \
22391 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
22392 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
22393 + u32 options) \
22394 { \
22395 PRINT_POS; \
22396 append_cmd(desc, CMD_##op | len | options); \
22397 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
22398 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
22399
22400 #define APPEND_CMD_PTR(cmd, op) \
22401 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
22402 - u32 options) \
22403 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22404 + unsigned int len, u32 options) \
22405 { \
22406 PRINT_POS; \
22407 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
22408 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
22409 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
22410 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
22411
22412 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22413 - u32 options)
22414 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
22415 + unsigned int len, u32 options)
22416 {
22417 u32 cmd_src;
22418
22419 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
22420 }
22421
22422 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
22423 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
22424 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
22425 + dma_addr_t ptr, \
22426 unsigned int len, \
22427 u32 options) \
22428 { \
22429 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
22430 APPEND_SEQ_PTR_INTLEN(out, OUT)
22431
22432 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
22433 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22434 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22435 unsigned int len, u32 options) \
22436 { \
22437 PRINT_POS; \
22438 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
22439 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
22440
22441 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
22442 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
22443 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
22444 unsigned int len, u32 options) \
22445 { \
22446 PRINT_POS; \
22447 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
22448 * the size of its type
22449 */
22450 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
22451 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
22452 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22453 type len, u32 options) \
22454 { \
22455 PRINT_POS; \
22456 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
22457 * from length of immediate data provided, e.g., split keys
22458 */
22459 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
22460 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22461 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22462 unsigned int data_len, \
22463 unsigned int len, u32 options) \
22464 { \
22465 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
22466 APPEND_CMD_PTR_TO_IMM2(key, KEY);
22467
22468 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
22469 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
22470 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
22471 u32 options) \
22472 { \
22473 PRINT_POS; \
22474 @@ -426,3 +434,66 @@ do { \
22475 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
22476 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
22477 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
22478 +
22479 +/**
22480 + * struct alginfo - Container for algorithm details
22481 + * @algtype: algorithm selector; for valid values, see documentation of the
22482 + * functions where it is used.
22483 + * @keylen: length of the provided algorithm key, in bytes
22484 + * @keylen_pad: padded length of the provided algorithm key, in bytes
22485 + * @key: address where algorithm key resides; virtual address if key_inline
22486 + * is true, dma (bus) address if key_inline is false.
22487 + * @key_inline: true - key can be inlined in the descriptor; false - key is
22488 + * referenced by the descriptor
22489 + */
22490 +struct alginfo {
22491 + u32 algtype;
22492 + unsigned int keylen;
22493 + unsigned int keylen_pad;
22494 + union {
22495 + dma_addr_t key_dma;
22496 + void *key_virt;
22497 + };
22498 + bool key_inline;
22499 +};
22500 +
22501 +/**
22502 + * desc_inline_query() - Provide indications on which data items can be inlined
22503 + * and which shall be referenced in a shared descriptor.
22504 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
22505 + * excluding the data items to be inlined (or corresponding
22506 + * pointer if an item is not inlined). Each cnstr_* function that
22507 + * generates descriptors should have a define mentioning
22508 + * corresponding length.
22509 + * @jd_len: Maximum length of the job descriptor(s) that will be used
22510 + * together with the shared descriptor.
22511 + * @data_len: Array of lengths of the data items trying to be inlined
22512 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
22513 + * otherwise.
22514 + * @count: Number of data items (size of @data_len array); must be <= 32
22515 + *
22516 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
22517 + * check @inl_mask for details.
22518 + */
22519 +static inline int desc_inline_query(unsigned int sd_base_len,
22520 + unsigned int jd_len, unsigned int *data_len,
22521 + u32 *inl_mask, unsigned int count)
22522 +{
22523 + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
22524 + unsigned int i;
22525 +
22526 + *inl_mask = 0;
22527 + for (i = 0; (i < count) && (rem_bytes > 0); i++) {
22528 + if (rem_bytes - (int)(data_len[i] +
22529 + (count - i - 1) * CAAM_PTR_SZ) >= 0) {
22530 + rem_bytes -= data_len[i];
22531 + *inl_mask |= (1 << i);
22532 + } else {
22533 + rem_bytes -= CAAM_PTR_SZ;
22534 + }
22535 + }
22536 +
22537 + return (rem_bytes >= 0) ? 0 : -1;
22538 +}
22539 +
22540 +#endif /* DESC_CONSTR_H */
22541 --- /dev/null
22542 +++ b/drivers/crypto/caam/dpseci.c
22543 @@ -0,0 +1,859 @@
22544 +/*
22545 + * Copyright 2013-2016 Freescale Semiconductor Inc.
22546 + * Copyright 2017 NXP
22547 + *
22548 + * Redistribution and use in source and binary forms, with or without
22549 + * modification, are permitted provided that the following conditions are met:
22550 + * * Redistributions of source code must retain the above copyright
22551 + * notice, this list of conditions and the following disclaimer.
22552 + * * Redistributions in binary form must reproduce the above copyright
22553 + * notice, this list of conditions and the following disclaimer in the
22554 + * documentation and/or other materials provided with the distribution.
22555 + * * Neither the names of the above-listed copyright holders nor the
22556 + * names of any contributors may be used to endorse or promote products
22557 + * derived from this software without specific prior written permission.
22558 + *
22559 + *
22560 + * ALTERNATIVELY, this software may be distributed under the terms of the
22561 + * GNU General Public License ("GPL") as published by the Free Software
22562 + * Foundation, either version 2 of that License or (at your option) any
22563 + * later version.
22564 + *
22565 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22566 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22567 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22568 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22569 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22570 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22571 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22572 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22573 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22574 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22575 + * POSSIBILITY OF SUCH DAMAGE.
22576 + */
22577 +
22578 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
22579 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
22580 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
22581 +#include "dpseci.h"
22582 +#include "dpseci_cmd.h"
22583 +
22584 +/**
22585 + * dpseci_open() - Open a control session for the specified object
22586 + * @mc_io: Pointer to MC portal's I/O object
22587 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22588 + * @dpseci_id: DPSECI unique ID
22589 + * @token: Returned token; use in subsequent API calls
22590 + *
22591 + * This function can be used to open a control session for an already created
22592 + * object; an object may have been declared in the DPL or by calling the
22593 + * dpseci_create() function.
22594 + * This function returns a unique authentication token, associated with the
22595 + * specific object ID and the specific MC portal; this token must be used in all
22596 + * subsequent commands for this specific object.
22597 + *
22598 + * Return: '0' on success, error code otherwise
22599 + */
22600 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
22601 + u16 *token)
22602 +{
22603 + struct mc_command cmd = { 0 };
22604 + struct dpseci_cmd_open *cmd_params;
22605 + int err;
22606 +
22607 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
22608 + cmd_flags,
22609 + 0);
22610 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
22611 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
22612 + err = mc_send_command(mc_io, &cmd);
22613 + if (err)
22614 + return err;
22615 +
22616 + *token = mc_cmd_hdr_read_token(&cmd);
22617 +
22618 + return 0;
22619 +}
22620 +
22621 +/**
22622 + * dpseci_close() - Close the control session of the object
22623 + * @mc_io: Pointer to MC portal's I/O object
22624 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22625 + * @token: Token of DPSECI object
22626 + *
22627 + * After this function is called, no further operations are allowed on the
22628 + * object without opening a new control session.
22629 + *
22630 + * Return: '0' on success, error code otherwise
22631 + */
22632 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22633 +{
22634 + struct mc_command cmd = { 0 };
22635 +
22636 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
22637 + cmd_flags,
22638 + token);
22639 + return mc_send_command(mc_io, &cmd);
22640 +}
22641 +
22642 +/**
22643 + * dpseci_create() - Create the DPSECI object
22644 + * @mc_io: Pointer to MC portal's I/O object
22645 + * @dprc_token: Parent container token; '0' for default container
22646 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22647 + * @cfg: Configuration structure
22648 + * @obj_id: returned object id
22649 + *
22650 + * Create the DPSECI object, allocate required resources and perform required
22651 + * initialization.
22652 + *
22653 + * The object can be created either by declaring it in the DPL file, or by
22654 + * calling this function.
22655 + *
22656 + * The function accepts an authentication token of a parent container that this
22657 + * object should be assigned to. The token can be '0' so the object will be
22658 + * assigned to the default container.
22659 + * The newly created object can be opened with the returned object id and using
22660 + * the container's associated tokens and MC portals.
22661 + *
22662 + * Return: '0' on success, error code otherwise
22663 + */
22664 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22665 + const struct dpseci_cfg *cfg, u32 *obj_id)
22666 +{
22667 + struct mc_command cmd = { 0 };
22668 + struct dpseci_cmd_create *cmd_params;
22669 + int i, err;
22670 +
22671 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
22672 + cmd_flags,
22673 + dprc_token);
22674 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
22675 + for (i = 0; i < 8; i++)
22676 + cmd_params->priorities[i] = cfg->priorities[i];
22677 + cmd_params->num_tx_queues = cfg->num_tx_queues;
22678 + cmd_params->num_rx_queues = cfg->num_rx_queues;
22679 + cmd_params->options = cpu_to_le32(cfg->options);
22680 + err = mc_send_command(mc_io, &cmd);
22681 + if (err)
22682 + return err;
22683 +
22684 + *obj_id = mc_cmd_read_object_id(&cmd);
22685 +
22686 + return 0;
22687 +}
22688 +
22689 +/**
22690 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
22691 + * @mc_io: Pointer to MC portal's I/O object
22692 + * @dprc_token: Parent container token; '0' for default container
22693 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22694 + * @object_id: The object id; it must be a valid id within the container that
22695 + * created this object
22696 + *
22697 + * The function accepts the authentication token of the parent container that
22698 + * created the object (not the one that currently owns the object). The object
22699 + * is searched within the parent using the provided 'object_id'.
22700 + * All tokens to the object must be closed before calling destroy.
22701 + *
22702 + * Return: '0' on success, error code otherwise
22703 + */
22704 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22705 + u32 object_id)
22706 +{
22707 + struct mc_command cmd = { 0 };
22708 + struct dpseci_cmd_destroy *cmd_params;
22709 +
22710 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
22711 + cmd_flags,
22712 + dprc_token);
22713 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
22714 + cmd_params->object_id = cpu_to_le32(object_id);
22715 +
22716 + return mc_send_command(mc_io, &cmd);
22717 +}
22718 +
22719 +/**
22720 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
22721 + * @mc_io: Pointer to MC portal's I/O object
22722 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22723 + * @token: Token of DPSECI object
22724 + *
22725 + * Return: '0' on success, error code otherwise
22726 + */
22727 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22728 +{
22729 + struct mc_command cmd = { 0 };
22730 +
22731 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
22732 + cmd_flags,
22733 + token);
22734 + return mc_send_command(mc_io, &cmd);
22735 +}
22736 +
22737 +/**
22738 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
22739 + * @mc_io: Pointer to MC portal's I/O object
22740 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22741 + * @token: Token of DPSECI object
22742 + *
22743 + * Return: '0' on success, error code otherwise
22744 + */
22745 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22746 +{
22747 + struct mc_command cmd = { 0 };
22748 +
22749 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
22750 + cmd_flags,
22751 + token);
22752 +
22753 + return mc_send_command(mc_io, &cmd);
22754 +}
22755 +
22756 +/**
22757 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
22758 + * @mc_io: Pointer to MC portal's I/O object
22759 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22760 + * @token: Token of DPSECI object
22761 + * @en: Returns '1' if object is enabled; '0' otherwise
22762 + *
22763 + * Return: '0' on success, error code otherwise
22764 + */
22765 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22766 + int *en)
22767 +{
22768 + struct mc_command cmd = { 0 };
22769 + struct dpseci_rsp_is_enabled *rsp_params;
22770 + int err;
22771 +
22772 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
22773 + cmd_flags,
22774 + token);
22775 + err = mc_send_command(mc_io, &cmd);
22776 + if (err)
22777 + return err;
22778 +
22779 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
22780 + *en = le32_to_cpu(rsp_params->is_enabled);
22781 +
22782 + return 0;
22783 +}
22784 +
22785 +/**
22786 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
22787 + * @mc_io: Pointer to MC portal's I/O object
22788 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22789 + * @token: Token of DPSECI object
22790 + *
22791 + * Return: '0' on success, error code otherwise
22792 + */
22793 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22794 +{
22795 + struct mc_command cmd = { 0 };
22796 +
22797 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
22798 + cmd_flags,
22799 + token);
22800 +
22801 + return mc_send_command(mc_io, &cmd);
22802 +}
22803 +
22804 +/**
22805 + * dpseci_get_irq_enable() - Get overall interrupt state
22806 + * @mc_io: Pointer to MC portal's I/O object
22807 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22808 + * @token: Token of DPSECI object
22809 + * @irq_index: The interrupt index to configure
22810 + * @en: Returned Interrupt state - enable = 1, disable = 0
22811 + *
22812 + * Return: '0' on success, error code otherwise
22813 + */
22814 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22815 + u8 irq_index, u8 *en)
22816 +{
22817 + struct mc_command cmd = { 0 };
22818 + struct dpseci_cmd_irq_enable *cmd_params;
22819 + struct dpseci_rsp_get_irq_enable *rsp_params;
22820 + int err;
22821 +
22822 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
22823 + cmd_flags,
22824 + token);
22825 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22826 + cmd_params->irq_index = irq_index;
22827 + err = mc_send_command(mc_io, &cmd);
22828 + if (err)
22829 + return err;
22830 +
22831 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
22832 + *en = rsp_params->enable_state;
22833 +
22834 + return 0;
22835 +}
22836 +
22837 +/**
22838 + * dpseci_set_irq_enable() - Set overall interrupt state.
22839 + * @mc_io: Pointer to MC portal's I/O object
22840 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22841 + * @token: Token of DPSECI object
22842 + * @irq_index: The interrupt index to configure
22843 + * @en: Interrupt state - enable = 1, disable = 0
22844 + *
22845 + * Allows GPP software to control when interrupts are generated.
22846 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22847 + * overall interrupt state. If the interrupt is disabled, no causes will cause
22848 + * an interrupt.
22849 + *
22850 + * Return: '0' on success, error code otherwise
22851 + */
22852 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22853 + u8 irq_index, u8 en)
22854 +{
22855 + struct mc_command cmd = { 0 };
22856 + struct dpseci_cmd_irq_enable *cmd_params;
22857 +
22858 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
22859 + cmd_flags,
22860 + token);
22861 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22862 + cmd_params->irq_index = irq_index;
22863 + cmd_params->enable_state = en;
22864 +
22865 + return mc_send_command(mc_io, &cmd);
22866 +}
22867 +
22868 +/**
22869 + * dpseci_get_irq_mask() - Get interrupt mask.
22870 + * @mc_io: Pointer to MC portal's I/O object
22871 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22872 + * @token: Token of DPSECI object
22873 + * @irq_index: The interrupt index to configure
22874 + * @mask: Returned event mask to trigger interrupt
22875 + *
22876 + * Every interrupt can have up to 32 causes and the interrupt model supports
22877 + * masking/unmasking each cause independently.
22878 + *
22879 + * Return: '0' on success, error code otherwise
22880 + */
22881 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22882 + u8 irq_index, u32 *mask)
22883 +{
22884 + struct mc_command cmd = { 0 };
22885 + struct dpseci_cmd_irq_mask *cmd_params;
22886 + int err;
22887 +
22888 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
22889 + cmd_flags,
22890 + token);
22891 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22892 + cmd_params->irq_index = irq_index;
22893 + err = mc_send_command(mc_io, &cmd);
22894 + if (err)
22895 + return err;
22896 +
22897 + *mask = le32_to_cpu(cmd_params->mask);
22898 +
22899 + return 0;
22900 +}
22901 +
22902 +/**
22903 + * dpseci_set_irq_mask() - Set interrupt mask.
22904 + * @mc_io: Pointer to MC portal's I/O object
22905 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22906 + * @token: Token of DPSECI object
22907 + * @irq_index: The interrupt index to configure
22908 + * @mask: event mask to trigger interrupt;
22909 + * each bit:
22910 + * 0 = ignore event
22911 + * 1 = consider event for asserting IRQ
22912 + *
22913 + * Every interrupt can have up to 32 causes and the interrupt model supports
22914 + * masking/unmasking each cause independently
22915 + *
22916 + * Return: '0' on success, error code otherwise
22917 + */
22918 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22919 + u8 irq_index, u32 mask)
22920 +{
22921 + struct mc_command cmd = { 0 };
22922 + struct dpseci_cmd_irq_mask *cmd_params;
22923 +
22924 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
22925 + cmd_flags,
22926 + token);
22927 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22928 + cmd_params->mask = cpu_to_le32(mask);
22929 + cmd_params->irq_index = irq_index;
22930 +
22931 + return mc_send_command(mc_io, &cmd);
22932 +}
22933 +
22934 +/**
22935 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
22936 + * @mc_io: Pointer to MC portal's I/O object
22937 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22938 + * @token: Token of DPSECI object
22939 + * @irq_index: The interrupt index to configure
22940 + * @status: Returned interrupts status - one bit per cause:
22941 + * 0 = no interrupt pending
22942 + * 1 = interrupt pending
22943 + *
22944 + * Return: '0' on success, error code otherwise
22945 + */
22946 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22947 + u8 irq_index, u32 *status)
22948 +{
22949 + struct mc_command cmd = { 0 };
22950 + struct dpseci_cmd_irq_status *cmd_params;
22951 + int err;
22952 +
22953 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
22954 + cmd_flags,
22955 + token);
22956 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
22957 + cmd_params->status = cpu_to_le32(*status);
22958 + cmd_params->irq_index = irq_index;
22959 + err = mc_send_command(mc_io, &cmd);
22960 + if (err)
22961 + return err;
22962 +
22963 + *status = le32_to_cpu(cmd_params->status);
22964 +
22965 + return 0;
22966 +}
22967 +
22968 +/**
22969 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
22970 + * @mc_io: Pointer to MC portal's I/O object
22971 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22972 + * @token: Token of DPSECI object
22973 + * @irq_index: The interrupt index to configure
22974 + * @status: bits to clear (W1C) - one bit per cause:
22975 + * 0 = don't change
22976 + * 1 = clear status bit
22977 + *
22978 + * Return: '0' on success, error code otherwise
22979 + */
22980 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22981 + u8 irq_index, u32 status)
22982 +{
22983 + struct mc_command cmd = { 0 };
22984 + struct dpseci_cmd_irq_status *cmd_params;
22985 +
22986 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
22987 + cmd_flags,
22988 + token);
22989 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
22990 + cmd_params->status = cpu_to_le32(status);
22991 + cmd_params->irq_index = irq_index;
22992 +
22993 + return mc_send_command(mc_io, &cmd);
22994 +}
22995 +
22996 +/**
22997 + * dpseci_get_attributes() - Retrieve DPSECI attributes
22998 + * @mc_io: Pointer to MC portal's I/O object
22999 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23000 + * @token: Token of DPSECI object
23001 + * @attr: Returned object's attributes
23002 + *
23003 + * Return: '0' on success, error code otherwise
23004 + */
23005 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23006 + struct dpseci_attr *attr)
23007 +{
23008 + struct mc_command cmd = { 0 };
23009 + struct dpseci_rsp_get_attributes *rsp_params;
23010 + int err;
23011 +
23012 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
23013 + cmd_flags,
23014 + token);
23015 + err = mc_send_command(mc_io, &cmd);
23016 + if (err)
23017 + return err;
23018 +
23019 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
23020 + attr->id = le32_to_cpu(rsp_params->id);
23021 + attr->num_tx_queues = rsp_params->num_tx_queues;
23022 + attr->num_rx_queues = rsp_params->num_rx_queues;
23023 + attr->options = le32_to_cpu(rsp_params->options);
23024 +
23025 + return 0;
23026 +}
23027 +
23028 +/**
23029 + * dpseci_set_rx_queue() - Set Rx queue configuration
23030 + * @mc_io: Pointer to MC portal's I/O object
23031 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23032 + * @token: Token of DPSECI object
23033 + * @queue: Select the queue relative to number of priorities configured at
23034 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
23035 + * Rx queues identically.
23036 + * @cfg: Rx queue configuration
23037 + *
23038 + * Return: '0' on success, error code otherwise
23039 + */
23040 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23041 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
23042 +{
23043 + struct mc_command cmd = { 0 };
23044 + struct dpseci_cmd_queue *cmd_params;
23045 +
23046 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
23047 + cmd_flags,
23048 + token);
23049 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23050 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23051 + cmd_params->priority = cfg->dest_cfg.priority;
23052 + cmd_params->queue = queue;
23053 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
23054 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
23055 + cmd_params->options = cpu_to_le32(cfg->options);
23056 + cmd_params->order_preservation_en =
23057 + cpu_to_le32(cfg->order_preservation_en);
23058 +
23059 + return mc_send_command(mc_io, &cmd);
23060 +}
23061 +
23062 +/**
23063 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
23064 + * @mc_io: Pointer to MC portal's I/O object
23065 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23066 + * @token: Token of DPSECI object
23067 + * @queue:	Select the queue relative to the number of priorities configured at
23068 + * DPSECI creation
23069 + * @attr: Returned Rx queue attributes
23070 + *
23071 + * Return: '0' on success, error code otherwise
23072 + */
23073 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23074 + u8 queue, struct dpseci_rx_queue_attr *attr)
23075 +{
23076 + struct mc_command cmd = { 0 };
23077 + struct dpseci_cmd_queue *cmd_params;
23078 + int err;
23079 +
23080 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
23081 + cmd_flags,
23082 + token);
23083 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23084 + cmd_params->queue = queue;
23085 + err = mc_send_command(mc_io, &cmd);
23086 + if (err)
23087 + return err;
23088 +
23089 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
23090 + attr->dest_cfg.priority = cmd_params->priority;
23091 + attr->dest_cfg.dest_type = cmd_params->dest_type;
23092 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
23093 + attr->fqid = le32_to_cpu(cmd_params->fqid);
23094 + attr->order_preservation_en =
23095 + le32_to_cpu(cmd_params->order_preservation_en);
23096 +
23097 + return 0;
23098 +}
23099 +
23100 +/**
23101 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
23102 + * @mc_io: Pointer to MC portal's I/O object
23103 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23104 + * @token: Token of DPSECI object
23105 + * @queue:	Select the queue relative to the number of priorities configured at
23106 + * DPSECI creation
23107 + * @attr: Returned Tx queue attributes
23108 + *
23109 + * Return: '0' on success, error code otherwise
23110 + */
23111 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23112 + u8 queue, struct dpseci_tx_queue_attr *attr)
23113 +{
23114 + struct mc_command cmd = { 0 };
23115 + struct dpseci_cmd_queue *cmd_params;
23116 + struct dpseci_rsp_get_tx_queue *rsp_params;
23117 + int err;
23118 +
23119 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
23120 + cmd_flags,
23121 + token);
23122 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23123 + cmd_params->queue = queue;
23124 + err = mc_send_command(mc_io, &cmd);
23125 + if (err)
23126 + return err;
23127 +
23128 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
23129 + attr->fqid = le32_to_cpu(rsp_params->fqid);
23130 + attr->priority = rsp_params->priority;
23131 +
23132 + return 0;
23133 +}
23134 +
23135 +/**
23136 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
23137 + * @mc_io: Pointer to MC portal's I/O object
23138 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23139 + * @token: Token of DPSECI object
23140 + * @attr: Returned SEC attributes
23141 + *
23142 + * Return: '0' on success, error code otherwise
23143 + */
23144 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23145 + struct dpseci_sec_attr *attr)
23146 +{
23147 + struct mc_command cmd = { 0 };
23148 + struct dpseci_rsp_get_sec_attr *rsp_params;
23149 + int err;
23150 +
23151 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
23152 + cmd_flags,
23153 + token);
23154 + err = mc_send_command(mc_io, &cmd);
23155 + if (err)
23156 + return err;
23157 +
23158 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
23159 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
23160 + attr->major_rev = rsp_params->major_rev;
23161 + attr->minor_rev = rsp_params->minor_rev;
23162 + attr->era = rsp_params->era;
23163 + attr->deco_num = rsp_params->deco_num;
23164 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
23165 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
23166 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
23167 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
23168 + attr->crc_acc_num = rsp_params->crc_acc_num;
23169 + attr->pk_acc_num = rsp_params->pk_acc_num;
23170 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
23171 + attr->rng_acc_num = rsp_params->rng_acc_num;
23172 + attr->md_acc_num = rsp_params->md_acc_num;
23173 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
23174 + attr->des_acc_num = rsp_params->des_acc_num;
23175 + attr->aes_acc_num = rsp_params->aes_acc_num;
23176 +
23177 + return 0;
23178 +}
23179 +
23180 +/**
23181 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
23182 + * @mc_io: Pointer to MC portal's I/O object
23183 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23184 + * @token: Token of DPSECI object
23185 + * @counters: Returned SEC counters
23186 + *
23187 + * Return: '0' on success, error code otherwise
23188 + */
23189 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23190 + struct dpseci_sec_counters *counters)
23191 +{
23192 + struct mc_command cmd = { 0 };
23193 + struct dpseci_rsp_get_sec_counters *rsp_params;
23194 + int err;
23195 +
23196 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
23197 + cmd_flags,
23198 + token);
23199 + err = mc_send_command(mc_io, &cmd);
23200 + if (err)
23201 + return err;
23202 +
23203 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
23204 + counters->dequeued_requests =
23205 + le64_to_cpu(rsp_params->dequeued_requests);
23206 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
23207 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
23208 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
23209 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
23210 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
23211 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
23212 +
23213 + return 0;
23214 +}
23215 +
23216 +/**
23217 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
23218 + * @mc_io: Pointer to MC portal's I/O object
23219 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23220 + * @major_ver: Major version of data path sec API
23221 + * @minor_ver: Minor version of data path sec API
23222 + *
23223 + * Return: '0' on success, error code otherwise
23224 + */
23225 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23226 + u16 *major_ver, u16 *minor_ver)
23227 +{
23228 + struct mc_command cmd = { 0 };
23229 + struct dpseci_rsp_get_api_version *rsp_params;
23230 + int err;
23231 +
23232 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
23233 + cmd_flags, 0);
23234 + err = mc_send_command(mc_io, &cmd);
23235 + if (err)
23236 + return err;
23237 +
23238 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
23239 + *major_ver = le16_to_cpu(rsp_params->major);
23240 + *minor_ver = le16_to_cpu(rsp_params->minor);
23241 +
23242 + return 0;
23243 +}
23244 +
23245 +/**
23246 + * dpseci_set_opr() - Set Order Restoration configuration
23247 + * @mc_io: Pointer to MC portal's I/O object
23248 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23249 + * @token: Token of DPSECI object
23250 + * @index: The queue index
23251 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
23252 + * OPR_OPT_RETIRE
23253 + * @cfg: Configuration options for the OPR
23254 + *
23255 + * Return: '0' on success, error code otherwise
23256 + */
23257 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23258 + u8 options, struct opr_cfg *cfg)
23259 +{
23260 + struct mc_command cmd = { 0 };
23261 + struct dpseci_cmd_opr *cmd_params;
23262 +
23263 + cmd.header = mc_encode_cmd_header(
23264 + DPSECI_CMDID_SET_OPR,
23265 + cmd_flags,
23266 + token);
23267 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23268 + cmd_params->index = index;
23269 + cmd_params->options = options;
23270 + cmd_params->oloe = cfg->oloe;
23271 + cmd_params->oeane = cfg->oeane;
23272 + cmd_params->olws = cfg->olws;
23273 + cmd_params->oa = cfg->oa;
23274 + cmd_params->oprrws = cfg->oprrws;
23275 +
23276 + return mc_send_command(mc_io, &cmd);
23277 +}
23278 +
23279 +/**
23280 + * dpseci_get_opr() - Retrieve Order Restoration config and query
23281 + * @mc_io: Pointer to MC portal's I/O object
23282 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23283 + * @token: Token of DPSECI object
23284 + * @index: The queue index
23285 + * @cfg: Returned OPR configuration
23286 + * @qry: Returned OPR query
23287 + *
23288 + * Return: '0' on success, error code otherwise
23289 + */
23290 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23291 + struct opr_cfg *cfg, struct opr_qry *qry)
23292 +{
23293 + struct mc_command cmd = { 0 };
23294 + struct dpseci_cmd_opr *cmd_params;
23295 + struct dpseci_rsp_get_opr *rsp_params;
23296 + int err;
23297 +
23298 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
23299 + cmd_flags,
23300 + token);
23301 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23302 + cmd_params->index = index;
23303 + err = mc_send_command(mc_io, &cmd);
23304 + if (err)
23305 + return err;
23306 +
23307 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
23308 + qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
23309 + qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
23310 + cfg->oloe = rsp_params->oloe;
23311 + cfg->oeane = rsp_params->oeane;
23312 + cfg->olws = rsp_params->olws;
23313 + cfg->oa = rsp_params->oa;
23314 + cfg->oprrws = rsp_params->oprrws;
23315 + qry->nesn = le16_to_cpu(rsp_params->nesn);
23316 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
23317 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
23318 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
23319 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
23320 + qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
23321 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
23322 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
23323 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
23324 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
23325 +
23326 + return 0;
23327 +}
23328 +
23329 +/**
23330 + * dpseci_set_congestion_notification() - Set congestion group
23331 + * notification configuration
23332 + * @mc_io: Pointer to MC portal's I/O object
23333 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23334 + * @token: Token of DPSECI object
23335 + * @cfg: congestion notification configuration
23336 + *
23337 + * Return: '0' on success, error code otherwise
23338 + */
23339 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23340 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
23341 +{
23342 + struct mc_command cmd = { 0 };
23343 + struct dpseci_cmd_congestion_notification *cmd_params;
23344 +
23345 + cmd.header = mc_encode_cmd_header(
23346 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
23347 + cmd_flags,
23348 + token);
23349 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23350 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23351 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
23352 + cmd_params->priority = cfg->dest_cfg.priority;
23353 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
23354 + cfg->dest_cfg.dest_type);
23355 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
23356 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
23357 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
23358 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
23359 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
23360 +
23361 + return mc_send_command(mc_io, &cmd);
23362 +}
23363 +
23364 +/**
23365 + * dpseci_get_congestion_notification() - Get congestion group notification
23366 + * configuration
23367 + * @mc_io: Pointer to MC portal's I/O object
23368 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23369 + * @token: Token of DPSECI object
23370 + * @cfg: congestion notification configuration
23371 + *
23372 + * Return: '0' on success, error code otherwise
23373 + */
23374 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23375 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
23376 +{
23377 + struct mc_command cmd = { 0 };
23378 + struct dpseci_cmd_congestion_notification *rsp_params;
23379 + int err;
23380 +
23381 + cmd.header = mc_encode_cmd_header(
23382 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
23383 + cmd_flags,
23384 + token);
23385 + err = mc_send_command(mc_io, &cmd);
23386 + if (err)
23387 + return err;
23388 +
23389 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23390 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
23391 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
23392 + cfg->dest_cfg.priority = rsp_params->priority;
23393 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
23394 + CGN_DEST_TYPE);
23395 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
23396 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
23397 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
23398 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
23399 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
23400 +
23401 + return 0;
23402 +}
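All of the command wrappers above follow the same shape: encode a header for the command ID, fill a little-endian parameter block, send it through the MC portal, and unpack the response. As an illustration only (not part of the patch), a DPAA2 driver's probe path might exercise them roughly like this, assuming 'ls_dev' is the fsl_mc_device supplied by the fsl-mc bus:

	static int dpseci_probe_sketch(struct fsl_mc_device *ls_dev)
	{
		struct dpseci_attr attr;
		u16 token;
		int err;

		err = dpseci_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, &token);
		if (err)
			return err;

		err = dpseci_get_attributes(ls_dev->mc_io, 0, token, &attr);
		if (!err)
			dev_info(&ls_dev->dev, "dpseci.%d: %d Tx queues, %d Rx queues\n",
				 attr.id, attr.num_tx_queues, attr.num_rx_queues);

		dpseci_close(ls_dev->mc_io, 0, token);
		return err;
	}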
23403 --- /dev/null
23404 +++ b/drivers/crypto/caam/dpseci.h
23405 @@ -0,0 +1,395 @@
23406 +/*
23407 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23408 + * Copyright 2017 NXP
23409 + *
23410 + * Redistribution and use in source and binary forms, with or without
23411 + * modification, are permitted provided that the following conditions are met:
23412 + * * Redistributions of source code must retain the above copyright
23413 + * notice, this list of conditions and the following disclaimer.
23414 + * * Redistributions in binary form must reproduce the above copyright
23415 + * notice, this list of conditions and the following disclaimer in the
23416 + * documentation and/or other materials provided with the distribution.
23417 + * * Neither the names of the above-listed copyright holders nor the
23418 + * names of any contributors may be used to endorse or promote products
23419 + * derived from this software without specific prior written permission.
23420 + *
23421 + *
23422 + * ALTERNATIVELY, this software may be distributed under the terms of the
23423 + * GNU General Public License ("GPL") as published by the Free Software
23424 + * Foundation, either version 2 of that License or (at your option) any
23425 + * later version.
23426 + *
23427 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23428 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23429 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23430 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23431 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23432 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23433 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23434 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23435 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23436 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23437 + * POSSIBILITY OF SUCH DAMAGE.
23438 + */
23439 +#ifndef _DPSECI_H_
23440 +#define _DPSECI_H_
23441 +
23442 +/*
23443 + * Data Path SEC Interface API
23444 + * Contains initialization APIs and runtime control APIs for DPSECI
23445 + */
23446 +
23447 +struct fsl_mc_io;
23448 +struct opr_cfg;
23449 +struct opr_qry;
23450 +
23451 +/**
23452 + * General DPSECI macros
23453 + */
23454 +
23455 +/**
23456 + * Maximum number of Tx/Rx priorities per DPSECI object
23457 + */
23458 +#define DPSECI_PRIO_NUM 8
23459 +
23460 +/**
23461 + * All queues considered; see dpseci_set_rx_queue()
23462 + */
23463 +#define DPSECI_ALL_QUEUES (u8)(-1)
23464 +
23465 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
23466 + u16 *token);
23467 +
23468 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23469 +
23470 +/**
23471 + * Enable the Congestion Group support
23472 + */
23473 +#define DPSECI_OPT_HAS_CG 0x000020
23474 +
23475 +/**
23476 + * Enable the Order Restoration support
23477 + */
23478 +#define DPSECI_OPT_HAS_OPR 0x000040
23479 +
23480 +/**
23481 + * Order Point Records are shared for the entire DPSECI
23482 + */
23483 +#define DPSECI_OPT_OPR_SHARED 0x000080
23484 +
23485 +/**
23486 + * struct dpseci_cfg - Structure representing DPSECI configuration
23487 + * @options: Any combination of the following options:
23488 + * DPSECI_OPT_HAS_CG
23489 + * DPSECI_OPT_HAS_OPR
23490 + * DPSECI_OPT_OPR_SHARED
23491 + * @num_tx_queues: number of queues towards the SEC
23492 + * @num_rx_queues: number of queues back from the SEC
23493 + * @priorities: Priorities for the SEC hardware processing;
23494 + *		each entry in the array is the priority of the tx queue
23495 + *		towards the SEC;
23496 + *		valid priorities are 1-8
23497 + */
23498 +struct dpseci_cfg {
23499 + u32 options;
23500 + u8 num_tx_queues;
23501 + u8 num_rx_queues;
23502 + u8 priorities[DPSECI_PRIO_NUM];
23503 +};
23504 +
23505 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23506 + const struct dpseci_cfg *cfg, u32 *obj_id);
23507 +
23508 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23509 + u32 object_id);
23510 +
23511 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23512 +
23513 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23514 +
23515 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23516 + int *en);
23517 +
23518 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23519 +
23520 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23521 + u8 irq_index, u8 *en);
23522 +
23523 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23524 + u8 irq_index, u8 en);
23525 +
23526 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23527 + u8 irq_index, u32 *mask);
23528 +
23529 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23530 + u8 irq_index, u32 mask);
23531 +
23532 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23533 + u8 irq_index, u32 *status);
23534 +
23535 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23536 + u8 irq_index, u32 status);
23537 +
23538 +/**
23539 + * struct dpseci_attr - Structure representing DPSECI attributes
23540 + * @id: DPSECI object ID
23541 + * @num_tx_queues: number of queues towards the SEC
23542 + * @num_rx_queues: number of queues back from the SEC
23543 + * @options: any combination of the following options:
23544 + * DPSECI_OPT_HAS_CG
23545 + * DPSECI_OPT_HAS_OPR
23546 + * DPSECI_OPT_OPR_SHARED
23547 + */
23548 +struct dpseci_attr {
23549 + int id;
23550 + u8 num_tx_queues;
23551 + u8 num_rx_queues;
23552 + u32 options;
23553 +};
23554 +
23555 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23556 + struct dpseci_attr *attr);
23557 +
23558 +/**
23559 + * enum dpseci_dest - DPSECI destination types
23560 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
23561 + * and does not generate FQDAN notifications; user is expected to dequeue
23562 + * from the queue based on polling or other user-defined method
23563 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
23564 + * notifications to the specified DPIO; user is expected to dequeue from
23565 + * the queue only after notification is received
23566 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
23567 + * FQDAN notifications, but is connected to the specified DPCON object;
23568 + * user is expected to dequeue from the DPCON channel
23569 + */
23570 +enum dpseci_dest {
23571 + DPSECI_DEST_NONE = 0,
23572 + DPSECI_DEST_DPIO,
23573 + DPSECI_DEST_DPCON
23574 +};
23575 +
23576 +/**
23577 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
23578 + * @dest_type: Destination type
23579 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
23580 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
23581 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
23582 + * not relevant for 'DPSECI_DEST_NONE' option
23583 + */
23584 +struct dpseci_dest_cfg {
23585 + enum dpseci_dest dest_type;
23586 + int dest_id;
23587 + u8 priority;
23588 +};
23589 +
23590 +/**
23591 + * DPSECI queue modification options
23592 + */
23593 +
23594 +/**
23595 + * Select to modify the user's context associated with the queue
23596 + */
23597 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
23598 +
23599 +/**
23600 + * Select to modify the queue's destination
23601 + */
23602 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
23603 +
23604 +/**
23605 + * Select to modify the queue's order preservation
23606 + */
23607 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
23608 +
23609 +/**
23610 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
23611 + * @options: Flags representing the suggested modifications to the queue;
23612 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
23613 + * @order_preservation_en: order preservation configuration for the rx queue;
23614 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
23615 + * @user_ctx: User context value provided in the frame descriptor of each
23616 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
23617 + * in 'options'
23618 + * @dest_cfg: Queue destination parameters; valid only if
23619 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
23620 + */
23621 +struct dpseci_rx_queue_cfg {
23622 + u32 options;
23623 + int order_preservation_en;
23624 + u64 user_ctx;
23625 + struct dpseci_dest_cfg dest_cfg;
23626 +};
23627 +
23628 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23629 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
23630 +
23631 +/**
23632 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
23633 + * @user_ctx: User context value provided in the frame descriptor of each
23634 + * dequeued frame
23635 + * @order_preservation_en: Status of the order preservation configuration on the
23636 + * queue
23637 + * @dest_cfg: Queue destination configuration
23638 + * @fqid: Virtual FQID value to be used for dequeue operations
23639 + */
23640 +struct dpseci_rx_queue_attr {
23641 + u64 user_ctx;
23642 + int order_preservation_en;
23643 + struct dpseci_dest_cfg dest_cfg;
23644 + u32 fqid;
23645 +};
23646 +
23647 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23648 + u8 queue, struct dpseci_rx_queue_attr *attr);
23649 +
23650 +/**
23651 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
23652 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
23653 + * @priority: SEC hardware processing priority for the queue
23654 + */
23655 +struct dpseci_tx_queue_attr {
23656 + u32 fqid;
23657 + u8 priority;
23658 +};
23659 +
23660 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23661 + u8 queue, struct dpseci_tx_queue_attr *attr);
23662 +
23663 +/**
23664 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
23665 + * hardware accelerator
23666 + * @ip_id: ID for SEC
23667 + * @major_rev: Major revision number for SEC
23668 + * @minor_rev: Minor revision number for SEC
23669 + * @era: SEC Era
23670 + * @deco_num: The number of copies of the DECO that are implemented in this
23671 + * version of SEC
23672 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
23673 + * version of SEC
23674 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
23675 + * version of SEC
23676 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
23677 + * implemented in this version of SEC
23678 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
23679 + * implemented in this version of SEC
23680 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
23681 + * this version of SEC
23682 + * @pk_acc_num: The number of copies of the Public Key module that are
23683 + * implemented in this version of SEC
23684 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
23685 + * implemented in this version of SEC
23686 + * @rng_acc_num: The number of copies of the Random Number Generator that are
23687 + * implemented in this version of SEC
23688 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
23689 + * implemented in this version of SEC
23690 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
23691 + * in this version of SEC
23692 + * @des_acc_num: The number of copies of the DES module that are implemented in
23693 + * this version of SEC
23694 + * @aes_acc_num: The number of copies of the AES module that are implemented in
23695 + * this version of SEC
23696 + */
23697 +struct dpseci_sec_attr {
23698 + u16 ip_id;
23699 + u8 major_rev;
23700 + u8 minor_rev;
23701 + u8 era;
23702 + u8 deco_num;
23703 + u8 zuc_auth_acc_num;
23704 + u8 zuc_enc_acc_num;
23705 + u8 snow_f8_acc_num;
23706 + u8 snow_f9_acc_num;
23707 + u8 crc_acc_num;
23708 + u8 pk_acc_num;
23709 + u8 kasumi_acc_num;
23710 + u8 rng_acc_num;
23711 + u8 md_acc_num;
23712 + u8 arc4_acc_num;
23713 + u8 des_acc_num;
23714 + u8 aes_acc_num;
23715 +};
23716 +
23717 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23718 + struct dpseci_sec_attr *attr);
23719 +
23720 +/**
23721 + * struct dpseci_sec_counters - Structure representing global SEC counters,
23722 + *	not per-DPSECI counters
23723 + * @dequeued_requests: Number of Requests Dequeued
23724 + * @ob_enc_requests: Number of Outbound Encrypt Requests
23725 + * @ib_dec_requests: Number of Inbound Decrypt Requests
23726 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
23727 + * @ob_prot_bytes: Number of Outbound Bytes Protected
23728 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
23729 + * @ib_valid_bytes: Number of Inbound Bytes Validated
23730 + */
23731 +struct dpseci_sec_counters {
23732 + u64 dequeued_requests;
23733 + u64 ob_enc_requests;
23734 + u64 ib_dec_requests;
23735 + u64 ob_enc_bytes;
23736 + u64 ob_prot_bytes;
23737 + u64 ib_dec_bytes;
23738 + u64 ib_valid_bytes;
23739 +};
23740 +
23741 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23742 + struct dpseci_sec_counters *counters);
23743 +
23744 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23745 + u16 *major_ver, u16 *minor_ver);
23746 +
23747 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23748 + u8 options, struct opr_cfg *cfg);
23749 +
23750 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23751 + struct opr_cfg *cfg, struct opr_qry *qry);
23752 +
23753 +/**
23754 + * enum dpseci_congestion_unit - DPSECI congestion units
23755 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
23756 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
23757 + */
23758 +enum dpseci_congestion_unit {
23759 + DPSECI_CONGESTION_UNIT_BYTES = 0,
23760 + DPSECI_CONGESTION_UNIT_FRAMES
23761 +};
23762 +
23763 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
23764 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
23765 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
23766 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
23767 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
23768 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
23769 +
23770 +/**
23771 + * struct dpseci_congestion_notification_cfg - congestion notification
23772 + * configuration
23773 + * @units: units type
23774 + * @threshold_entry: above this threshold we enter a congestion state;
23775 + *	set it to '0' to disable it
23776 + * @threshold_exit: below this threshold we exit the congestion state.
23777 + * @message_ctx: The context that will be part of the CSCN message
23778 + * @message_iova: I/O virtual address (must be in DMA-able memory),
23779 + * must be 16B aligned;
23780 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
23781 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
23782 + * values
23783 + */
23784 +struct dpseci_congestion_notification_cfg {
23785 + enum dpseci_congestion_unit units;
23786 + u32 threshold_entry;
23787 + u32 threshold_exit;
23788 + u64 message_ctx;
23789 + u64 message_iova;
23790 + struct dpseci_dest_cfg dest_cfg;
23791 + u16 notification_mode;
23792 +};
23793 +
23794 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23795 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
23796 +
23797 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23798 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
23799 +
23800 +#endif /* _DPSECI_H_ */
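For orientation, a hypothetical caller could point an Rx queue at a DPIO channel and attach a per-queue context along these lines; 'mc_io', 'token', 'dpio_id' and 'my_ctx' are assumed to exist in the caller and are not defined by this header:

	struct dpseci_rx_queue_cfg rx_cfg = {
		.options  = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
		.user_ctx = (u64)(uintptr_t)my_ctx,
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id   = dpio_id,
			.priority  = 1,
		},
	};

	/* apply the same configuration to every Rx queue */
	err = dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);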
23801 --- /dev/null
23802 +++ b/drivers/crypto/caam/dpseci_cmd.h
23803 @@ -0,0 +1,261 @@
23804 +/*
23805 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23806 + * Copyright 2017 NXP
23807 + *
23808 + * Redistribution and use in source and binary forms, with or without
23809 + * modification, are permitted provided that the following conditions are met:
23810 + * * Redistributions of source code must retain the above copyright
23811 + * notice, this list of conditions and the following disclaimer.
23812 + * * Redistributions in binary form must reproduce the above copyright
23813 + * notice, this list of conditions and the following disclaimer in the
23814 + * documentation and/or other materials provided with the distribution.
23815 + * * Neither the names of the above-listed copyright holders nor the
23816 + * names of any contributors may be used to endorse or promote products
23817 + * derived from this software without specific prior written permission.
23818 + *
23819 + *
23820 + * ALTERNATIVELY, this software may be distributed under the terms of the
23821 + * GNU General Public License ("GPL") as published by the Free Software
23822 + * Foundation, either version 2 of that License or (at your option) any
23823 + * later version.
23824 + *
23825 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23826 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23827 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23828 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23829 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23830 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23831 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23832 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23833 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23834 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23835 + * POSSIBILITY OF SUCH DAMAGE.
23836 + */
23837 +
23838 +#ifndef _DPSECI_CMD_H_
23839 +#define _DPSECI_CMD_H_
23840 +
23841 +/* DPSECI Version */
23842 +#define DPSECI_VER_MAJOR 5
23843 +#define DPSECI_VER_MINOR 1
23844 +
23845 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
23846 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
23847 +
23848 +/* Command IDs */
23849 +
23850 +#define DPSECI_CMDID_CLOSE 0x8001
23851 +#define DPSECI_CMDID_OPEN 0x8091
23852 +#define DPSECI_CMDID_CREATE 0x9092
23853 +#define DPSECI_CMDID_DESTROY 0x9891
23854 +#define DPSECI_CMDID_GET_API_VERSION 0xa091
23855 +
23856 +#define DPSECI_CMDID_ENABLE 0x0021
23857 +#define DPSECI_CMDID_DISABLE 0x0031
23858 +#define DPSECI_CMDID_GET_ATTR 0x0041
23859 +#define DPSECI_CMDID_RESET 0x0051
23860 +#define DPSECI_CMDID_IS_ENABLED 0x0061
23861 +
23862 +#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
23863 +#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
23864 +#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
23865 +#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
23866 +#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
23867 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
23868 +
23869 +#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
23870 +#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
23871 +#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
23872 +#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
23873 +#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
23874 +#define DPSECI_CMDID_SET_OPR 0x19A1
23875 +#define DPSECI_CMDID_GET_OPR 0x19B1
23876 +
23877 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
23878 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
23879 +
23880 +/* Macros for accessing command fields smaller than 1 byte */
23881 +#define DPSECI_MASK(field) \
23882 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
23883 + DPSECI_##field##_SHIFT)
23884 +
23885 +#define dpseci_set_field(var, field, val) \
23886 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
23887 +
23888 +#define dpseci_get_field(var, field) \
23889 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
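These helpers token-paste the field name into its _SHIFT/_SIZE definitions, so sub-byte fields can be packed into and read back from a single byte. Using the CGN_DEST_TYPE (shift 0, size 4) and CGN_UNITS (shift 4, size 2) fields defined further down in this header, for example:

	u8 opts = 0;

	dpseci_set_field(opts, CGN_DEST_TYPE, DPSECI_DEST_DPIO);          /* bits 0-3 */
	dpseci_set_field(opts, CGN_UNITS, DPSECI_CONGESTION_UNIT_FRAMES); /* bits 4-5 */

	/* dpseci_get_field(opts, CGN_UNITS) == DPSECI_CONGESTION_UNIT_FRAMES */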
23890 +
23891 +struct dpseci_cmd_open {
23892 + __le32 dpseci_id;
23893 +};
23894 +
23895 +struct dpseci_cmd_create {
23896 + u8 priorities[8];
23897 + u8 num_tx_queues;
23898 + u8 num_rx_queues;
23899 + __le16 pad;
23900 + __le32 options;
23901 +};
23902 +
23903 +struct dpseci_cmd_destroy {
23904 + __le32 object_id;
23905 +};
23906 +
23907 +struct dpseci_rsp_is_enabled {
23908 + __le32 is_enabled;
23909 +};
23910 +
23911 +struct dpseci_cmd_irq_enable {
23912 + u8 enable_state;
23913 + u8 pad[3];
23914 + u8 irq_index;
23915 +};
23916 +
23917 +struct dpseci_rsp_get_irq_enable {
23918 + u8 enable_state;
23919 +};
23920 +
23921 +struct dpseci_cmd_irq_mask {
23922 + __le32 mask;
23923 + u8 irq_index;
23924 +};
23925 +
23926 +struct dpseci_cmd_irq_status {
23927 + __le32 status;
23928 + u8 irq_index;
23929 +};
23930 +
23931 +struct dpseci_rsp_get_attributes {
23932 + __le32 id;
23933 + __le32 pad0;
23934 + u8 num_tx_queues;
23935 + u8 num_rx_queues;
23936 + u8 pad1[6];
23937 + __le32 options;
23938 +};
23939 +
23940 +struct dpseci_cmd_queue {
23941 + __le32 dest_id;
23942 + u8 priority;
23943 + u8 queue;
23944 + u8 dest_type;
23945 + u8 pad;
23946 + __le64 user_ctx;
23947 + union {
23948 + __le32 options;
23949 + __le32 fqid;
23950 + };
23951 + __le32 order_preservation_en;
23952 +};
23953 +
23954 +struct dpseci_rsp_get_tx_queue {
23955 + __le32 pad;
23956 + __le32 fqid;
23957 + u8 priority;
23958 +};
23959 +
23960 +struct dpseci_rsp_get_sec_attr {
23961 + __le16 ip_id;
23962 + u8 major_rev;
23963 + u8 minor_rev;
23964 + u8 era;
23965 + u8 pad0[3];
23966 + u8 deco_num;
23967 + u8 zuc_auth_acc_num;
23968 + u8 zuc_enc_acc_num;
23969 + u8 pad1;
23970 + u8 snow_f8_acc_num;
23971 + u8 snow_f9_acc_num;
23972 + u8 crc_acc_num;
23973 + u8 pad2;
23974 + u8 pk_acc_num;
23975 + u8 kasumi_acc_num;
23976 + u8 rng_acc_num;
23977 + u8 pad3;
23978 + u8 md_acc_num;
23979 + u8 arc4_acc_num;
23980 + u8 des_acc_num;
23981 + u8 aes_acc_num;
23982 +};
23983 +
23984 +struct dpseci_rsp_get_sec_counters {
23985 + __le64 dequeued_requests;
23986 + __le64 ob_enc_requests;
23987 + __le64 ib_dec_requests;
23988 + __le64 ob_enc_bytes;
23989 + __le64 ob_prot_bytes;
23990 + __le64 ib_dec_bytes;
23991 + __le64 ib_valid_bytes;
23992 +};
23993 +
23994 +struct dpseci_rsp_get_api_version {
23995 + __le16 major;
23996 + __le16 minor;
23997 +};
23998 +
23999 +struct dpseci_cmd_opr {
24000 + __le16 pad;
24001 + u8 index;
24002 + u8 options;
24003 + u8 pad1[7];
24004 + u8 oloe;
24005 + u8 oeane;
24006 + u8 olws;
24007 + u8 oa;
24008 + u8 oprrws;
24009 +};
24010 +
24011 +#define DPSECI_OPR_RIP_SHIFT 0
24012 +#define DPSECI_OPR_RIP_SIZE 1
24013 +#define DPSECI_OPR_ENABLE_SHIFT 1
24014 +#define DPSECI_OPR_ENABLE_SIZE 1
24015 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
24016 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
24017 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
24018 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
24019 +
24020 +struct dpseci_rsp_get_opr {
24021 + __le64 pad;
24022 + u8 rip_enable;
24023 + u8 pad0[2];
24024 + u8 oloe;
24025 + u8 oeane;
24026 + u8 olws;
24027 + u8 oa;
24028 + u8 oprrws;
24029 + __le16 nesn;
24030 + __le16 pad1;
24031 + __le16 ndsn;
24032 + __le16 pad2;
24033 + __le16 ea_tseq;
24034 + u8 tseq_nlis;
24035 + u8 pad3;
24036 + __le16 ea_hseq;
24037 + u8 hseq_nlis;
24038 + u8 pad4;
24039 + __le16 ea_hptr;
24040 + __le16 pad5;
24041 + __le16 ea_tptr;
24042 + __le16 pad6;
24043 + __le16 opr_vid;
24044 + __le16 pad7;
24045 + __le16 opr_id;
24046 +};
24047 +
24048 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
24049 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
24050 +#define DPSECI_CGN_UNITS_SHIFT 4
24051 +#define DPSECI_CGN_UNITS_SIZE 2
24052 +
24053 +struct dpseci_cmd_congestion_notification {
24054 + __le32 dest_id;
24055 + __le16 notification_mode;
24056 + u8 priority;
24057 + u8 options;
24058 + __le64 message_iova;
24059 + __le64 message_ctx;
24060 + __le32 threshold_entry;
24061 + __le32 threshold_exit;
24062 +};
24063 +
24064 +#endif /* _DPSECI_CMD_H_ */
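DPSECI_VER_MAJOR/MINOR describe the API level the driver was written against; a caller would typically compare them with the firmware's answer from dpseci_get_api_version(), for instance (sketch only; 'dev' and 'mc_io' assumed):

	u16 major, minor;
	int err = dpseci_get_api_version(mc_io, 0, &major, &minor);

	if (!err && (major < DPSECI_VER_MAJOR ||
		     (major == DPSECI_VER_MAJOR && minor < DPSECI_VER_MINOR)))
		dev_err(dev, "DPSECI API %u.%u too old, need at least %u.%u\n",
			major, minor, DPSECI_VER_MAJOR, DPSECI_VER_MINOR);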
24065 --- a/drivers/crypto/caam/error.c
24066 +++ b/drivers/crypto/caam/error.c
24067 @@ -6,11 +6,54 @@
24068
24069 #include "compat.h"
24070 #include "regs.h"
24071 -#include "intern.h"
24072 #include "desc.h"
24073 -#include "jr.h"
24074 #include "error.h"
24075
24076 +#ifdef DEBUG
24077 +
24078 +#include <linux/highmem.h>
24079 +
24080 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24081 + int rowsize, int groupsize, struct scatterlist *sg,
24082 + size_t tlen, bool ascii)
24083 +{
24084 + struct scatterlist *it;
24085 + void *it_page;
24086 + size_t len;
24087 + void *buf;
24088 +
24089 +	for (it = sg; it && tlen > 0; it = sg_next(it)) {
24090 + /*
24091 + * make sure the scatterlist's page
24092 + * has a valid virtual memory mapping
24093 + */
24094 + it_page = kmap_atomic(sg_page(it));
24095 + if (unlikely(!it_page)) {
24096 + pr_err("caam_dump_sg: kmap failed\n");
24097 + return;
24098 + }
24099 +
24100 + buf = it_page + it->offset;
24101 + len = min_t(size_t, tlen, it->length);
24102 + print_hex_dump(level, prefix_str, prefix_type, rowsize,
24103 + groupsize, buf, len, ascii);
24104 + tlen -= len;
24105 +
24106 + kunmap_atomic(it_page);
24107 + }
24108 +}
24109 +
24110 +#else
24111 +
24112 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24113 + int rowsize, int groupsize, struct scatterlist *sg,
24114 + size_t tlen, bool ascii)
24115 +{}
24116 +
24117 +#endif
24118 +
24119 +EXPORT_SYMBOL(caam_dump_sg);
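caam_dump_sg() is compiled out unless DEBUG is defined; when enabled, a caller could dump, say, the first 64 bytes of a request's source scatterlist along these lines (illustrative call only, 'req' being an assumed ablkcipher/aead request):

	caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     min_t(size_t, 64, req->cryptlen), false);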
24120 +
24121 static const struct {
24122 u8 value;
24123 const char *error_text;
24124 @@ -69,6 +112,54 @@ static const struct {
24125 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
24126 };
24127
24128 +static const struct {
24129 + u8 value;
24130 + const char *error_text;
24131 +} qi_error_list[] = {
24132 + { 0x1F, "Job terminated by FQ or ICID flush" },
24133 + { 0x20, "FD format error"},
24134 + { 0x21, "FD command format error"},
24135 + { 0x23, "FL format error"},
24136 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
24137 + { 0x30, "Max. buffer size too small"},
24138 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
24139 +	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
24140 + { 0x33, "Size over/underflow (allocate mode)"},
24141 + { 0x34, "Size over/underflow (reuse mode)"},
24142 +	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
24143 +	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
24144 + { 0x41, "SBC frame format not supported (allocate mode)"},
24145 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
24146 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
24147 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
24148 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
24149 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
24150 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
24151 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
24152 +	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
24153 + { 0x51, "Unsupported IF reuse mode"},
24154 + { 0x52, "Unsupported FL use mode"},
24155 + { 0x53, "Unsupported RJD use mode"},
24156 + { 0x54, "Unsupported inline descriptor use mode"},
24157 + { 0xC0, "Table buffer pool 0 depletion"},
24158 + { 0xC1, "Table buffer pool 1 depletion"},
24159 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
24160 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
24161 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
24162 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
24163 + { 0xD0, "FLC read error"},
24164 + { 0xD1, "FL read error"},
24165 + { 0xD2, "FL write error"},
24166 + { 0xD3, "OF SGT write error"},
24167 + { 0xD4, "PTA read error"},
24168 + { 0xD5, "PTA write error"},
24169 + { 0xD6, "OF SGT F-bit write error"},
24170 + { 0xD7, "ASA write error"},
24171 + { 0xE1, "FLC[ICR]=0 ICID error"},
24172 + { 0xE2, "FLC[ICR]=1 ICID error"},
24173 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
24174 +};
24175 +
24176 static const char * const cha_id_list[] = {
24177 "",
24178 "AES",
24179 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
24180 strlen(rng_err_id_list[err_id])) {
24181 /* RNG-only error */
24182 err_str = rng_err_id_list[err_id];
24183 - } else if (err_id < ARRAY_SIZE(err_id_list))
24184 + } else {
24185 err_str = err_id_list[err_id];
24186 - else
24187 - snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24188 + }
24189
24190 /*
24191 * CCB ICV check failures are part of normal operation life;
24192 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
24193 status, error, idx_str, idx, err_str, err_err_code);
24194 }
24195
24196 +static void report_qi_status(struct device *qidev, const u32 status,
24197 + const char *error)
24198 +{
24199 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
24200 + const char *err_str = "unidentified error value 0x";
24201 + char err_err_code[3] = { 0 };
24202 + int i;
24203 +
24204 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
24205 + if (qi_error_list[i].value == err_id)
24206 + break;
24207 +
24208 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
24209 + err_str = qi_error_list[i].error_text;
24210 + else
24211 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24212 +
24213 + dev_err(qidev, "%08x: %s: %s%s\n",
24214 + status, error, err_str, err_err_code);
24215 +}
24216 +
24217 static void report_jr_status(struct device *jrdev, const u32 status,
24218 const char *error)
24219 {
24220 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
24221 status, error, __func__);
24222 }
24223
24224 -void caam_jr_strstatus(struct device *jrdev, u32 status)
24225 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
24226 {
24227 static const struct stat_src {
24228 void (*report_ssed)(struct device *jrdev, const u32 status,
24229 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
24230 { report_ccb_status, "CCB" },
24231 { report_jump_status, "Jump" },
24232 { report_deco_status, "DECO" },
24233 - { NULL, "Queue Manager Interface" },
24234 + { report_qi_status, "Queue Manager Interface" },
24235 { report_jr_status, "Job Ring" },
24236 { report_cond_code_status, "Condition Code" },
24237 { NULL, NULL },
24238 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
24239 else
24240 dev_err(jrdev, "%d: unknown error source\n", ssrc);
24241 }
24242 -EXPORT_SYMBOL(caam_jr_strstatus);
24243 +EXPORT_SYMBOL(caam_strstatus);
24244 --- a/drivers/crypto/caam/error.h
24245 +++ b/drivers/crypto/caam/error.h
24246 @@ -7,5 +7,13 @@
24247 #ifndef CAAM_ERROR_H
24248 #define CAAM_ERROR_H
24249 #define CAAM_ERROR_STR_MAX 302
24250 -void caam_jr_strstatus(struct device *jrdev, u32 status);
24251 +
24252 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
24253 +
24254 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
24255 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
24256 +
24257 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24258 + int rowsize, int groupsize, struct scatterlist *sg,
24259 + size_t tlen, bool ascii);
24260 #endif /* CAAM_ERROR_H */
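With these wrappers, existing job-ring users keep calling caam_jr_strstatus() unchanged, while the new DPAA2 code reports through caam_qi2_strstatus(). A typical job-ring completion callback might then look roughly like this (hypothetical callback; the signature matches the 'cbk' argument of caam_jr_enqueue()):

	static void my_op_done(struct device *jrdev, u32 *desc, u32 status,
			       void *context)
	{
		if (status)
			caam_jr_strstatus(jrdev, status);

		/* release DMA mappings and complete the crypto request here */
	}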
24261 --- a/drivers/crypto/caam/intern.h
24262 +++ b/drivers/crypto/caam/intern.h
24263 @@ -41,6 +41,7 @@ struct caam_drv_private_jr {
24264 struct device *dev;
24265 int ridx;
24266 struct caam_job_ring __iomem *rregs; /* JobR's register space */
24267 + struct tasklet_struct irqtask;
24268 int irq; /* One per queue */
24269
24270 /* Number of scatterlist crypt transforms active on the JobR */
24271 @@ -63,10 +64,9 @@ struct caam_drv_private_jr {
24272 * Driver-private storage for a single CAAM block instance
24273 */
24274 struct caam_drv_private {
24275 -
24276 - struct device *dev;
24277 - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
24278 - struct platform_device *pdev;
24279 +#ifdef CONFIG_CAAM_QI
24280 + struct device *qidev;
24281 +#endif
24282
24283 /* Physical-presence section */
24284 struct caam_ctrl __iomem *ctrl; /* controller region */
24285 @@ -102,11 +102,6 @@ struct caam_drv_private {
24286 #ifdef CONFIG_DEBUG_FS
24287 struct dentry *dfs_root;
24288 struct dentry *ctl; /* controller dir */
24289 - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
24290 - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
24291 - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
24292 - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
24293 -
24294 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
24295 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
24296 #endif
24297 @@ -114,4 +109,22 @@ struct caam_drv_private {
24298
24299 void caam_jr_algapi_init(struct device *dev);
24300 void caam_jr_algapi_remove(struct device *dev);
24301 +
24302 +#ifdef CONFIG_DEBUG_FS
24303 +static int caam_debugfs_u64_get(void *data, u64 *val)
24304 +{
24305 + *val = caam64_to_cpu(*(u64 *)data);
24306 + return 0;
24307 +}
24308 +
24309 +static int caam_debugfs_u32_get(void *data, u64 *val)
24310 +{
24311 + *val = caam32_to_cpu(*(u32 *)data);
24312 + return 0;
24313 +}
24314 +
24315 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
24316 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
24317 +#endif
24318 +
24319 #endif /* INTERN_H */
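The read-only fops added above exist so that SEC performance-monitor counters can be exposed through debugfs without per-file boilerplate; a user of them would pair one with debugfs_create_file(), e.g. (sketch; 'perfmon' and 'ctrlpriv->ctl' are assumed to be the memory-mapped counter block and the controller's debugfs directory):

	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_req,
			    &caam_fops_u64_ro);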
24320 --- a/drivers/crypto/caam/jr.c
24321 +++ b/drivers/crypto/caam/jr.c
24322 @@ -9,6 +9,7 @@
24323 #include <linux/of_address.h>
24324
24325 #include "compat.h"
24326 +#include "ctrl.h"
24327 #include "regs.h"
24328 #include "jr.h"
24329 #include "desc.h"
24330 @@ -73,6 +74,8 @@ static int caam_jr_shutdown(struct devic
24331
24332 ret = caam_reset_hw_jr(dev);
24333
24334 + tasklet_kill(&jrp->irqtask);
24335 +
24336 /* Release interrupt */
24337 free_irq(jrp->irq, dev);
24338
24339 @@ -128,7 +131,7 @@ static irqreturn_t caam_jr_interrupt(int
24340
24341 /*
24342 * Check the output ring for ready responses, kick
24343 - * the threaded irq if jobs done.
24344 + * tasklet if jobs done.
24345 */
24346 irqstate = rd_reg32(&jrp->rregs->jrintstatus);
24347 if (!irqstate)
24348 @@ -150,13 +153,18 @@ static irqreturn_t caam_jr_interrupt(int
24349 /* Have valid interrupt at this point, just ACK and trigger */
24350 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
24351
24352 - return IRQ_WAKE_THREAD;
24353 + preempt_disable();
24354 + tasklet_schedule(&jrp->irqtask);
24355 + preempt_enable();
24356 +
24357 + return IRQ_HANDLED;
24358 }
24359
24360 -static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
24361 +/* Deferred service handler, run as interrupt-fired tasklet */
24362 +static void caam_jr_dequeue(unsigned long devarg)
24363 {
24364 int hw_idx, sw_idx, i, head, tail;
24365 - struct device *dev = st_dev;
24366 + struct device *dev = (struct device *)devarg;
24367 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24368 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
24369 u32 *userdesc, userstatus;
24370 @@ -230,8 +238,6 @@ static irqreturn_t caam_jr_threadirq(int
24371
24372 /* reenable / unmask IRQs */
24373 clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
24374 -
24375 - return IRQ_HANDLED;
24376 }
24377
24378 /**
24379 @@ -389,10 +395,11 @@ static int caam_jr_init(struct device *d
24380
24381 jrp = dev_get_drvdata(dev);
24382
24383 + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
24384 +
24385 /* Connect job ring interrupt handler. */
24386 - error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
24387 - caam_jr_threadirq, IRQF_SHARED,
24388 - dev_name(dev), dev);
24389 + error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
24390 + dev_name(dev), dev);
24391 if (error) {
24392 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
24393 jrp->ridx, jrp->irq);
24394 @@ -454,6 +461,7 @@ out_free_inpring:
24395 out_free_irq:
24396 free_irq(jrp->irq, dev);
24397 out_kill_deq:
24398 + tasklet_kill(&jrp->irqtask);
24399 return error;
24400 }
24401
24402 @@ -489,15 +497,28 @@ static int caam_jr_probe(struct platform
24403 return -ENOMEM;
24404 }
24405
24406 - jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
24407 + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
24408
24409 - if (sizeof(dma_addr_t) == sizeof(u64))
24410 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
24411 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
24412 + if (sizeof(dma_addr_t) == sizeof(u64)) {
24413 + if (caam_dpaa2)
24414 + error = dma_set_mask_and_coherent(jrdev,
24415 + DMA_BIT_MASK(49));
24416 + else if (of_device_is_compatible(nprop,
24417 + "fsl,sec-v5.0-job-ring"))
24418 + error = dma_set_mask_and_coherent(jrdev,
24419 + DMA_BIT_MASK(40));
24420 else
24421 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
24422 - else
24423 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24424 + error = dma_set_mask_and_coherent(jrdev,
24425 + DMA_BIT_MASK(36));
24426 + } else {
24427 + error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24428 + }
24429 + if (error) {
24430 + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
24431 + error);
24432 + iounmap(ctrl);
24433 + return error;
24434 + }
24435
24436 /* Identify the interrupt */
24437 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
24438 @@ -520,7 +541,7 @@ static int caam_jr_probe(struct platform
24439 return 0;
24440 }
24441
24442 -static struct of_device_id caam_jr_match[] = {
24443 +static const struct of_device_id caam_jr_match[] = {
24444 {
24445 .compatible = "fsl,sec-v4.0-job-ring",
24446 },
24447 --- a/drivers/crypto/caam/key_gen.c
24448 +++ b/drivers/crypto/caam/key_gen.c
24449 @@ -41,15 +41,29 @@ Split key generation--------------------
24450 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
24451 @0xffe04000
24452 */
24453 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24454 - int split_key_pad_len, const u8 *key_in, u32 keylen,
24455 - u32 alg_op)
24456 +int gen_split_key(struct device *jrdev, u8 *key_out,
24457 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
24458 + int max_keylen)
24459 {
24460 u32 *desc;
24461 struct split_key_result result;
24462 dma_addr_t dma_addr_in, dma_addr_out;
24463 int ret = -ENOMEM;
24464
24465 + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
24466 + adata->keylen_pad = split_key_pad_len(adata->algtype &
24467 + OP_ALG_ALGSEL_MASK);
24468 +
24469 +#ifdef DEBUG
24470 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
24471 + adata->keylen, adata->keylen_pad);
24472 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
24473 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
24474 +#endif
24475 +
24476 + if (adata->keylen_pad > max_keylen)
24477 + return -EINVAL;
24478 +
24479 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
24480 if (!desc) {
24481 dev_err(jrdev, "unable to allocate key input memory\n");
24482 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
24483 goto out_free;
24484 }
24485
24486 - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
24487 + dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
24488 DMA_FROM_DEVICE);
24489 if (dma_mapping_error(jrdev, dma_addr_out)) {
24490 dev_err(jrdev, "unable to map key output memory\n");
24491 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
24492 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
24493
24494 /* Sets MDHA up into an HMAC-INIT */
24495 - append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
24496 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
24497 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
24498 + OP_ALG_AS_INIT);
24499
24500 /*
24501 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
24502 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
24503 * FIFO_STORE with the explicit split-key content store
24504 * (0x26 output type)
24505 */
24506 - append_fifo_store(desc, dma_addr_out, split_key_len,
24507 + append_fifo_store(desc, dma_addr_out, adata->keylen,
24508 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
24509
24510 #ifdef DEBUG
24511 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
24512 #ifdef DEBUG
24513 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
24514 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
24515 - split_key_pad_len, 1);
24516 + adata->keylen_pad, 1);
24517 #endif
24518 }
24519
24520 - dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
24521 + dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
24522 DMA_FROM_DEVICE);
24523 out_unmap_in:
24524 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
24525 --- a/drivers/crypto/caam/key_gen.h
24526 +++ b/drivers/crypto/caam/key_gen.h
24527 @@ -5,6 +5,36 @@
24528 *
24529 */
24530
24531 +/**
24532 + * split_key_len - Compute MDHA split key length for a given algorithm
24533 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24534 + *	SHA224, SHA256, SHA384, SHA512.
24535 + *
24536 + * Return: MDHA split key length
24537 + */
24538 +static inline u32 split_key_len(u32 hash)
24539 +{
24540 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
24541 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
24542 + u32 idx;
24543 +
24544 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
24545 +
24546 + return (u32)(mdpadlen[idx] * 2);
24547 +}
24548 +
24549 +/**
24550 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
24551 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24552 + *	SHA224, SHA256, SHA384, SHA512.
24553 + *
24554 + * Return: MDHA split key pad length
24555 + */
24556 +static inline u32 split_key_pad_len(u32 hash)
24557 +{
24558 + return ALIGN(split_key_len(hash), 16);
24559 +}
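As a worked example of the arithmetic (values shown only to illustrate the helpers above): for OP_ALG_ALGSEL_SHA256 the pad table yields 32, so the split key is 2 * 32 = 64 bytes and, being already 16-byte aligned, pads to 64; SHA-384 yields 2 * 64 = 128.

	u32 len = split_key_len(OP_ALG_ALGSEL_SHA256);     /* 64 */
	u32 pad = split_key_pad_len(OP_ALG_ALGSEL_SHA256); /* ALIGN(64, 16) == 64 */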
24560 +
24561 struct split_key_result {
24562 struct completion completion;
24563 int err;
24564 @@ -12,6 +42,6 @@ struct split_key_result {
24565
24566 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
24567
24568 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24569 - int split_key_pad_len, const u8 *key_in, u32 keylen,
24570 - u32 alg_op);
24571 +int gen_split_key(struct device *jrdev, u8 *key_out,
24572 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
24573 + int max_keylen);
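Under the new prototype the caller no longer pre-computes the split key lengths; it passes the algorithm in 'adata' plus the size of its output buffer, and gen_split_key() fills adata->keylen/keylen_pad itself. A hedged usage sketch ('ctx', its key buffer and job-ring device are assumed, as in the caamalg/caamhash setkey paths):

	struct alginfo adata = {
		.algtype = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	};
	int ret = gen_split_key(ctx->jrdev, ctx->key, &adata, key, keylen,
				sizeof(ctx->key));
	/* on success, adata.keylen / adata.keylen_pad hold the split key sizes */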
24574 --- a/drivers/crypto/caam/pdb.h
24575 +++ b/drivers/crypto/caam/pdb.h
24576 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
24577 #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
24578 #define RSA_PDB_D_SHIFT 12
24579 #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
24580 +#define RSA_PDB_Q_SHIFT 12
24581 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
24582
24583 #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
24584 #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
24585 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
24586 #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
24587
24588 #define RSA_PRIV_KEY_FRM_1 0
24589 +#define RSA_PRIV_KEY_FRM_2 1
24590 +#define RSA_PRIV_KEY_FRM_3 2
24591
24592 /**
24593 * RSA Encrypt Protocol Data Block
24594 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
24595 dma_addr_t d_dma;
24596 } __packed;
24597
24598 +/**
24599 + * RSA Decrypt PDB - Private Key Form #2
24600 + * @sgf : scatter-gather field
24601 + * @g_dma : dma address of encrypted input data
24602 + * @f_dma : dma address of output data
24603 + * @d_dma : dma address of RSA private exponent
24604 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
24605 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
24606 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24607 + * as internal state buffer. It is assumed to be as long as p.
24608 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24609 + * as internal state buffer. It is assumed to be as long as q.
24610 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24611 + */
24612 +struct rsa_priv_f2_pdb {
24613 + u32 sgf;
24614 + dma_addr_t g_dma;
24615 + dma_addr_t f_dma;
24616 + dma_addr_t d_dma;
24617 + dma_addr_t p_dma;
24618 + dma_addr_t q_dma;
24619 + dma_addr_t tmp1_dma;
24620 + dma_addr_t tmp2_dma;
24621 + u32 p_q_len;
24622 +} __packed;
24623 +
24624 +/**
24625 + * RSA Decrypt PDB - Private Key Form #3
24626 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
24627 + * the RSA modulus.
24628 + * @sgf : scatter-gather field
24629 + * @g_dma : dma address of encrypted input data
24630 + * @f_dma : dma address of output data
24631 + * @c_dma : dma address of RSA CRT coefficient
24632 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
24633 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
24634 + * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
24635 + * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
24636 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24637 + * as internal state buffer. It is assumed to be as long as p.
24638 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24639 + * as internal state buffer. It is assumed to be as long as q.
24640 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24641 + */
24642 +struct rsa_priv_f3_pdb {
24643 + u32 sgf;
24644 + dma_addr_t g_dma;
24645 + dma_addr_t f_dma;
24646 + dma_addr_t c_dma;
24647 + dma_addr_t p_dma;
24648 + dma_addr_t q_dma;
24649 + dma_addr_t dp_dma;
24650 + dma_addr_t dq_dma;
24651 + dma_addr_t tmp1_dma;
24652 + dma_addr_t tmp2_dma;
24653 + u32 p_q_len;
24654 +} __packed;
24655 +
24656 #endif
24657 --- a/drivers/crypto/caam/pkc_desc.c
24658 +++ b/drivers/crypto/caam/pkc_desc.c
24659 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
24660 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24661 RSA_PRIV_KEY_FRM_1);
24662 }
24663 +
24664 +/* Descriptor for RSA Private operation - Private Key Form #2 */
24665 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
24666 +{
24667 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
24668 + append_cmd(desc, pdb->sgf);
24669 + append_ptr(desc, pdb->g_dma);
24670 + append_ptr(desc, pdb->f_dma);
24671 + append_ptr(desc, pdb->d_dma);
24672 + append_ptr(desc, pdb->p_dma);
24673 + append_ptr(desc, pdb->q_dma);
24674 + append_ptr(desc, pdb->tmp1_dma);
24675 + append_ptr(desc, pdb->tmp2_dma);
24676 + append_cmd(desc, pdb->p_q_len);
24677 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24678 + RSA_PRIV_KEY_FRM_2);
24679 +}
24680 +
24681 +/* Descriptor for RSA Private operation - Private Key Form #3 */
24682 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
24683 +{
24684 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
24685 + append_cmd(desc, pdb->sgf);
24686 + append_ptr(desc, pdb->g_dma);
24687 + append_ptr(desc, pdb->f_dma);
24688 + append_ptr(desc, pdb->c_dma);
24689 + append_ptr(desc, pdb->p_dma);
24690 + append_ptr(desc, pdb->q_dma);
24691 + append_ptr(desc, pdb->dp_dma);
24692 + append_ptr(desc, pdb->dq_dma);
24693 + append_ptr(desc, pdb->tmp1_dma);
24694 + append_ptr(desc, pdb->tmp2_dma);
24695 + append_cmd(desc, pdb->p_q_len);
24696 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24697 + RSA_PRIV_KEY_FRM_3);
24698 +}
24699 --- /dev/null
24700 +++ b/drivers/crypto/caam/qi.c
24701 @@ -0,0 +1,797 @@
24702 +/*
24703 + * CAAM/SEC 4.x QI transport/backend driver
24704 + * Queue Interface backend functionality
24705 + *
24706 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
24707 + * Copyright 2016-2017 NXP
24708 + */
24709 +
24710 +#include <linux/cpumask.h>
24711 +#include <linux/kthread.h>
24712 +#include <linux/fsl_qman.h>
24713 +
24714 +#include "regs.h"
24715 +#include "qi.h"
24716 +#include "desc.h"
24717 +#include "intern.h"
24718 +#include "desc_constr.h"
24719 +
24720 +#define PREHDR_RSLS_SHIFT 31
24721 +
24722 +/*
24723 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
24724 + * so that resources used by the in-flight buffers do not become a memory hog.
24725 + */
24726 +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
24727 +
24728 +#define CAAM_QI_ENQUEUE_RETRIES 10000
24729 +
24730 +#define CAAM_NAPI_WEIGHT 63
24731 +
24732 +/*
24733 + * caam_napi - struct holding CAAM NAPI-related params
24734 + * @irqtask: IRQ task for QI backend
24735 + * @p: QMan portal
24736 + */
24737 +struct caam_napi {
24738 + struct napi_struct irqtask;
24739 + struct qman_portal *p;
24740 +};
24741 +
24742 +/*
24743 + * caam_qi_pcpu_priv - percpu private data structure to main list of pending
24744 + * responses expected on each cpu.
24745 + * @caam_napi: CAAM NAPI params
24746 + * @net_dev: netdev used by NAPI
24747 + * @rsp_fq: response FQ from CAAM
24748 + */
24749 +struct caam_qi_pcpu_priv {
24750 + struct caam_napi caam_napi;
24751 + struct net_device net_dev;
24752 + struct qman_fq *rsp_fq;
24753 +} ____cacheline_aligned;
24754 +
24755 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
24756 +static DEFINE_PER_CPU(int, last_cpu);
24757 +
24758 +/*
24759 + * caam_qi_priv - CAAM QI backend private params
24760 + * @cgr: QMan congestion group
24761 + * @qi_pdev: platform device for QI backend
24762 + */
24763 +struct caam_qi_priv {
24764 + struct qman_cgr cgr;
24765 + struct platform_device *qi_pdev;
24766 +};
24767 +
24768 +static struct caam_qi_priv qipriv ____cacheline_aligned;
24769 +
24770 +/*
24771 + * This is written by only one core - the one that initialized the CGR - and
24772 + * read by multiple cores (all the others).
24773 + */
24774 +bool caam_congested __read_mostly;
24775 +EXPORT_SYMBOL(caam_congested);
24776 +
24777 +#ifdef CONFIG_DEBUG_FS
24778 +/*
24779 + * This is a counter for the number of times the congestion group (where all
24780 + * the request and response queues are) reached congestion. Incremented
24781 + * each time the congestion callback is called with congested == true.
24782 + */
24783 +static u64 times_congested;
24784 +#endif
24785 +
24786 +/*
24787 + * CPU on which the module was initialised. This is required because the QMan
24788 + * driver requires CGRs to be removed from the same CPU on which they were
24789 + * originally allocated.
24790 + */
24791 +static int mod_init_cpu;
24792 +
24793 +/*
24794 + * This is a cache of buffers, from which the users of CAAM QI driver
24795 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
24796 + * doing malloc on the hotpath.
24797 + * NOTE: A more elegant solution would be to have some headroom in the frames
24798 + * being processed. This could be added by the dpaa-ethernet driver.
24799 + * This would pose a problem for userspace application processing which
24800 + * cannot know of this limitation. So for now, this will work.
24801 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
24802 + */
24803 +static struct kmem_cache *qi_cache;
24804 +
24805 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
24806 +{
24807 + struct qm_fd fd;
24808 + int ret;
24809 + int num_retries = 0;
24810 +
24811 + fd.cmd = 0;
24812 + fd.format = qm_fd_compound;
24813 + fd.cong_weight = req->fd_sgt[1].length;
24814 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
24815 + DMA_BIDIRECTIONAL);
24816 + if (dma_mapping_error(qidev, fd.addr)) {
24817 + dev_err(qidev, "DMA mapping error for QI enqueue request\n");
24818 + return -EIO;
24819 + }
24820 +
24821 + do {
24822 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
24823 + if (likely(!ret))
24824 + return 0;
24825 +
24826 + if (ret != -EBUSY)
24827 + break;
24828 + num_retries++;
24829 + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
24830 +
24831 + dev_err(qidev, "qman_enqueue failed: %d\n", ret);
24832 +
24833 + return ret;
24834 +}
24835 +EXPORT_SYMBOL(caam_qi_enqueue);
24836 +
24837 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
24838 + const struct qm_mr_entry *msg)
24839 +{
24840 + const struct qm_fd *fd;
24841 + struct caam_drv_req *drv_req;
24842 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
24843 +
24844 + fd = &msg->ern.fd;
24845 +
24846 + if (fd->format != qm_fd_compound) {
24847 + dev_err(qidev, "Non-compound FD from CAAM\n");
24848 + return;
24849 + }
24850 +
24851 + drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
24852 + if (!drv_req) {
24853 + dev_err(qidev,
24854 + "Can't find original request for CAAM response\n");
24855 + return;
24856 + }
24857 +
24858 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
24859 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
24860 +
24861 + drv_req->cbk(drv_req, -EIO);
24862 +}
24863 +
24864 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
24865 + struct qman_fq *rsp_fq,
24866 + dma_addr_t hwdesc,
24867 + int fq_sched_flag)
24868 +{
24869 + int ret;
24870 + struct qman_fq *req_fq;
24871 + struct qm_mcc_initfq opts;
24872 +
24873 + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
24874 + if (!req_fq)
24875 + return ERR_PTR(-ENOMEM);
24876 +
24877 + req_fq->cb.ern = caam_fq_ern_cb;
24878 + req_fq->cb.fqs = NULL;
24879 +
24880 + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
24881 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
24882 + req_fq);
24883 + if (ret) {
24884 + dev_err(qidev, "Failed to create session req FQ\n");
24885 + goto create_req_fq_fail;
24886 + }
24887 +
24888 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
24889 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
24890 + QM_INITFQ_WE_CGID;
24891 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
24892 + opts.fqd.dest.channel = qm_channel_caam;
24893 + opts.fqd.dest.wq = 2;
24894 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
24895 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
24896 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
24897 + opts.fqd.cgid = qipriv.cgr.cgrid;
24898 +
24899 + ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
24900 + if (ret) {
24901 + dev_err(qidev, "Failed to init session req FQ\n");
24902 + goto init_req_fq_fail;
24903 + }
24904 +
24905 + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
24906 + smp_processor_id());
24907 + return req_fq;
24908 +
24909 +init_req_fq_fail:
24910 + qman_destroy_fq(req_fq, 0);
24911 +create_req_fq_fail:
24912 + kfree(req_fq);
24913 + return ERR_PTR(ret);
24914 +}
24915 +
24916 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
24917 +{
24918 + int ret;
24919 +
24920 + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
24921 + QMAN_VOLATILE_FLAG_FINISH,
24922 + QM_VDQCR_PRECEDENCE_VDQCR |
24923 + QM_VDQCR_NUMFRAMES_TILLEMPTY);
24924 + if (ret) {
24925 + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
24926 + return ret;
24927 + }
24928 +
24929 + do {
24930 + struct qman_portal *p;
24931 +
24932 + p = qman_get_affine_portal(smp_processor_id());
24933 + qman_p_poll_dqrr(p, 16);
24934 + } while (fq->flags & QMAN_FQ_STATE_NE);
24935 +
24936 + return 0;
24937 +}
24938 +
24939 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
24940 +{
24941 + u32 flags;
24942 + int ret;
24943 +
24944 + ret = qman_retire_fq(fq, &flags);
24945 + if (ret < 0) {
24946 + dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
24947 + return ret;
24948 + }
24949 +
24950 + if (!ret)
24951 + goto empty_fq;
24952 +
24953 + /* Async FQ retirement condition */
24954 + if (ret == 1) {
24955 + /* Retry till FQ gets in retired state */
24956 + do {
24957 + msleep(20);
24958 + } while (fq->state != qman_fq_state_retired);
24959 +
24960 + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
24961 + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
24962 + }
24963 +
24964 +empty_fq:
24965 + if (fq->flags & QMAN_FQ_STATE_NE) {
24966 + ret = empty_retired_fq(qidev, fq);
24967 + if (ret) {
24968 + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
24969 + fq->fqid);
24970 + return ret;
24971 + }
24972 + }
24973 +
24974 + ret = qman_oos_fq(fq);
24975 + if (ret)
24976 + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
24977 +
24978 + qman_destroy_fq(fq, 0);
24979 + kfree(fq);
24980 +
24981 + return ret;
24982 +}
24983 +
24984 +static int empty_caam_fq(struct qman_fq *fq)
24985 +{
24986 + int ret;
24987 + struct qm_mcr_queryfq_np np;
24988 +
24989 + /* Wait till the older CAAM FQ gets empty */
24990 + do {
24991 + ret = qman_query_fq_np(fq, &np);
24992 + if (ret)
24993 + return ret;
24994 +
24995 + if (!np.frm_cnt)
24996 + break;
24997 +
24998 + msleep(20);
24999 + } while (1);
25000 +
25001 + /*
25002 + * Give extra time for pending jobs from this FQ in holding tanks
25003 + * to get processed
25004 + */
25005 + msleep(20);
25006 + return 0;
25007 +}
25008 +
25009 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
25010 +{
25011 + int ret;
25012 + u32 num_words;
25013 + struct qman_fq *new_fq, *old_fq;
25014 + struct device *qidev = drv_ctx->qidev;
25015 +
25016 + num_words = desc_len(sh_desc);
25017 + if (num_words > MAX_SDLEN) {
25018 + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
25019 + return -EINVAL;
25020 + }
25021 +
25022 + /* Note down older req FQ */
25023 + old_fq = drv_ctx->req_fq;
25024 +
25025 + /* Create a new req FQ in parked state */
25026 + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
25027 + drv_ctx->context_a, 0);
25028 + if (unlikely(IS_ERR_OR_NULL(new_fq))) {
25029 + dev_err(qidev, "FQ allocation for shdesc update failed\n");
25030 + return PTR_ERR(new_fq);
25031 + }
25032 +
25033 + /* Hook up new FQ to context so that new requests keep queuing */
25034 + drv_ctx->req_fq = new_fq;
25035 +
25036 + /* Empty and remove the older FQ */
25037 + ret = empty_caam_fq(old_fq);
25038 + if (ret) {
25039 + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
25040 +
25041 + /* We can revert to older FQ */
25042 + drv_ctx->req_fq = old_fq;
25043 +
25044 + if (kill_fq(qidev, new_fq))
25045 + dev_warn(qidev, "New CAAM FQ kill failed\n");
25046 +
25047 + return ret;
25048 + }
25049 +
25050 + /*
25051 + * Re-initialise pre-header. Set RSLS and SDLEN.
25052 + * Update the shared descriptor for driver context.
25053 + */
25054 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25055 + num_words);
25056 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25057 + dma_sync_single_for_device(qidev, drv_ctx->context_a,
25058 + sizeof(drv_ctx->sh_desc) +
25059 + sizeof(drv_ctx->prehdr),
25060 + DMA_BIDIRECTIONAL);
25061 +
25062 + /* Put the new FQ in scheduled state */
25063 + ret = qman_schedule_fq(new_fq);
25064 + if (ret) {
25065 + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
25066 +
25067 + /*
25068 + * We can kill new FQ and revert to old FQ.
25069 + * Since the desc is already modified, it is a success case
25070 + */
25071 +
25072 + drv_ctx->req_fq = old_fq;
25073 +
25074 + if (kill_fq(qidev, new_fq))
25075 + dev_warn(qidev, "New CAAM FQ kill failed\n");
25076 + } else if (kill_fq(qidev, old_fq)) {
25077 + dev_warn(qidev, "Old CAAM FQ kill failed\n");
25078 + }
25079 +
25080 + return 0;
25081 +}
25082 +EXPORT_SYMBOL(caam_drv_ctx_update);
25083 +
25084 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
25085 + int *cpu,
25086 + u32 *sh_desc)
25087 +{
25088 + size_t size;
25089 + u32 num_words;
25090 + dma_addr_t hwdesc;
25091 + struct caam_drv_ctx *drv_ctx;
25092 + const cpumask_t *cpus = qman_affine_cpus();
25093 +
25094 + num_words = desc_len(sh_desc);
25095 + if (num_words > MAX_SDLEN) {
25096 + dev_err(qidev, "Invalid descriptor len: %d words\n",
25097 + num_words);
25098 + return ERR_PTR(-EINVAL);
25099 + }
25100 +
25101 + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
25102 + if (!drv_ctx)
25103 + return ERR_PTR(-ENOMEM);
25104 +
25105 + /*
25106 + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
25107 + * and dma-map them.
25108 + */
25109 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25110 + num_words);
25111 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25112 + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
25113 + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
25114 + DMA_BIDIRECTIONAL);
25115 + if (dma_mapping_error(qidev, hwdesc)) {
25116 + dev_err(qidev, "DMA map error for preheader + shdesc\n");
25117 + kfree(drv_ctx);
25118 + return ERR_PTR(-ENOMEM);
25119 + }
25120 + drv_ctx->context_a = hwdesc;
25121 +
25122 + /* If given CPU does not own the portal, choose another one that does */
25123 + if (!cpumask_test_cpu(*cpu, cpus)) {
25124 + int *pcpu = &get_cpu_var(last_cpu);
25125 +
25126 + *pcpu = cpumask_next(*pcpu, cpus);
25127 + if (*pcpu >= nr_cpu_ids)
25128 + *pcpu = cpumask_first(cpus);
25129 + *cpu = *pcpu;
25130 +
25131 + put_cpu_var(last_cpu);
25132 + }
25133 + drv_ctx->cpu = *cpu;
25134 +
25135 + /* Find response FQ hooked with this CPU */
25136 + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
25137 +
25138 + /* Attach request FQ */
25139 + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
25140 + QMAN_INITFQ_FLAG_SCHED);
25141 + if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
25142 + dev_err(qidev, "create_caam_req_fq failed\n");
25143 + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
25144 + kfree(drv_ctx);
25145 + return ERR_PTR(-ENOMEM);
25146 + }
25147 +
25148 + drv_ctx->qidev = qidev;
25149 + return drv_ctx;
25150 +}
25151 +EXPORT_SYMBOL(caam_drv_ctx_init);
25152 +
25153 +void *qi_cache_alloc(gfp_t flags)
25154 +{
25155 + return kmem_cache_alloc(qi_cache, flags);
25156 +}
25157 +EXPORT_SYMBOL(qi_cache_alloc);
25158 +
25159 +void qi_cache_free(void *obj)
25160 +{
25161 + kmem_cache_free(qi_cache, obj);
25162 +}
25163 +EXPORT_SYMBOL(qi_cache_free);
25164 +
25165 +static int caam_qi_poll(struct napi_struct *napi, int budget)
25166 +{
25167 + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
25168 +
25169 + int cleaned = qman_p_poll_dqrr(np->p, budget);
25170 +
25171 + if (cleaned < budget) {
25172 + napi_complete(napi);
25173 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
25174 + }
25175 +
25176 + return cleaned;
25177 +}
25178 +
25179 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
25180 +{
25181 + if (IS_ERR_OR_NULL(drv_ctx))
25182 + return;
25183 +
25184 + /* Remove request FQ */
25185 + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
25186 + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
25187 +
25188 + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
25189 + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
25190 + DMA_BIDIRECTIONAL);
25191 + kfree(drv_ctx);
25192 +}
25193 +EXPORT_SYMBOL(caam_drv_ctx_rel);
25194 +
25195 +int caam_qi_shutdown(struct device *qidev)
25196 +{
25197 + int i, ret;
25198 + struct caam_qi_priv *priv = dev_get_drvdata(qidev);
25199 + const cpumask_t *cpus = qman_affine_cpus();
25200 + struct cpumask old_cpumask = current->cpus_allowed;
25201 +
25202 + for_each_cpu(i, cpus) {
25203 + struct napi_struct *irqtask;
25204 +
25205 + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
25206 + napi_disable(irqtask);
25207 + netif_napi_del(irqtask);
25208 +
25209 + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
25210 + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
25211 + }
25212 +
25213 + /*
25214 + * QMan driver requires CGRs to be deleted from the same CPU on which they
25215 + * were instantiated. Hence we run the module removal from the same CPU on
25216 + * which the module was originally inserted.
25217 + */
25218 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25219 +
25220 + ret = qman_delete_cgr(&priv->cgr);
25221 + if (ret)
25222 + dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
25223 + else
25224 + qman_release_cgrid(priv->cgr.cgrid);
25225 +
25226 + kmem_cache_destroy(qi_cache);
25227 +
25228 + /* Now that we're done with the CGRs, restore the cpus allowed mask */
25229 + set_cpus_allowed_ptr(current, &old_cpumask);
25230 +
25231 + platform_device_unregister(priv->qi_pdev);
25232 + return ret;
25233 +}
25234 +
25235 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
25236 +{
25237 + caam_congested = congested;
25238 +
25239 + if (congested) {
25240 +#ifdef CONFIG_DEBUG_FS
25241 + times_congested++;
25242 +#endif
25243 + pr_debug_ratelimited("CAAM entered congestion\n");
25244 +
25245 + } else {
25246 + pr_debug_ratelimited("CAAM exited congestion\n");
25247 + }
25248 +}
25249 +
25250 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
25251 +{
25252 + /*
25253 + * In case of threaded ISR, for RT kernels in_irq() does not return an
25254 + * appropriate value, so use in_serving_softirq() to distinguish between
25255 + * softirq and irq contexts.
25256 + */
25257 + if (unlikely(in_irq() || !in_serving_softirq())) {
25258 + /* Disable QMan IRQ source and invoke NAPI */
25259 + qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
25260 + np->p = p;
25261 + napi_schedule(&np->irqtask);
25262 + return 1;
25263 + }
25264 + return 0;
25265 +}
25266 +
25267 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
25268 + struct qman_fq *rsp_fq,
25269 + const struct qm_dqrr_entry *dqrr)
25270 +{
25271 + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
25272 + struct caam_drv_req *drv_req;
25273 + const struct qm_fd *fd;
25274 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
25275 +
25276 + if (caam_qi_napi_schedule(p, caam_napi))
25277 + return qman_cb_dqrr_stop;
25278 +
25279 + fd = &dqrr->fd;
25280 + if (unlikely(fd->status))
25281 + dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
25282 +
25283 + if (unlikely(fd->format != qm_fd_compound)) {
25284 + dev_err(qidev, "Non-compound FD from CAAM\n");
25285 + return qman_cb_dqrr_consume;
25286 + }
25287 +
25288 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
25289 + if (unlikely(!drv_req)) {
25290 + dev_err(qidev,
25291 + "Can't find original request for caam response\n");
25292 + return qman_cb_dqrr_consume;
25293 + }
25294 +
25295 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
25296 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
25297 +
25298 + drv_req->cbk(drv_req, fd->status);
25299 + return qman_cb_dqrr_consume;
25300 +}
25301 +
25302 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
25303 +{
25304 + struct qm_mcc_initfq opts;
25305 + struct qman_fq *fq;
25306 + int ret;
25307 +
25308 + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
25309 + if (!fq)
25310 + return -ENOMEM;
25311 +
25312 + fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
25313 +
25314 + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
25315 + QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
25316 + if (ret) {
25317 + dev_err(qidev, "Rsp FQ create failed\n");
25318 + kfree(fq);
25319 + return -ENODEV;
25320 + }
25321 +
25322 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
25323 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
25324 + QM_INITFQ_WE_CGID;
25325 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
25326 + QM_FQCTRL_CGE;
25327 + opts.fqd.dest.channel = qman_affine_channel(cpu);
25328 + opts.fqd.dest.wq = 3;
25329 + opts.fqd.cgid = qipriv.cgr.cgrid;
25330 + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
25331 + QM_STASHING_EXCL_DATA;
25332 + opts.fqd.context_a.stashing.data_cl = 1;
25333 + opts.fqd.context_a.stashing.context_cl = 1;
25334 +
25335 + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
25336 + if (ret) {
25337 + dev_err(qidev, "Rsp FQ init failed\n");
25338 + kfree(fq);
25339 + return -ENODEV;
25340 + }
25341 +
25342 + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
25343 +
25344 + dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
25345 + return 0;
25346 +}
25347 +
25348 +static int init_cgr(struct device *qidev)
25349 +{
25350 + int ret;
25351 + struct qm_mcc_initcgr opts;
25352 + const u64 cpus = *(u64 *)qman_affine_cpus();
25353 + const int num_cpus = hweight64(cpus);
25354 + const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
25355 +
25356 + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
25357 + if (ret) {
25358 + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
25359 + return ret;
25360 + }
25361 +
25362 + qipriv.cgr.cb = cgr_cb;
25363 + memset(&opts, 0, sizeof(opts));
25364 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
25365 + opts.cgr.cscn_en = QM_CGR_EN;
25366 + opts.cgr.mode = QMAN_CGR_MODE_FRAME;
25367 + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
25368 +
25369 + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
25370 + if (ret) {
25371 + dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
25372 + qipriv.cgr.cgrid);
25373 + return ret;
25374 + }
25375 +
25376 + dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
25377 + return 0;
25378 +}
25379 +
25380 +static int alloc_rsp_fqs(struct device *qidev)
25381 +{
25382 + int ret, i;
25383 + const cpumask_t *cpus = qman_affine_cpus();
25384 +
25385 + /* Now create response FQs */
25386 + for_each_cpu(i, cpus) {
25387 + ret = alloc_rsp_fq_cpu(qidev, i);
25388 + if (ret) {
25389 + dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
25390 + return ret;
25391 + }
25392 + }
25393 +
25394 + return 0;
25395 +}
25396 +
25397 +static void free_rsp_fqs(void)
25398 +{
25399 + int i;
25400 + const cpumask_t *cpus = qman_affine_cpus();
25401 +
25402 + for_each_cpu(i, cpus)
25403 + kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
25404 +}
25405 +
25406 +int caam_qi_init(struct platform_device *caam_pdev)
25407 +{
25408 + int err, i;
25409 + struct platform_device *qi_pdev;
25410 + struct device *ctrldev = &caam_pdev->dev, *qidev;
25411 + struct caam_drv_private *ctrlpriv;
25412 + const cpumask_t *cpus = qman_affine_cpus();
25413 + struct cpumask old_cpumask = current->cpus_allowed;
25414 + static struct platform_device_info qi_pdev_info = {
25415 + .name = "caam_qi",
25416 + .id = PLATFORM_DEVID_NONE
25417 + };
25418 +
25419 + /*
25420 + * QMan requires CGRs to be removed from the same CPU+portal on which they
25421 + * were originally allocated. Hence we need to note down the
25422 + * initialisation CPU and use the same CPU for module exit.
25423 + * We select the first CPU from the list of portal-owning CPUs.
25424 + * Then we pin module init to this CPU.
25425 + */
25426 + mod_init_cpu = cpumask_first(cpus);
25427 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25428 +
25429 + qi_pdev_info.parent = ctrldev;
25430 + qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
25431 + qi_pdev = platform_device_register_full(&qi_pdev_info);
25432 + if (IS_ERR(qi_pdev))
25433 + return PTR_ERR(qi_pdev);
25434 + arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
25435 +
25436 + ctrlpriv = dev_get_drvdata(ctrldev);
25437 + qidev = &qi_pdev->dev;
25438 +
25439 + qipriv.qi_pdev = qi_pdev;
25440 + dev_set_drvdata(qidev, &qipriv);
25441 +
25442 + /* Initialize the congestion detection */
25443 + err = init_cgr(qidev);
25444 + if (err) {
25445 + dev_err(qidev, "CGR initialization failed: %d\n", err);
25446 + platform_device_unregister(qi_pdev);
25447 + return err;
25448 + }
25449 +
25450 + /* Initialise response FQs */
25451 + err = alloc_rsp_fqs(qidev);
25452 + if (err) {
25453 + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
25454 + free_rsp_fqs();
25455 + platform_device_unregister(qi_pdev);
25456 + return err;
25457 + }
25458 +
25459 + /*
25460 + * Enable the NAPI contexts on each core that has an affine
25461 + * portal.
25462 + */
25463 + for_each_cpu(i, cpus) {
25464 + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
25465 + struct caam_napi *caam_napi = &priv->caam_napi;
25466 + struct napi_struct *irqtask = &caam_napi->irqtask;
25467 + struct net_device *net_dev = &priv->net_dev;
25468 +
25469 + net_dev->dev = *qidev;
25470 + INIT_LIST_HEAD(&net_dev->napi_list);
25471 +
25472 + netif_napi_add(net_dev, irqtask, caam_qi_poll,
25473 + CAAM_NAPI_WEIGHT);
25474 +
25475 + napi_enable(irqtask);
25476 + }
25477 +
25478 + /* Hook up QI device to parent controlling caam device */
25479 + ctrlpriv->qidev = qidev;
25480 +
25481 + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
25482 + SLAB_CACHE_DMA, NULL);
25483 + if (!qi_cache) {
25484 + dev_err(qidev, "Can't allocate CAAM cache\n");
25485 + free_rsp_fqs();
25486 + platform_device_unregister(qi_pdev);
25487 + return -ENOMEM;
25488 + }
25489 +
25490 + /* Done with the CGRs; restore the cpus allowed mask */
25491 + set_cpus_allowed_ptr(current, &old_cpumask);
25492 +#ifdef CONFIG_DEBUG_FS
25493 + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
25494 + &times_congested, &caam_fops_u64_ro);
25495 +#endif
25496 + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
25497 + return 0;
25498 +}
25499 --- /dev/null
25500 +++ b/drivers/crypto/caam/qi.h
25501 @@ -0,0 +1,204 @@
25502 +/*
25503 + * Public definitions for the CAAM/QI (Queue Interface) backend.
25504 + *
25505 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25506 + * Copyright 2016-2017 NXP
25507 + */
25508 +
25509 +#ifndef __QI_H__
25510 +#define __QI_H__
25511 +
25512 +#include <linux/fsl_qman.h>
25513 +#include "compat.h"
25514 +#include "desc.h"
25515 +#include "desc_constr.h"
25516 +
25517 +/*
25518 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
25519 + * (as pointed by context_a of to-CAAM FQ).
25520 + * When the job descriptor is executed by DECO, the whole job descriptor
25521 + * together with shared descriptor gets loaded in DECO buffer, which is
25522 + * 64 words (each 32-bit) long.
25523 + *
25524 + * The job descriptor constructed by CAAM hardware has the following layout:
25525 + *
25526 + * HEADER (1 word)
25527 + * Shdesc ptr (1 or 2 words)
25528 + * SEQ_OUT_PTR (1 word)
25529 + * Out ptr (1 or 2 words)
25530 + * Out length (1 word)
25531 + * SEQ_IN_PTR (1 word)
25532 + * In ptr (1 or 2 words)
25533 + * In length (1 word)
25534 + *
25535 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
25536 + *
25537 + * Apart from shdesc contents, the total number of words that get loaded in DECO
25538 + * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
25539 + * storing shared descriptor.
25540 + */
25541 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
25542 +
25543 +/* Length of a single buffer in the QI driver memory cache */
25544 +#define CAAM_QI_MEMCACHE_SIZE 768
25545 +
25546 +extern bool caam_congested __read_mostly;
25547 +
25548 +/*
25549 + * This is the request structure the driver application should fill while
25550 + * submitting a job to driver.
25551 + */
25552 +struct caam_drv_req;
25553 +
25554 +/*
25555 + * caam_qi_cbk - application's callback function invoked by the driver when the
25556 + * request has been successfully processed.
25557 + * @drv_req: original request that was submitted
25558 + * @status: completion status of request (0 - success, non-zero - error code)
25559 + */
25560 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
25561 +
25562 +enum optype {
25563 + ENCRYPT,
25564 + DECRYPT,
25565 + GIVENCRYPT,
25566 + NUM_OP
25567 +};
25568 +
25569 +/**
25570 + * caam_drv_ctx - CAAM/QI backend driver context
25571 + *
25572 + * The jobs are processed by the driver against a driver context.
25573 + * With every cryptographic context, a driver context is attached.
25574 + * The driver context contains data for private use by driver.
25575 + * For the applications, this is an opaque structure.
25576 + *
25577 + * @prehdr: preheader placed before shrd desc
25578 + * @sh_desc: shared descriptor
25579 + * @context_a: shared descriptor dma address
25580 + * @req_fq: to-CAAM request frame queue
25581 + * @rsp_fq: from-CAAM response frame queue
25582 + * @cpu: cpu on which to receive CAAM response
25583 + * @op_type: operation type
25584 + * @qidev: device pointer for CAAM/QI backend
25585 + */
25586 +struct caam_drv_ctx {
25587 + u32 prehdr[2];
25588 + u32 sh_desc[MAX_SDLEN];
25589 + dma_addr_t context_a;
25590 + struct qman_fq *req_fq;
25591 + struct qman_fq *rsp_fq;
25592 + int cpu;
25593 + enum optype op_type;
25594 + struct device *qidev;
25595 +} ____cacheline_aligned;
25596 +
25597 +/**
25598 + * caam_drv_req - The request structure the driver application should fill while
25599 + * submitting a job to driver.
25600 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
25601 + * buffers.
25602 + * @cbk: callback function to invoke when job is completed
25603 + * @app_ctx: arbitrary context attached with request by the application
25604 + *
25605 + * The fields mentioned below should not be used by application.
25606 + * These are for private use by driver.
25607 + *
25608 + * @hdr__: linked list header to maintain list of outstanding requests to CAAM
25609 + * @hwaddr: DMA address for the S/G table.
25610 + */
25611 +struct caam_drv_req {
25612 + struct qm_sg_entry fd_sgt[2];
25613 + struct caam_drv_ctx *drv_ctx;
25614 + caam_qi_cbk cbk;
25615 + void *app_ctx;
25616 +} ____cacheline_aligned;
25617 +
25618 +/**
25619 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
25620 + *
25621 + * A CAAM/QI driver context must be attached with each cryptographic context.
25622 + * This function allocates memory for CAAM/QI context and returns a handle to
25623 + * the application. This handle must be submitted along with each enqueue
25624 + * request to the driver by the application.
25625 + *
25626 + * @cpu: CPU on which the application prefers the driver to receive CAAM
25627 + * responses. The request completion callback would be issued from this
25628 + * CPU.
25629 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
25630 + * context.
25631 + *
25632 + * Returns a driver context on success or negative error code on failure.
25633 + */
25634 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
25635 + u32 *sh_desc);
25636 +
25637 +/**
25638 + * caam_qi_enqueue - Submit a request to QI backend driver.
25639 + *
25640 + * The request structure must be properly filled as described above.
25641 + *
25642 + * @qidev: device pointer for QI backend
25643 + * @req: CAAM QI request structure
25644 + *
25645 + * Returns 0 on success or negative error code on failure.
25646 + */
25647 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
25648 +
25649 +/**
25650 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
25651 + * or too many CAAM responses are pending to be processed.
25652 + * @drv_ctx: driver context for which job is to be submitted
25653 + *
25654 + * Returns caam congestion status 'true/false'
25655 + */
25656 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
25657 +
25658 +/**
25659 + * caam_drv_ctx_update - Update QI driver context
25660 + *
25661 + * Invoked when the shared descriptor needs to be changed in the driver context.
25662 + *
25663 + * @drv_ctx: driver context to be updated
25664 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
25665 + *
25666 + * Returns 0 on success or negative error code on failure.
25667 + */
25668 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
25669 +
25670 +/**
25671 + * caam_drv_ctx_rel - Release a QI driver context
25672 + * @drv_ctx: context to be released
25673 + */
25674 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
25675 +
25676 +int caam_qi_init(struct platform_device *pdev);
25677 +int caam_qi_shutdown(struct device *dev);
25678 +
25679 +/**
25680 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
25681 + *
25682 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
25683 + * to be allocated on the hotpath. Instead of using malloc, one can use the
25684 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
25685 + * will have a size of CAAM_QI_MEMCACHE_SIZE bytes, enough to host 16 SG entries.
25686 + *
25687 + * @flags: flags that would be used for the equivalent malloc(..) call
25688 + *
25689 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
25690 + */
25691 +void *qi_cache_alloc(gfp_t flags);
25692 +
25693 +/**
25694 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
25695 + *
25696 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
25697 + * the buffer previously allocated by a qi_cache_alloc call.
25698 + * No checking is done; the call is a passthrough call to
25699 + * kmem_cache_free(...)
25700 + *
25701 + * @obj: object previously allocated using qi_cache_alloc()
25702 + */
25703 +void qi_cache_free(void *obj);
25704 +
25705 +#endif /* __QI_H__ */
25706 --- a/drivers/crypto/caam/regs.h
25707 +++ b/drivers/crypto/caam/regs.h
25708 @@ -2,6 +2,7 @@
25709 * CAAM hardware register-level view
25710 *
25711 * Copyright 2008-2011 Freescale Semiconductor, Inc.
25712 + * Copyright 2017 NXP
25713 */
25714
25715 #ifndef REGS_H
25716 @@ -67,6 +68,7 @@
25717 */
25718
25719 extern bool caam_little_end;
25720 +extern bool caam_imx;
25721
25722 #define caam_to_cpu(len) \
25723 static inline u##len caam##len ## _to_cpu(u##len val) \
25724 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
25725 #else /* CONFIG_64BIT */
25726 static inline void wr_reg64(void __iomem *reg, u64 data)
25727 {
25728 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25729 - if (caam_little_end) {
25730 + if (!caam_imx && caam_little_end) {
25731 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
25732 wr_reg32((u32 __iomem *)(reg), data);
25733 - } else
25734 -#endif
25735 - {
25736 + } else {
25737 wr_reg32((u32 __iomem *)(reg), data >> 32);
25738 wr_reg32((u32 __iomem *)(reg) + 1, data);
25739 }
25740 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
25741
25742 static inline u64 rd_reg64(void __iomem *reg)
25743 {
25744 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25745 - if (caam_little_end)
25746 + if (!caam_imx && caam_little_end)
25747 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
25748 (u64)rd_reg32((u32 __iomem *)(reg)));
25749 - else
25750 -#endif
25751 - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25752 - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25753 +
25754 + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25755 + (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25756 }
25757 #endif /* CONFIG_64BIT */
25758
25759 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
25760 +{
25761 + if (caam_imx)
25762 + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
25763 + (u64)cpu_to_caam32(upper_32_bits(value)));
25764 +
25765 + return cpu_to_caam64(value);
25766 +}
25767 +
25768 +static inline u64 caam_dma64_to_cpu(u64 value)
25769 +{
25770 + if (caam_imx)
25771 + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
25772 + (u64)caam32_to_cpu(upper_32_bits(value)));
25773 +
25774 + return caam64_to_cpu(value);
25775 +}
25776 +
25777 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
25778 -#ifdef CONFIG_SOC_IMX7D
25779 -#define cpu_to_caam_dma(value) \
25780 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25781 - (u64)cpu_to_caam32(upper_32_bits(value)))
25782 -#define caam_dma_to_cpu(value) \
25783 - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
25784 - (u64)caam32_to_cpu(upper_32_bits(value)))
25785 -#else
25786 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
25787 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
25788 -#endif /* CONFIG_SOC_IMX7D */
25789 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
25790 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
25791 #else
25792 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
25793 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
25794 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25795 -
25796 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25797 -#define cpu_to_caam_dma64(value) \
25798 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25799 - (u64)cpu_to_caam32(upper_32_bits(value)))
25800 -#else
25801 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
25802 -#endif
25803 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25804
25805 /*
25806 * jr_outentry
25807 @@ -293,6 +291,7 @@ struct caam_perfmon {
25808 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
25809 #define CTPR_MS_QI_SHIFT 25
25810 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
25811 +#define CTPR_MS_DPAA2 BIT(13)
25812 #define CTPR_MS_VIRT_EN_INCL 0x00000001
25813 #define CTPR_MS_VIRT_EN_POR 0x00000002
25814 #define CTPR_MS_PG_SZ_MASK 0x10
25815 @@ -628,6 +627,8 @@ struct caam_job_ring {
25816 #define JRSTA_DECOERR_INVSIGN 0x86
25817 #define JRSTA_DECOERR_DSASIGN 0x87
25818
25819 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
25820 +
25821 #define JRSTA_CCBERR_JUMP 0x08000000
25822 #define JRSTA_CCBERR_INDEX_MASK 0xff00
25823 #define JRSTA_CCBERR_INDEX_SHIFT 8
25824 --- /dev/null
25825 +++ b/drivers/crypto/caam/sg_sw_qm.h
25826 @@ -0,0 +1,126 @@
25827 +/*
25828 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25829 + * Copyright 2016-2017 NXP
25830 + *
25831 + * Redistribution and use in source and binary forms, with or without
25832 + * modification, are permitted provided that the following conditions are met:
25833 + * * Redistributions of source code must retain the above copyright
25834 + * notice, this list of conditions and the following disclaimer.
25835 + * * Redistributions in binary form must reproduce the above copyright
25836 + * notice, this list of conditions and the following disclaimer in the
25837 + * documentation and/or other materials provided with the distribution.
25838 + * * Neither the name of Freescale Semiconductor nor the
25839 + * names of its contributors may be used to endorse or promote products
25840 + * derived from this software without specific prior written permission.
25841 + *
25842 + *
25843 + * ALTERNATIVELY, this software may be distributed under the terms of the
25844 + * GNU General Public License ("GPL") as published by the Free Software
25845 + * Foundation, either version 2 of that License or (at your option) any
25846 + * later version.
25847 + *
25848 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
25849 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25850 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25851 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25852 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25853 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25854 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25855 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25856 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25857 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25858 + */
25859 +
25860 +#ifndef __SG_SW_QM_H
25861 +#define __SG_SW_QM_H
25862 +
25863 +#include <linux/fsl_qman.h>
25864 +#include "regs.h"
25865 +
25866 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
25867 +{
25868 + dma_addr_t addr = qm_sg_ptr->opaque;
25869 +
25870 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
25871 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
25872 +}
25873 +
25874 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
25875 + u32 len, u16 offset)
25876 +{
25877 + qm_sg_ptr->addr = dma;
25878 + qm_sg_ptr->length = len;
25879 + qm_sg_ptr->__reserved2 = 0;
25880 + qm_sg_ptr->bpid = 0;
25881 + qm_sg_ptr->__reserved3 = 0;
25882 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
25883 +
25884 + cpu_to_hw_sg(qm_sg_ptr);
25885 +}
25886 +
25887 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
25888 + dma_addr_t dma, u32 len, u16 offset)
25889 +{
25890 + qm_sg_ptr->extension = 0;
25891 + qm_sg_ptr->final = 0;
25892 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25893 +}
25894 +
25895 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
25896 + dma_addr_t dma, u32 len, u16 offset)
25897 +{
25898 + qm_sg_ptr->extension = 0;
25899 + qm_sg_ptr->final = 1;
25900 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25901 +}
25902 +
25903 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
25904 + dma_addr_t dma, u32 len, u16 offset)
25905 +{
25906 + qm_sg_ptr->extension = 1;
25907 + qm_sg_ptr->final = 0;
25908 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25909 +}
25910 +
25911 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
25912 + dma_addr_t dma, u32 len,
25913 + u16 offset)
25914 +{
25915 + qm_sg_ptr->extension = 1;
25916 + qm_sg_ptr->final = 1;
25917 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25918 +}
25919 +
25920 +/*
25921 + * convert scatterlist to h/w link table format
25922 + * but does not set the final bit; instead, returns the last entry
25923 + */
25924 +static inline struct qm_sg_entry *
25925 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
25926 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
25927 +{
25928 + while (sg_count && sg) {
25929 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
25930 + sg_dma_len(sg), offset);
25931 + qm_sg_ptr++;
25932 + sg = sg_next(sg);
25933 + sg_count--;
25934 + }
25935 + return qm_sg_ptr - 1;
25936 +}
25937 +
25938 +/*
25939 + * convert scatterlist to h/w link table format
25940 + * scatterlist must have been previously dma mapped
25941 + */
25942 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
25943 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
25944 +{
25945 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
25946 +
25947 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
25948 + qm_sg_ptr->final = 1;
25949 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
25950 +}
25951 +
25952 +#endif /* __SG_SW_QM_H */
25953 --- /dev/null
25954 +++ b/drivers/crypto/caam/sg_sw_qm2.h
25955 @@ -0,0 +1,81 @@
25956 +/*
25957 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
25958 + * Copyright 2017 NXP
25959 + *
25960 + * Redistribution and use in source and binary forms, with or without
25961 + * modification, are permitted provided that the following conditions are met:
25962 + * * Redistributions of source code must retain the above copyright
25963 + * notice, this list of conditions and the following disclaimer.
25964 + * * Redistributions in binary form must reproduce the above copyright
25965 + * notice, this list of conditions and the following disclaimer in the
25966 + * documentation and/or other materials provided with the distribution.
25967 + * * Neither the names of the above-listed copyright holders nor the
25968 + * names of any contributors may be used to endorse or promote products
25969 + * derived from this software without specific prior written permission.
25970 + *
25971 + *
25972 + * ALTERNATIVELY, this software may be distributed under the terms of the
25973 + * GNU General Public License ("GPL") as published by the Free Software
25974 + * Foundation, either version 2 of that License or (at your option) any
25975 + * later version.
25976 + *
25977 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25978 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25979 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25980 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25981 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25982 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25983 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25984 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25985 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25986 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25987 + * POSSIBILITY OF SUCH DAMAGE.
25988 + */
25989 +
25990 +#ifndef _SG_SW_QM2_H_
25991 +#define _SG_SW_QM2_H_
25992 +
25993 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
25994 +
25995 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
25996 + dma_addr_t dma, u32 len, u16 offset)
25997 +{
25998 + dpaa2_sg_set_addr(qm_sg_ptr, dma);
25999 + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
26000 + dpaa2_sg_set_final(qm_sg_ptr, false);
26001 + dpaa2_sg_set_len(qm_sg_ptr, len);
26002 + dpaa2_sg_set_bpid(qm_sg_ptr, 0);
26003 + dpaa2_sg_set_offset(qm_sg_ptr, offset);
26004 +}
26005 +
26006 +/*
26007 + * convert scatterlist to h/w link table format
26008 + * but does not have final bit; instead, returns last entry
26009 + */
26010 +static inline struct dpaa2_sg_entry *
26011 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26012 + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
26013 +{
26014 + while (sg_count && sg) {
26015 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26016 + sg_dma_len(sg), offset);
26017 + qm_sg_ptr++;
26018 + sg = sg_next(sg);
26019 + sg_count--;
26020 + }
26021 + return qm_sg_ptr - 1;
26022 +}
26023 +
26024 +/*
26025 + * convert scatterlist to h/w link table format
26026 + * scatterlist must have been previously dma mapped
26027 + */
26028 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26029 + struct dpaa2_sg_entry *qm_sg_ptr,
26030 + u16 offset)
26031 +{
26032 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26033 + dpaa2_sg_set_final(qm_sg_ptr, true);
26034 +}
26035 +
26036 +#endif /* _SG_SW_QM2_H_ */
26037 --- a/drivers/crypto/caam/sg_sw_sec4.h
26038 +++ b/drivers/crypto/caam/sg_sw_sec4.h
26039 @@ -5,9 +5,19 @@
26040 *
26041 */
26042
26043 +#ifndef _SG_SW_SEC4_H_
26044 +#define _SG_SW_SEC4_H_
26045 +
26046 +#include "ctrl.h"
26047 #include "regs.h"
26048 +#include "sg_sw_qm2.h"
26049 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26050
26051 -struct sec4_sg_entry;
26052 +struct sec4_sg_entry {
26053 + u64 ptr;
26054 + u32 len;
26055 + u32 bpid_offset;
26056 +};
26057
26058 /*
26059 * convert single dma address to h/w link table format
26060 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
26061 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
26062 dma_addr_t dma, u32 len, u16 offset)
26063 {
26064 - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26065 - sec4_sg_ptr->len = cpu_to_caam32(len);
26066 - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
26067 + if (caam_dpaa2) {
26068 + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
26069 + offset);
26070 + } else {
26071 + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26072 + sec4_sg_ptr->len = cpu_to_caam32(len);
26073 + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
26074 + SEC4_SG_OFFSET_MASK);
26075 + }
26076 #ifdef DEBUG
26077 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
26078 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
26079 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
26080 return sec4_sg_ptr - 1;
26081 }
26082
26083 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
26084 +{
26085 + if (caam_dpaa2)
26086 + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
26087 + else
26088 + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26089 +}
26090 +
26091 /*
26092 * convert scatterlist to h/w link table format
26093 * scatterlist must have been previously dma mapped
26094 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
26095 u16 offset)
26096 {
26097 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
26098 - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26099 -}
26100 -
26101 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
26102 - struct scatterlist *sg, unsigned int total,
26103 - struct sec4_sg_entry *sec4_sg_ptr)
26104 -{
26105 - do {
26106 - unsigned int len = min(sg_dma_len(sg), total);
26107 -
26108 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
26109 - sec4_sg_ptr++;
26110 - sg = sg_next(sg);
26111 - total -= len;
26112 - } while (total);
26113 - return sec4_sg_ptr - 1;
26114 + sg_to_sec4_set_last(sec4_sg_ptr);
26115 }
26116
26117 -/* derive number of elements in scatterlist, but return 0 for 1 */
26118 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
26119 -{
26120 - int sg_nents = sg_nents_for_len(sg_list, nbytes);
26121 -
26122 - if (likely(sg_nents == 1))
26123 - return 0;
26124 -
26125 - return sg_nents;
26126 -}
26127 +#endif /* _SG_SW_SEC4_H_ */
26128 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
26129 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
26130 @@ -516,7 +516,7 @@ err:
26131
26132 /**
26133 * rsi_disconnect() - This function performs the reverse of the probe function,
26134 - * it deintialize the driver structure.
26135 + * it deinitializes the driver structure.
26136 * @pfunction: Pointer to the USB interface structure.
26137 *
26138 * Return: None.
26139 --- a/drivers/staging/wilc1000/linux_wlan.c
26140 +++ b/drivers/staging/wilc1000/linux_wlan.c
26141 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
26142 vif = netdev_priv(dev);
26143 wilc = vif->wilc;
26144
26145 - /* Deintialize IRQ */
26146 + /* Deinitialize IRQ */
26147 if (wilc->dev_irq_num) {
26148 free_irq(wilc->dev_irq_num, wilc);
26149 gpio_free(wilc->gpio);
26150 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26151 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26152 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
26153 del_timer_sync(&wilc_during_ip_timer);
26154
26155 if (s32Error)
26156 - netdev_err(net, "Error while deintializing host interface\n");
26157 + netdev_err(net, "Error while deinitializing host interface\n");
26158
26159 return s32Error;
26160 }
26161 --- /dev/null
26162 +++ b/include/crypto/acompress.h
26163 @@ -0,0 +1,269 @@
26164 +/*
26165 + * Asynchronous Compression operations
26166 + *
26167 + * Copyright (c) 2016, Intel Corporation
26168 + * Authors: Weigang Li <weigang.li@intel.com>
26169 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26170 + *
26171 + * This program is free software; you can redistribute it and/or modify it
26172 + * under the terms of the GNU General Public License as published by the Free
26173 + * Software Foundation; either version 2 of the License, or (at your option)
26174 + * any later version.
26175 + *
26176 + */
26177 +#ifndef _CRYPTO_ACOMP_H
26178 +#define _CRYPTO_ACOMP_H
26179 +#include <linux/crypto.h>
26180 +
26181 +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
26182 +
26183 +/**
26184 + * struct acomp_req - asynchronous (de)compression request
26185 + *
26186 + * @base: Common attributes for asynchronous crypto requests
26187 + * @src: Source Data
26188 + * @dst: Destination data
26189 + * @slen: Size of the input buffer
26190 + * @dlen: Size of the output buffer and number of bytes produced
26191 + * @flags: Internal flags
26192 + * @__ctx: Start of private context data
26193 + */
26194 +struct acomp_req {
26195 + struct crypto_async_request base;
26196 + struct scatterlist *src;
26197 + struct scatterlist *dst;
26198 + unsigned int slen;
26199 + unsigned int dlen;
26200 + u32 flags;
26201 + void *__ctx[] CRYPTO_MINALIGN_ATTR;
26202 +};
26203 +
26204 +/**
26205 + * struct crypto_acomp - user-instantiated objects which encapsulate
26206 + * algorithms and core processing logic
26207 + *
26208 + * @compress: Function performs a compress operation
26209 + * @decompress: Function performs a de-compress operation
26210 + * @dst_free: Frees destination buffer if allocated inside the
26211 + * algorithm
26212 + * @reqsize: Context size for (de)compression requests
26213 + * @base: Common crypto API algorithm data structure
26214 + */
26215 +struct crypto_acomp {
26216 + int (*compress)(struct acomp_req *req);
26217 + int (*decompress)(struct acomp_req *req);
26218 + void (*dst_free)(struct scatterlist *dst);
26219 + unsigned int reqsize;
26220 + struct crypto_tfm base;
26221 +};
26222 +
26223 +/**
26224 + * struct acomp_alg - asynchronous compression algorithm
26225 + *
26226 + * @compress: Function performs a compress operation
26227 + * @decompress: Function performs a de-compress operation
26228 + * @dst_free: Frees destination buffer if allocated inside the algorithm
26229 + * @init: Initialize the cryptographic transformation object.
26230 + * This function is used to initialize the cryptographic
26231 + * transformation object. This function is called only once at
26232 + * the instantiation time, right after the transformation context
26233 + * was allocated. In case the cryptographic hardware has some
26234 + * special requirements which need to be handled by software, this
26235 + * function shall check for the precise requirement of the
26236 + * transformation and put any software fallbacks in place.
26237 + * @exit: Deinitialize the cryptographic transformation object. This is a
26238 + * counterpart to @init, used to remove various changes set in
26239 + * @init.
26240 + *
26241 + * @reqsize: Context size for (de)compression requests
26242 + * @base: Common crypto API algorithm data structure
26243 + */
26244 +struct acomp_alg {
26245 + int (*compress)(struct acomp_req *req);
26246 + int (*decompress)(struct acomp_req *req);
26247 + void (*dst_free)(struct scatterlist *dst);
26248 + int (*init)(struct crypto_acomp *tfm);
26249 + void (*exit)(struct crypto_acomp *tfm);
26250 + unsigned int reqsize;
26251 + struct crypto_alg base;
26252 +};
26253 +
26254 +/**
26255 + * DOC: Asynchronous Compression API
26256 + *
26257 + * The Asynchronous Compression API is used with the algorithms of type
26258 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
26259 + */
26260 +
26261 +/**
26262 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
26263 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
26264 + * compression algorithm e.g. "deflate"
26265 + * @type: specifies the type of the algorithm
26266 + * @mask: specifies the mask for the algorithm
26267 + *
26268 + * Allocate a handle for a compression algorithm. The returned struct
26269 + * crypto_acomp is the handle that is required for any subsequent
26270 + * API invocation for the compression operations.
26271 + *
26272 + * Return: allocated handle in case of success; IS_ERR() is true in case
26273 + * of an error, PTR_ERR() returns the error code.
26274 + */
26275 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
26276 + u32 mask);
26277 +
26278 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
26279 +{
26280 + return &tfm->base;
26281 +}
26282 +
26283 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
26284 +{
26285 + return container_of(alg, struct acomp_alg, base);
26286 +}
26287 +
26288 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
26289 +{
26290 + return container_of(tfm, struct crypto_acomp, base);
26291 +}
26292 +
26293 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
26294 +{
26295 + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
26296 +}
26297 +
26298 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
26299 +{
26300 + return tfm->reqsize;
26301 +}
26302 +
26303 +static inline void acomp_request_set_tfm(struct acomp_req *req,
26304 + struct crypto_acomp *tfm)
26305 +{
26306 + req->base.tfm = crypto_acomp_tfm(tfm);
26307 +}
26308 +
26309 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
26310 +{
26311 + return __crypto_acomp_tfm(req->base.tfm);
26312 +}
26313 +
26314 +/**
26315 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
26316 + *
26317 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26318 + */
26319 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
26320 +{
26321 + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
26322 +}
26323 +
26324 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
26325 +{
26326 + type &= ~CRYPTO_ALG_TYPE_MASK;
26327 + type |= CRYPTO_ALG_TYPE_ACOMPRESS;
26328 + mask |= CRYPTO_ALG_TYPE_MASK;
26329 +
26330 + return crypto_has_alg(alg_name, type, mask);
26331 +}
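Editorial note (not part of the patch): a minimal sketch of how a caller would probe for, obtain and release an acomp handle with the API declared above; "deflate" is only an example algorithm name and error handling is reduced to the essentials.

#include <crypto/acompress.h>

static int example_get_acomp(void)
{
	struct crypto_acomp *tfm;

	if (!crypto_has_acomp("deflate", 0, 0))
		return -ENOENT;		/* no acomp provider available */

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* lookup or instantiation failed */

	/* ... build requests and run (de)compression here ... */

	crypto_free_acomp(tfm);
	return 0;
}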
26332 +
26333 +/**
26334 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
26335 + *
26336 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26337 + *
26338 + * Return: allocated handle in case of success or NULL in case of an error
26339 + */
26340 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
26341 +
26342 +/**
26343 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
26344 + * request as well as the output buffer if allocated
26345 + * inside the algorithm
26346 + *
26347 + * @req: request to free
26348 + */
26349 +void acomp_request_free(struct acomp_req *req);
26350 +
26351 +/**
26352 + * acomp_request_set_callback() -- Sets an asynchronous callback
26353 + *
26354 + * Callback will be called when an asynchronous operation on a given
26355 + * request is finished.
26356 + *
26357 + * @req: request that the callback will be set for
26358 + * @flgs: specify for instance if the operation may backlog
26359 + * @cmpl: callback which will be called
26360 + * @data: private data used by the caller
26361 + */
26362 +static inline void acomp_request_set_callback(struct acomp_req *req,
26363 + u32 flgs,
26364 + crypto_completion_t cmpl,
26365 + void *data)
26366 +{
26367 + req->base.complete = cmpl;
26368 + req->base.data = data;
26369 + req->base.flags = flgs;
26370 +}
26371 +
26372 +/**
26373 + * acomp_request_set_params() -- Sets request parameters
26374 + *
26375 + * Sets parameters required by an acomp operation
26376 + *
26377 + * @req: asynchronous compress request
26378 + * @src: pointer to input buffer scatterlist
26379 + * @dst: pointer to output buffer scatterlist. If this is NULL, the
26380 + * acomp layer will allocate the output memory
26381 + * @slen: size of the input buffer
26382 + * @dlen: size of the output buffer. If dst is NULL, this can be used by
26383 + * the user to specify the maximum amount of memory to allocate
26384 + */
26385 +static inline void acomp_request_set_params(struct acomp_req *req,
26386 + struct scatterlist *src,
26387 + struct scatterlist *dst,
26388 + unsigned int slen,
26389 + unsigned int dlen)
26390 +{
26391 + req->src = src;
26392 + req->dst = dst;
26393 + req->slen = slen;
26394 + req->dlen = dlen;
26395 +
26396 + if (!req->dst)
26397 + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
26398 +}
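Editorial note (not part of the patch): a hedged sketch of preparing a request over two linear buffers; src_buf, dst_buf, their lengths and the function name are hypothetical caller-side details, not definitions from this patch.

#include <crypto/acompress.h>
#include <linux/scatterlist.h>

static struct acomp_req *example_prepare_req(struct crypto_acomp *tfm,
					     void *src_buf, unsigned int slen,
					     void *dst_buf, unsigned int dlen,
					     struct scatterlist *sg_src,
					     struct scatterlist *sg_dst)
{
	struct acomp_req *req;

	sg_init_one(sg_src, src_buf, slen);
	sg_init_one(sg_dst, dst_buf, dlen);

	req = acomp_request_alloc(tfm);
	if (!req)
		return NULL;

	/* passing a NULL dst instead would ask the acomp layer to allocate the output */
	acomp_request_set_params(req, sg_src, sg_dst, slen, dlen);
	return req;
}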
26399 +
26400 +/**
26401 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
26402 + *
26403 + * Function invokes the asynchronous compress operation
26404 + *
26405 + * @req: asynchronous compress request
26406 + *
26407 + * Return: zero on success; error code in case of error
26408 + */
26409 +static inline int crypto_acomp_compress(struct acomp_req *req)
26410 +{
26411 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26412 +
26413 + return tfm->compress(req);
26414 +}
26415 +
26416 +/**
26417 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
26418 + *
26419 + * Function invokes the asynchronous decompress operation
26420 + *
26421 + * @req: asynchronous compress request
26422 + *
26423 + * Return: zero on success; error code in case of error
26424 + */
26425 +static inline int crypto_acomp_decompress(struct acomp_req *req)
26426 +{
26427 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26428 +
26429 + return tfm->decompress(req);
26430 +}
26431 +
26432 +#endif
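Editorial note (not part of the patch): putting the pieces above together, a compress call on this asynchronous API can be driven with a completion, much like the kernel's test manager drives async requests; the result structure and the example_* names below are illustrative only.

#include <crypto/acompress.h>
#include <linux/completion.h>

struct example_acomp_result {
	struct completion completion;
	int err;
};

static void example_acomp_done(struct crypto_async_request *areq, int err)
{
	struct example_acomp_result *res = areq->data;

	if (err == -EINPROGRESS)	/* request only moved off the backlog */
		return;
	res->err = err;
	complete(&res->completion);
}

static int example_acomp_compress(struct acomp_req *req)
{
	struct example_acomp_result res;
	int ret;

	init_completion(&res.completion);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_acomp_done, &res);

	ret = crypto_acomp_compress(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}
	/* on success, req->dlen holds the number of bytes produced */
	return ret;
}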
26433 --- /dev/null
26434 +++ b/include/crypto/internal/acompress.h
26435 @@ -0,0 +1,81 @@
26436 +/*
26437 + * Asynchronous Compression operations
26438 + *
26439 + * Copyright (c) 2016, Intel Corporation
26440 + * Authors: Weigang Li <weigang.li@intel.com>
26441 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26442 + *
26443 + * This program is free software; you can redistribute it and/or modify it
26444 + * under the terms of the GNU General Public License as published by the Free
26445 + * Software Foundation; either version 2 of the License, or (at your option)
26446 + * any later version.
26447 + *
26448 + */
26449 +#ifndef _CRYPTO_ACOMP_INT_H
26450 +#define _CRYPTO_ACOMP_INT_H
26451 +#include <crypto/acompress.h>
26452 +
26453 +/*
26454 + * Transform internal helpers.
26455 + */
26456 +static inline void *acomp_request_ctx(struct acomp_req *req)
26457 +{
26458 + return req->__ctx;
26459 +}
26460 +
26461 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
26462 +{
26463 + return tfm->base.__crt_ctx;
26464 +}
26465 +
26466 +static inline void acomp_request_complete(struct acomp_req *req,
26467 + int err)
26468 +{
26469 + req->base.complete(&req->base, err);
26470 +}
26471 +
26472 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
26473 +{
26474 + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
26475 +}
26476 +
26477 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
26478 +{
26479 + struct acomp_req *req;
26480 +
26481 + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
26482 + if (likely(req))
26483 + acomp_request_set_tfm(req, tfm);
26484 + return req;
26485 +}
26486 +
26487 +static inline void __acomp_request_free(struct acomp_req *req)
26488 +{
26489 + kzfree(req);
26490 +}
26491 +
26492 +/**
26493 + * crypto_register_acomp() -- Register asynchronous compression algorithm
26494 + *
26495 + * Function registers an implementation of an asynchronous
26496 + * compression algorithm
26497 + *
26498 + * @alg: algorithm definition
26499 + *
26500 + * Return: zero on success; error code in case of error
26501 + */
26502 +int crypto_register_acomp(struct acomp_alg *alg);
26503 +
26504 +/**
26505 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
26506 + *
26507 + * Function unregisters an implementation of an asynchronous
26508 + * compression algorithm
26509 + *
26510 + * @alg: algorithm definition
26511 + *
26512 + * Return: zero on success; error code in case of error
26513 + */
26514 +int crypto_unregister_acomp(struct acomp_alg *alg);
26515 +
26516 +#endif
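Editorial note (not part of the patch): for an implementation, registration through crypto_register_acomp() might look like the hedged sketch below; every "my_*" identifier is a hypothetical placeholder, not something defined by this patch.

static struct acomp_alg my_acomp_alg = {
	.compress	= my_acomp_compress,	/* hypothetical handlers */
	.decompress	= my_acomp_decompress,
	.init		= my_acomp_init,
	.exit		= my_acomp_exit,
	.reqsize	= sizeof(struct my_acomp_request_ctx),
	.base = {
		.cra_name	 = "deflate",
		.cra_driver_name = "deflate-my-driver",
		.cra_priority	 = 300,
		.cra_ctxsize	 = sizeof(struct my_acomp_ctx),
		.cra_module	 = THIS_MODULE,
	},
};

static int __init my_acomp_mod_init(void)
{
	return crypto_register_acomp(&my_acomp_alg);
}

static void __exit my_acomp_mod_exit(void)
{
	crypto_unregister_acomp(&my_acomp_alg);
}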
26517 --- /dev/null
26518 +++ b/include/crypto/internal/scompress.h
26519 @@ -0,0 +1,136 @@
26520 +/*
26521 + * Synchronous Compression operations
26522 + *
26523 + * Copyright 2015 LG Electronics Inc.
26524 + * Copyright (c) 2016, Intel Corporation
26525 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26526 + *
26527 + * This program is free software; you can redistribute it and/or modify it
26528 + * under the terms of the GNU General Public License as published by the Free
26529 + * Software Foundation; either version 2 of the License, or (at your option)
26530 + * any later version.
26531 + *
26532 + */
26533 +#ifndef _CRYPTO_SCOMP_INT_H
26534 +#define _CRYPTO_SCOMP_INT_H
26535 +#include <linux/crypto.h>
26536 +
26537 +#define SCOMP_SCRATCH_SIZE 131072
26538 +
26539 +struct crypto_scomp {
26540 + struct crypto_tfm base;
26541 +};
26542 +
26543 +/**
26544 + * struct scomp_alg - synchronous compression algorithm
26545 + *
26546 + * @alloc_ctx: Function allocates algorithm specific context
26547 + * @free_ctx: Function frees context allocated with alloc_ctx
26548 + * @compress: Function performs a compress operation
26549 + * @decompress: Function performs a de-compress operation
26550 + * @init: Initialize the cryptographic transformation object.
26551 + * This function is used to initialize the cryptographic
26552 + * transformation object. This function is called only once at
26553 + * the instantiation time, right after the transformation context
26554 + * was allocated. In case the cryptographic hardware has some
26555 + * special requirements which need to be handled by software, this
26556 + * function shall check for the precise requirement of the
26557 + * transformation and put any software fallbacks in place.
26558 + * @exit: Deinitialize the cryptographic transformation object. This is a
26559 + * counterpart to @init, used to remove various changes set in
26560 + * @init.
26561 + * @base: Common crypto API algorithm data structure
26562 + */
26563 +struct scomp_alg {
26564 + void *(*alloc_ctx)(struct crypto_scomp *tfm);
26565 + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
26566 + int (*compress)(struct crypto_scomp *tfm, const u8 *src,
26567 + unsigned int slen, u8 *dst, unsigned int *dlen,
26568 + void *ctx);
26569 + int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
26570 + unsigned int slen, u8 *dst, unsigned int *dlen,
26571 + void *ctx);
26572 + struct crypto_alg base;
26573 +};
26574 +
26575 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
26576 +{
26577 + return container_of(alg, struct scomp_alg, base);
26578 +}
26579 +
26580 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
26581 +{
26582 + return container_of(tfm, struct crypto_scomp, base);
26583 +}
26584 +
26585 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
26586 +{
26587 + return &tfm->base;
26588 +}
26589 +
26590 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
26591 +{
26592 + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
26593 +}
26594 +
26595 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
26596 +{
26597 + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
26598 +}
26599 +
26600 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
26601 +{
26602 + return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
26603 +}
26604 +
26605 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
26606 + void *ctx)
26607 +{
26608 + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
26609 +}
26610 +
26611 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
26612 + const u8 *src, unsigned int slen,
26613 + u8 *dst, unsigned int *dlen, void *ctx)
26614 +{
26615 + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
26616 +}
26617 +
26618 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
26619 + const u8 *src, unsigned int slen,
26620 + u8 *dst, unsigned int *dlen,
26621 + void *ctx)
26622 +{
26623 + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
26624 + ctx);
26625 +}
26626 +
26627 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
26628 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
26629 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
26630 +
26631 +/**
26632 + * crypto_register_scomp() -- Register synchronous compression algorithm
26633 + *
26634 + * Function registers an implementation of a synchronous
26635 + * compression algorithm
26636 + *
26637 + * @alg: algorithm definition
26638 + *
26639 + * Return: zero on success; error code in case of error
26640 + */
26641 +int crypto_register_scomp(struct scomp_alg *alg);
26642 +
26643 +/**
26644 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
26645 + *
26646 + * Function unregisters an implementation of a synchronous
26647 + * compression algorithm
26648 + *
26649 + * @alg: algorithm definition
26650 + *
26651 + * Return: zero on success; error code in case of error
26652 + */
26653 +int crypto_unregister_scomp(struct scomp_alg *alg);
26654 +
26655 +#endif
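Editorial note (not part of the patch): the synchronous interface is driven through the per-context hooks above; a skeletal provider, with purely hypothetical "my_scomp_*" names and no real compression logic, could be registered roughly as follows.

static void *my_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	/* per-request working state; none needed in this skeleton */
	return NULL;
}

static void my_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int my_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
			     unsigned int slen, u8 *dst, unsigned int *dlen,
			     void *ctx)
{
	/* a real implementation would compress src into dst and update *dlen */
	return -EOPNOTSUPP;
}

static struct scomp_alg my_scomp_alg = {
	.alloc_ctx	= my_scomp_alloc_ctx,
	.free_ctx	= my_scomp_free_ctx,
	.compress	= my_scomp_compress,
	.decompress	= my_scomp_compress,	/* placeholder only */
	.base = {
		.cra_name	 = "deflate",
		.cra_driver_name = "deflate-my-scomp",
		.cra_module	 = THIS_MODULE,
	},
};

/* paired with crypto_register_scomp(&my_scomp_alg) / crypto_unregister_scomp(&my_scomp_alg) */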
26656 --- a/include/linux/crypto.h
26657 +++ b/include/linux/crypto.h
26658 @@ -50,6 +50,8 @@
26659 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
26660 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
26661 #define CRYPTO_ALG_TYPE_KPP 0x00000008
26662 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
26663 +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
26664 #define CRYPTO_ALG_TYPE_RNG 0x0000000c
26665 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
26666 #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
26667 @@ -60,6 +62,7 @@
26668 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
26669 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
26670 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
26671 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
26672
26673 #define CRYPTO_ALG_LARVAL 0x00000010
26674 #define CRYPTO_ALG_DEAD 0x00000020
26675 --- a/include/uapi/linux/cryptouser.h
26676 +++ b/include/uapi/linux/cryptouser.h
26677 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
26678 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
26679 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
26680 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
26681 + CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
26682 __CRYPTOCFGA_MAX
26683
26684 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
26685 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
26686 char type[CRYPTO_MAX_NAME];
26687 };
26688
26689 +struct crypto_report_acomp {
26690 + char type[CRYPTO_MAX_NAME];
26691 +};
26692 +
26693 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
26694 sizeof(struct crypto_report_blkcipher))
26695 --- a/scripts/spelling.txt
26696 +++ b/scripts/spelling.txt
26697 @@ -305,6 +305,9 @@ defintion||definition
26698 defintions||definitions
26699 defualt||default
26700 defult||default
26701 +deintializing||deinitializing
26702 +deintialize||deinitialize
26703 +deintialized||deinitialized
26704 deivce||device
26705 delared||declared
26706 delare||declare
26707 --- a/sound/soc/amd/acp-pcm-dma.c
26708 +++ b/sound/soc/amd/acp-pcm-dma.c
26709 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
26710 return 0;
26711 }
26712
26713 -/* Deintialize ACP */
26714 +/* Deinitialize ACP */
26715 static int acp_deinit(void __iomem *acp_mmio)
26716 {
26717 u32 val;