target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch (openwrt/staging/hauke.git)
1 From 0a5b97d1f524c1769b4059e3c7123b52755f7121 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 15:02:01 +0800
4 Subject: [PATCH] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for Layerscape SEC support.
10
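For reference, a minimal sketch of how a kernel consumer could drive the
asynchronous compression interface added by crypto/acompress.c. This is an
illustrative, untested example only: the "deflate" algorithm name, the
compress_buf() helper and its error handling are assumptions made here for
demonstration and are not part of this patch (an acomp/scomp provider for the
requested name must be registered for the allocation to succeed).

	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <crypto/acompress.h>

	struct acomp_wait {
		struct completion done;
		int err;
	};

	/* Completion callback, modelled on testmgr's tcrypt_complete(). */
	static void acomp_wait_done(struct crypto_async_request *req, int err)
	{
		struct acomp_wait *w = req->data;

		if (err == -EINPROGRESS)
			return;
		w->err = err;
		complete(&w->done);
	}

	/*
	 * Compress slen bytes from src into dst; *dlen is in/out. Buffers
	 * must be linear kernel memory (e.g. kmalloc'ed), not stack.
	 */
	static int compress_buf(void *src, unsigned int slen,
				void *dst, unsigned int *dlen)
	{
		struct scatterlist sg_src, sg_dst;
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		struct acomp_wait wait;
		int ret;

		tfm = crypto_alloc_acomp("deflate", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		init_completion(&wait.done);
		sg_init_one(&sg_src, src, slen);
		sg_init_one(&sg_dst, dst, *dlen);
		acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   acomp_wait_done, &wait);

		ret = crypto_acomp_compress(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&wait.done);
			ret = wait.err;
		}
		if (!ret)
			*dlen = req->dlen;	/* bytes actually produced */

		acomp_request_free(req);
	out_tfm:
		crypto_free_acomp(tfm);
		return ret;
	}

The TLS 1.0 record template added in crypto/tls.c is reached through the
existing AEAD API under names of the form "tls10(hmac(<digest>),cbc(<cipher>))",
which is also how the new tcrypt mode 191 and the testmgr vectors exercise
"tls10(hmac(sha1),cbc(aes))".
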
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32 crypto/Kconfig | 30 +
33 crypto/Makefile | 4 +
34 crypto/acompress.c | 169 +
35 crypto/algboss.c | 12 +-
36 crypto/crypto_user.c | 19 +
37 crypto/scompress.c | 356 ++
38 crypto/tcrypt.c | 17 +-
39 crypto/testmgr.c | 1701 ++++----
40 crypto/testmgr.h | 1125 +++---
41 crypto/tls.c | 607 +++
42 drivers/crypto/caam/Kconfig | 72 +-
43 drivers/crypto/caam/Makefile | 15 +-
44 drivers/crypto/caam/caamalg.c | 2125 +++-------
45 drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++
46 drivers/crypto/caam/caamalg_desc.h | 127 +
47 drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++
48 drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++
49 drivers/crypto/caam/caamalg_qi2.h | 265 ++
50 drivers/crypto/caam/caamhash.c | 521 +--
51 drivers/crypto/caam/caampkc.c | 471 ++-
52 drivers/crypto/caam/caampkc.h | 58 +
53 drivers/crypto/caam/caamrng.c | 16 +-
54 drivers/crypto/caam/compat.h | 1 +
55 drivers/crypto/caam/ctrl.c | 356 +-
56 drivers/crypto/caam/ctrl.h | 2 +
57 drivers/crypto/caam/desc.h | 52 +-
58 drivers/crypto/caam/desc_constr.h | 139 +-
59 drivers/crypto/caam/dpseci.c | 859 ++++
60 drivers/crypto/caam/dpseci.h | 395 ++
61 drivers/crypto/caam/dpseci_cmd.h | 261 ++
62 drivers/crypto/caam/error.c | 127 +-
63 drivers/crypto/caam/error.h | 10 +-
64 drivers/crypto/caam/intern.h | 31 +-
65 drivers/crypto/caam/jr.c | 55 +-
66 drivers/crypto/caam/key_gen.c | 32 +-
67 drivers/crypto/caam/key_gen.h | 36 +-
68 drivers/crypto/caam/pdb.h | 62 +
69 drivers/crypto/caam/pkc_desc.c | 36 +
70 drivers/crypto/caam/qi.c | 797 ++++
71 drivers/crypto/caam/qi.h | 204 +
72 drivers/crypto/caam/regs.h | 63 +-
73 drivers/crypto/caam/sg_sw_qm.h | 126 +
74 drivers/crypto/caam/sg_sw_qm2.h | 81 +
75 drivers/crypto/caam/sg_sw_sec4.h | 60 +-
76 drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
77 drivers/staging/wilc1000/linux_wlan.c | 2 +-
78 drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
79 include/crypto/acompress.h | 269 ++
80 include/crypto/internal/acompress.h | 81 +
81 include/crypto/internal/scompress.h | 136 +
82 include/linux/crypto.h | 3 +
83 include/uapi/linux/cryptouser.h | 5 +
84 scripts/spelling.txt | 3 +
85 sound/soc/amd/acp-pcm-dma.c | 2 +-
86 54 files changed, 17263 insertions(+), 3955 deletions(-)
87 create mode 100644 crypto/acompress.c
88 create mode 100644 crypto/scompress.c
89 create mode 100644 crypto/tls.c
90 create mode 100644 drivers/crypto/caam/caamalg_desc.c
91 create mode 100644 drivers/crypto/caam/caamalg_desc.h
92 create mode 100644 drivers/crypto/caam/caamalg_qi.c
93 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
94 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
95 create mode 100644 drivers/crypto/caam/dpseci.c
96 create mode 100644 drivers/crypto/caam/dpseci.h
97 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
98 create mode 100644 drivers/crypto/caam/qi.c
99 create mode 100644 drivers/crypto/caam/qi.h
100 create mode 100644 drivers/crypto/caam/sg_sw_qm.h
101 create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
102 create mode 100644 include/crypto/acompress.h
103 create mode 100644 include/crypto/internal/acompress.h
104 create mode 100644 include/crypto/internal/scompress.h
105
106 diff --git a/crypto/Kconfig b/crypto/Kconfig
107 index 17be110a..00e145e2 100644
108 --- a/crypto/Kconfig
109 +++ b/crypto/Kconfig
110 @@ -102,6 +102,15 @@ config CRYPTO_KPP
111 select CRYPTO_ALGAPI
112 select CRYPTO_KPP2
113
114 +config CRYPTO_ACOMP2
115 + tristate
116 + select CRYPTO_ALGAPI2
117 +
118 +config CRYPTO_ACOMP
119 + tristate
120 + select CRYPTO_ALGAPI
121 + select CRYPTO_ACOMP2
122 +
123 config CRYPTO_RSA
124 tristate "RSA algorithm"
125 select CRYPTO_AKCIPHER
126 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
127 select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
128 select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
129 select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
130 + select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
131
132 config CRYPTO_USER
133 tristate "Userspace cryptographic algorithm configuration"
134 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
135 a sequence number xored with a salt. This is the default
136 algorithm for CBC.
137
138 +config CRYPTO_TLS
139 + tristate "TLS support"
140 + select CRYPTO_AEAD
141 + select CRYPTO_BLKCIPHER
142 + select CRYPTO_MANAGER
143 + select CRYPTO_HASH
144 + select CRYPTO_NULL
145 + select CRYPTO_AUTHENC
146 + help
147 + Support for TLS 1.0 record encryption and decryption
148 +
149 + This module adds support for encryption/decryption of TLS 1.0 frames
150 + using blockcipher algorithms. The name of the resulting algorithm is
151 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
152 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
153 + accelerated versions will be used automatically if available.
154 +
155 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
156 +	  operations through AF_ALG or cryptodev interfaces.
157 +
158 comment "Block modes"
159
160 config CRYPTO_CBC
161 diff --git a/crypto/Makefile b/crypto/Makefile
162 index 9e52b3c5..936d2b73 100644
163 --- a/crypto/Makefile
164 +++ b/crypto/Makefile
165 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
166 rsa_generic-y += rsa-pkcs1pad.o
167 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
168
169 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
170 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
171 +
172 cryptomgr-y := algboss.o testmgr.o
173
174 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
175 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
176 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
177 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
178 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
179 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
180 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
181 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
182 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
183 diff --git a/crypto/acompress.c b/crypto/acompress.c
184 new file mode 100644
185 index 00000000..887783d8
186 --- /dev/null
187 +++ b/crypto/acompress.c
188 @@ -0,0 +1,169 @@
189 +/*
190 + * Asynchronous Compression operations
191 + *
192 + * Copyright (c) 2016, Intel Corporation
193 + * Authors: Weigang Li <weigang.li@intel.com>
194 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
195 + *
196 + * This program is free software; you can redistribute it and/or modify it
197 + * under the terms of the GNU General Public License as published by the Free
198 + * Software Foundation; either version 2 of the License, or (at your option)
199 + * any later version.
200 + *
201 + */
202 +#include <linux/errno.h>
203 +#include <linux/kernel.h>
204 +#include <linux/module.h>
205 +#include <linux/seq_file.h>
206 +#include <linux/slab.h>
207 +#include <linux/string.h>
208 +#include <linux/crypto.h>
209 +#include <crypto/algapi.h>
210 +#include <linux/cryptouser.h>
211 +#include <net/netlink.h>
212 +#include <crypto/internal/acompress.h>
213 +#include <crypto/internal/scompress.h>
214 +#include "internal.h"
215 +
216 +static const struct crypto_type crypto_acomp_type;
217 +
218 +#ifdef CONFIG_NET
219 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
220 +{
221 + struct crypto_report_acomp racomp;
222 +
223 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
224 +
225 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
226 + sizeof(struct crypto_report_acomp), &racomp))
227 + goto nla_put_failure;
228 + return 0;
229 +
230 +nla_put_failure:
231 + return -EMSGSIZE;
232 +}
233 +#else
234 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
235 +{
236 + return -ENOSYS;
237 +}
238 +#endif
239 +
240 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
241 + __attribute__ ((unused));
242 +
243 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
244 +{
245 + seq_puts(m, "type : acomp\n");
246 +}
247 +
248 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
249 +{
250 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
251 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
252 +
253 + alg->exit(acomp);
254 +}
255 +
256 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
257 +{
258 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
259 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
260 +
261 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
262 + return crypto_init_scomp_ops_async(tfm);
263 +
264 + acomp->compress = alg->compress;
265 + acomp->decompress = alg->decompress;
266 + acomp->dst_free = alg->dst_free;
267 + acomp->reqsize = alg->reqsize;
268 +
269 + if (alg->exit)
270 + acomp->base.exit = crypto_acomp_exit_tfm;
271 +
272 + if (alg->init)
273 + return alg->init(acomp);
274 +
275 + return 0;
276 +}
277 +
278 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
279 +{
280 + int extsize = crypto_alg_extsize(alg);
281 +
282 + if (alg->cra_type != &crypto_acomp_type)
283 + extsize += sizeof(struct crypto_scomp *);
284 +
285 + return extsize;
286 +}
287 +
288 +static const struct crypto_type crypto_acomp_type = {
289 + .extsize = crypto_acomp_extsize,
290 + .init_tfm = crypto_acomp_init_tfm,
291 +#ifdef CONFIG_PROC_FS
292 + .show = crypto_acomp_show,
293 +#endif
294 + .report = crypto_acomp_report,
295 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
296 + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
297 + .type = CRYPTO_ALG_TYPE_ACOMPRESS,
298 + .tfmsize = offsetof(struct crypto_acomp, base),
299 +};
300 +
301 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
302 + u32 mask)
303 +{
304 + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
305 +}
306 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
307 +
308 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
309 +{
310 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
311 + struct acomp_req *req;
312 +
313 + req = __acomp_request_alloc(acomp);
314 + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
315 + return crypto_acomp_scomp_alloc_ctx(req);
316 +
317 + return req;
318 +}
319 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
320 +
321 +void acomp_request_free(struct acomp_req *req)
322 +{
323 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
324 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
325 +
326 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
327 + crypto_acomp_scomp_free_ctx(req);
328 +
329 + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
330 + acomp->dst_free(req->dst);
331 + req->dst = NULL;
332 + }
333 +
334 + __acomp_request_free(req);
335 +}
336 +EXPORT_SYMBOL_GPL(acomp_request_free);
337 +
338 +int crypto_register_acomp(struct acomp_alg *alg)
339 +{
340 + struct crypto_alg *base = &alg->base;
341 +
342 + base->cra_type = &crypto_acomp_type;
343 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
344 + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
345 +
346 + return crypto_register_alg(base);
347 +}
348 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
349 +
350 +int crypto_unregister_acomp(struct acomp_alg *alg)
351 +{
352 + return crypto_unregister_alg(&alg->base);
353 +}
354 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
355 +
356 +MODULE_LICENSE("GPL");
357 +MODULE_DESCRIPTION("Asynchronous compression type");
358 diff --git a/crypto/algboss.c b/crypto/algboss.c
359 index 4bde25d6..ccb85e17 100644
360 --- a/crypto/algboss.c
361 +++ b/crypto/algboss.c
362 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
363 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
364 type = alg->cra_flags;
365
366 - /* This piece of crap needs to disappear into per-type test hooks. */
367 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
368 - type |= CRYPTO_ALG_TESTED;
369 -#else
370 - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
371 - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
372 - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
373 - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
374 - alg->cra_ablkcipher.ivsize))
375 + /* Do not test internal algorithms. */
376 + if (type & CRYPTO_ALG_INTERNAL)
377 type |= CRYPTO_ALG_TESTED;
378 -#endif
379
380 param->type = type;
381
382 diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
383 index 1c570548..a90404a0 100644
384 --- a/crypto/crypto_user.c
385 +++ b/crypto/crypto_user.c
386 @@ -112,6 +112,21 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
387 return -EMSGSIZE;
388 }
389
390 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
391 +{
392 + struct crypto_report_acomp racomp;
393 +
394 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
395 +
396 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
397 + sizeof(struct crypto_report_acomp), &racomp))
398 + goto nla_put_failure;
399 + return 0;
400 +
401 +nla_put_failure:
402 + return -EMSGSIZE;
403 +}
404 +
405 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
406 {
407 struct crypto_report_akcipher rakcipher;
408 @@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
409 goto nla_put_failure;
410
411 break;
412 + case CRYPTO_ALG_TYPE_ACOMPRESS:
413 + if (crypto_report_acomp(skb, alg))
414 + goto nla_put_failure;
415
416 + break;
417 case CRYPTO_ALG_TYPE_AKCIPHER:
418 if (crypto_report_akcipher(skb, alg))
419 goto nla_put_failure;
420 diff --git a/crypto/scompress.c b/crypto/scompress.c
421 new file mode 100644
422 index 00000000..35e396d1
423 --- /dev/null
424 +++ b/crypto/scompress.c
425 @@ -0,0 +1,356 @@
426 +/*
427 + * Synchronous Compression operations
428 + *
429 + * Copyright 2015 LG Electronics Inc.
430 + * Copyright (c) 2016, Intel Corporation
431 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
432 + *
433 + * This program is free software; you can redistribute it and/or modify it
434 + * under the terms of the GNU General Public License as published by the Free
435 + * Software Foundation; either version 2 of the License, or (at your option)
436 + * any later version.
437 + *
438 + */
439 +#include <linux/errno.h>
440 +#include <linux/kernel.h>
441 +#include <linux/module.h>
442 +#include <linux/seq_file.h>
443 +#include <linux/slab.h>
444 +#include <linux/string.h>
445 +#include <linux/crypto.h>
446 +#include <linux/vmalloc.h>
447 +#include <crypto/algapi.h>
448 +#include <linux/cryptouser.h>
449 +#include <net/netlink.h>
450 +#include <linux/scatterlist.h>
451 +#include <crypto/scatterwalk.h>
452 +#include <crypto/internal/acompress.h>
453 +#include <crypto/internal/scompress.h>
454 +#include "internal.h"
455 +
456 +static const struct crypto_type crypto_scomp_type;
457 +static void * __percpu *scomp_src_scratches;
458 +static void * __percpu *scomp_dst_scratches;
459 +static int scomp_scratch_users;
460 +static DEFINE_MUTEX(scomp_lock);
461 +
462 +#ifdef CONFIG_NET
463 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
464 +{
465 + struct crypto_report_comp rscomp;
466 +
467 + strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
468 +
469 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
470 + sizeof(struct crypto_report_comp), &rscomp))
471 + goto nla_put_failure;
472 + return 0;
473 +
474 +nla_put_failure:
475 + return -EMSGSIZE;
476 +}
477 +#else
478 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
479 +{
480 + return -ENOSYS;
481 +}
482 +#endif
483 +
484 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
485 + __attribute__ ((unused));
486 +
487 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
488 +{
489 + seq_puts(m, "type : scomp\n");
490 +}
491 +
492 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
493 +{
494 + return 0;
495 +}
496 +
497 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
498 +{
499 + int i;
500 +
501 + if (!scratches)
502 + return;
503 +
504 + for_each_possible_cpu(i)
505 + vfree(*per_cpu_ptr(scratches, i));
506 +
507 + free_percpu(scratches);
508 +}
509 +
510 +static void * __percpu *crypto_scomp_alloc_scratches(void)
511 +{
512 + void * __percpu *scratches;
513 + int i;
514 +
515 + scratches = alloc_percpu(void *);
516 + if (!scratches)
517 + return NULL;
518 +
519 + for_each_possible_cpu(i) {
520 + void *scratch;
521 +
522 + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
523 + if (!scratch)
524 + goto error;
525 + *per_cpu_ptr(scratches, i) = scratch;
526 + }
527 +
528 + return scratches;
529 +
530 +error:
531 + crypto_scomp_free_scratches(scratches);
532 + return NULL;
533 +}
534 +
535 +static void crypto_scomp_free_all_scratches(void)
536 +{
537 + if (!--scomp_scratch_users) {
538 + crypto_scomp_free_scratches(scomp_src_scratches);
539 + crypto_scomp_free_scratches(scomp_dst_scratches);
540 + scomp_src_scratches = NULL;
541 + scomp_dst_scratches = NULL;
542 + }
543 +}
544 +
545 +static int crypto_scomp_alloc_all_scratches(void)
546 +{
547 + if (!scomp_scratch_users++) {
548 + scomp_src_scratches = crypto_scomp_alloc_scratches();
549 + if (!scomp_src_scratches)
550 + return -ENOMEM;
551 + scomp_dst_scratches = crypto_scomp_alloc_scratches();
552 + if (!scomp_dst_scratches)
553 + return -ENOMEM;
554 + }
555 + return 0;
556 +}
557 +
558 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
559 +{
560 + int i, n;
561 + struct page *page;
562 +
563 + if (!sgl)
564 + return;
565 +
566 + n = sg_nents(sgl);
567 + for_each_sg(sgl, sgl, n, i) {
568 + page = sg_page(sgl);
569 + if (page)
570 + __free_page(page);
571 + }
572 +
573 + kfree(sgl);
574 +}
575 +
576 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
577 +{
578 + struct scatterlist *sgl;
579 + struct page *page;
580 + int i, n;
581 +
582 + n = ((size - 1) >> PAGE_SHIFT) + 1;
583 +
584 + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
585 + if (!sgl)
586 + return NULL;
587 +
588 + sg_init_table(sgl, n);
589 +
590 + for (i = 0; i < n; i++) {
591 + page = alloc_page(gfp);
592 + if (!page)
593 + goto err;
594 + sg_set_page(sgl + i, page, PAGE_SIZE, 0);
595 + }
596 +
597 + return sgl;
598 +
599 +err:
600 + sg_mark_end(sgl + i);
601 + crypto_scomp_sg_free(sgl);
602 + return NULL;
603 +}
604 +
605 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
606 +{
607 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
608 + void **tfm_ctx = acomp_tfm_ctx(tfm);
609 + struct crypto_scomp *scomp = *tfm_ctx;
610 + void **ctx = acomp_request_ctx(req);
611 + const int cpu = get_cpu();
612 + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
613 + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
614 + int ret;
615 +
616 + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
617 + ret = -EINVAL;
618 + goto out;
619 + }
620 +
621 + if (req->dst && !req->dlen) {
622 + ret = -EINVAL;
623 + goto out;
624 + }
625 +
626 + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
627 + req->dlen = SCOMP_SCRATCH_SIZE;
628 +
629 + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
630 + if (dir)
631 + ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
632 + scratch_dst, &req->dlen, *ctx);
633 + else
634 + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
635 + scratch_dst, &req->dlen, *ctx);
636 + if (!ret) {
637 + if (!req->dst) {
638 + req->dst = crypto_scomp_sg_alloc(req->dlen,
639 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
640 + GFP_KERNEL : GFP_ATOMIC);
641 + if (!req->dst)
642 + goto out;
643 + }
644 + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
645 + 1);
646 + }
647 +out:
648 + put_cpu();
649 + return ret;
650 +}
651 +
652 +static int scomp_acomp_compress(struct acomp_req *req)
653 +{
654 + return scomp_acomp_comp_decomp(req, 1);
655 +}
656 +
657 +static int scomp_acomp_decompress(struct acomp_req *req)
658 +{
659 + return scomp_acomp_comp_decomp(req, 0);
660 +}
661 +
662 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
663 +{
664 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
665 +
666 + crypto_free_scomp(*ctx);
667 +}
668 +
669 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
670 +{
671 + struct crypto_alg *calg = tfm->__crt_alg;
672 + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
673 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
674 + struct crypto_scomp *scomp;
675 +
676 + if (!crypto_mod_get(calg))
677 + return -EAGAIN;
678 +
679 + scomp = crypto_create_tfm(calg, &crypto_scomp_type);
680 + if (IS_ERR(scomp)) {
681 + crypto_mod_put(calg);
682 + return PTR_ERR(scomp);
683 + }
684 +
685 + *ctx = scomp;
686 + tfm->exit = crypto_exit_scomp_ops_async;
687 +
688 + crt->compress = scomp_acomp_compress;
689 + crt->decompress = scomp_acomp_decompress;
690 + crt->dst_free = crypto_scomp_sg_free;
691 + crt->reqsize = sizeof(void *);
692 +
693 + return 0;
694 +}
695 +
696 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
697 +{
698 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
699 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
700 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
701 + struct crypto_scomp *scomp = *tfm_ctx;
702 + void *ctx;
703 +
704 + ctx = crypto_scomp_alloc_ctx(scomp);
705 + if (IS_ERR(ctx)) {
706 + kfree(req);
707 + return NULL;
708 + }
709 +
710 + *req->__ctx = ctx;
711 +
712 + return req;
713 +}
714 +
715 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
716 +{
717 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
718 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
719 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
720 + struct crypto_scomp *scomp = *tfm_ctx;
721 + void *ctx = *req->__ctx;
722 +
723 + if (ctx)
724 + crypto_scomp_free_ctx(scomp, ctx);
725 +}
726 +
727 +static const struct crypto_type crypto_scomp_type = {
728 + .extsize = crypto_alg_extsize,
729 + .init_tfm = crypto_scomp_init_tfm,
730 +#ifdef CONFIG_PROC_FS
731 + .show = crypto_scomp_show,
732 +#endif
733 + .report = crypto_scomp_report,
734 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
735 + .maskset = CRYPTO_ALG_TYPE_MASK,
736 + .type = CRYPTO_ALG_TYPE_SCOMPRESS,
737 + .tfmsize = offsetof(struct crypto_scomp, base),
738 +};
739 +
740 +int crypto_register_scomp(struct scomp_alg *alg)
741 +{
742 + struct crypto_alg *base = &alg->base;
743 + int ret = -ENOMEM;
744 +
745 + mutex_lock(&scomp_lock);
746 + if (crypto_scomp_alloc_all_scratches())
747 + goto error;
748 +
749 + base->cra_type = &crypto_scomp_type;
750 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
751 + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
752 +
753 + ret = crypto_register_alg(base);
754 + if (ret)
755 + goto error;
756 +
757 + mutex_unlock(&scomp_lock);
758 + return ret;
759 +
760 +error:
761 + crypto_scomp_free_all_scratches();
762 + mutex_unlock(&scomp_lock);
763 + return ret;
764 +}
765 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
766 +
767 +int crypto_unregister_scomp(struct scomp_alg *alg)
768 +{
769 + int ret;
770 +
771 + mutex_lock(&scomp_lock);
772 + ret = crypto_unregister_alg(&alg->base);
773 + crypto_scomp_free_all_scratches();
774 + mutex_unlock(&scomp_lock);
775 +
776 + return ret;
777 +}
778 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
779 +
780 +MODULE_LICENSE("GPL");
781 +MODULE_DESCRIPTION("Synchronous compression type");
782 diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
783 index ae22f05d..bbb35eed 100644
784 --- a/crypto/tcrypt.c
785 +++ b/crypto/tcrypt.c
786 @@ -74,7 +74,7 @@ static char *check[] = {
787 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
788 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
789 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
790 - NULL
791 + "rsa", NULL
792 };
793
794 struct tcrypt_result {
795 @@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
796 ret += tcrypt_test("hmac(sha3-512)");
797 break;
798
799 + case 115:
800 + ret += tcrypt_test("rsa");
801 + break;
802 +
803 case 150:
804 ret += tcrypt_test("ansi_cprng");
805 break;
806 @@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
807 case 190:
808 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
809 break;
810 + case 191:
811 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
812 + break;
813 case 200:
814 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
815 speed_template_16_24_32);
816 @@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
817 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
818 speed_template_32_40_48);
819 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
820 - speed_template_32_48_64);
821 + speed_template_32_64);
822 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
823 - speed_template_32_48_64);
824 + speed_template_32_64);
825 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
826 speed_template_16_24_32);
827 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
828 @@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
829 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
830 speed_template_32_40_48);
831 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
832 - speed_template_32_48_64);
833 + speed_template_32_64);
834 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
835 - speed_template_32_48_64);
836 + speed_template_32_64);
837 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
838 speed_template_16_24_32);
839 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
840 diff --git a/crypto/testmgr.c b/crypto/testmgr.c
841 index 62dffa00..73d91fba 100644
842 --- a/crypto/testmgr.c
843 +++ b/crypto/testmgr.c
844 @@ -33,6 +33,7 @@
845 #include <crypto/drbg.h>
846 #include <crypto/akcipher.h>
847 #include <crypto/kpp.h>
848 +#include <crypto/acompress.h>
849
850 #include "internal.h"
851
852 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
853 */
854 #define IDX1 32
855 #define IDX2 32400
856 -#define IDX3 1
857 +#define IDX3 1511
858 #define IDX4 8193
859 #define IDX5 22222
860 #define IDX6 17101
861 @@ -82,47 +83,54 @@ struct tcrypt_result {
862
863 struct aead_test_suite {
864 struct {
865 - struct aead_testvec *vecs;
866 + const struct aead_testvec *vecs;
867 unsigned int count;
868 } enc, dec;
869 };
870
871 struct cipher_test_suite {
872 struct {
873 - struct cipher_testvec *vecs;
874 + const struct cipher_testvec *vecs;
875 unsigned int count;
876 } enc, dec;
877 };
878
879 struct comp_test_suite {
880 struct {
881 - struct comp_testvec *vecs;
882 + const struct comp_testvec *vecs;
883 unsigned int count;
884 } comp, decomp;
885 };
886
887 struct hash_test_suite {
888 - struct hash_testvec *vecs;
889 + const struct hash_testvec *vecs;
890 unsigned int count;
891 };
892
893 struct cprng_test_suite {
894 - struct cprng_testvec *vecs;
895 + const struct cprng_testvec *vecs;
896 unsigned int count;
897 };
898
899 struct drbg_test_suite {
900 - struct drbg_testvec *vecs;
901 + const struct drbg_testvec *vecs;
902 unsigned int count;
903 };
904
905 +struct tls_test_suite {
906 + struct {
907 + struct tls_testvec *vecs;
908 + unsigned int count;
909 + } enc, dec;
910 +};
911 +
912 struct akcipher_test_suite {
913 - struct akcipher_testvec *vecs;
914 + const struct akcipher_testvec *vecs;
915 unsigned int count;
916 };
917
918 struct kpp_test_suite {
919 - struct kpp_testvec *vecs;
920 + const struct kpp_testvec *vecs;
921 unsigned int count;
922 };
923
924 @@ -139,12 +147,14 @@ struct alg_test_desc {
925 struct hash_test_suite hash;
926 struct cprng_test_suite cprng;
927 struct drbg_test_suite drbg;
928 + struct tls_test_suite tls;
929 struct akcipher_test_suite akcipher;
930 struct kpp_test_suite kpp;
931 } suite;
932 };
933
934 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
935 +static const unsigned int IDX[8] = {
936 + IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
937
938 static void hexdump(unsigned char *buf, unsigned int len)
939 {
940 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
941 }
942
943 static int ahash_partial_update(struct ahash_request **preq,
944 - struct crypto_ahash *tfm, struct hash_testvec *template,
945 + struct crypto_ahash *tfm, const struct hash_testvec *template,
946 void *hash_buff, int k, int temp, struct scatterlist *sg,
947 const char *algo, char *result, struct tcrypt_result *tresult)
948 {
949 @@ -259,11 +269,12 @@ static int ahash_partial_update(struct ahash_request **preq,
950 return ret;
951 }
952
953 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
954 - unsigned int tcount, bool use_digest,
955 - const int align_offset)
956 +static int __test_hash(struct crypto_ahash *tfm,
957 + const struct hash_testvec *template, unsigned int tcount,
958 + bool use_digest, const int align_offset)
959 {
960 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
961 + size_t digest_size = crypto_ahash_digestsize(tfm);
962 unsigned int i, j, k, temp;
963 struct scatterlist sg[8];
964 char *result;
965 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
966 char *xbuf[XBUFSIZE];
967 int ret = -ENOMEM;
968
969 - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
970 + result = kmalloc(digest_size, GFP_KERNEL);
971 if (!result)
972 return ret;
973 key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
974 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
975 goto out;
976
977 j++;
978 - memset(result, 0, MAX_DIGEST_SIZE);
979 + memset(result, 0, digest_size);
980
981 hash_buff = xbuf[0];
982 hash_buff += align_offset;
983 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
984 continue;
985
986 j++;
987 - memset(result, 0, MAX_DIGEST_SIZE);
988 + memset(result, 0, digest_size);
989
990 temp = 0;
991 sg_init_table(sg, template[i].np);
992 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
993 continue;
994
995 j++;
996 - memset(result, 0, MAX_DIGEST_SIZE);
997 + memset(result, 0, digest_size);
998
999 ret = -EINVAL;
1000 hash_buff = xbuf[0];
1001 @@ -536,7 +547,8 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
1002 return ret;
1003 }
1004
1005 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
1006 +static int test_hash(struct crypto_ahash *tfm,
1007 + const struct hash_testvec *template,
1008 unsigned int tcount, bool use_digest)
1009 {
1010 unsigned int alignmask;
1011 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
1012 }
1013
1014 static int __test_aead(struct crypto_aead *tfm, int enc,
1015 - struct aead_testvec *template, unsigned int tcount,
1016 + const struct aead_testvec *template, unsigned int tcount,
1017 const bool diff_dst, const int align_offset)
1018 {
1019 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1020 @@ -955,7 +967,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
1021 }
1022
1023 static int test_aead(struct crypto_aead *tfm, int enc,
1024 - struct aead_testvec *template, unsigned int tcount)
1025 + const struct aead_testvec *template, unsigned int tcount)
1026 {
1027 unsigned int alignmask;
1028 int ret;
1029 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead *tfm, int enc,
1030 return 0;
1031 }
1032
1033 +static int __test_tls(struct crypto_aead *tfm, int enc,
1034 + struct tls_testvec *template, unsigned int tcount,
1035 + const bool diff_dst)
1036 +{
1037 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1038 + unsigned int i, k, authsize;
1039 + char *q;
1040 + struct aead_request *req;
1041 + struct scatterlist *sg;
1042 + struct scatterlist *sgout;
1043 + const char *e, *d;
1044 + struct tcrypt_result result;
1045 + void *input;
1046 + void *output;
1047 + void *assoc;
1048 + char *iv;
1049 + char *key;
1050 + char *xbuf[XBUFSIZE];
1051 + char *xoutbuf[XBUFSIZE];
1052 + char *axbuf[XBUFSIZE];
1053 + int ret = -ENOMEM;
1054 +
1055 + if (testmgr_alloc_buf(xbuf))
1056 + goto out_noxbuf;
1057 +
1058 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
1059 + goto out_nooutbuf;
1060 +
1061 + if (testmgr_alloc_buf(axbuf))
1062 + goto out_noaxbuf;
1063 +
1064 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1065 + if (!iv)
1066 + goto out_noiv;
1067 +
1068 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1069 + if (!key)
1070 + goto out_nokey;
1071 +
1072 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1073 + if (!sg)
1074 + goto out_nosg;
1075 +
1076 + sgout = sg + 8;
1077 +
1078 + d = diff_dst ? "-ddst" : "";
1079 + e = enc ? "encryption" : "decryption";
1080 +
1081 + init_completion(&result.completion);
1082 +
1083 + req = aead_request_alloc(tfm, GFP_KERNEL);
1084 + if (!req) {
1085 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
1086 + d, algo);
1087 + goto out;
1088 + }
1089 +
1090 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1091 + tcrypt_complete, &result);
1092 +
1093 + for (i = 0; i < tcount; i++) {
1094 + input = xbuf[0];
1095 + assoc = axbuf[0];
1096 +
1097 + ret = -EINVAL;
1098 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1099 + template[i].alen > PAGE_SIZE))
1100 + goto out;
1101 +
1102 + memcpy(assoc, template[i].assoc, template[i].alen);
1103 + memcpy(input, template[i].input, template[i].ilen);
1104 +
1105 + if (template[i].iv)
1106 + memcpy(iv, template[i].iv, MAX_IVLEN);
1107 + else
1108 + memset(iv, 0, MAX_IVLEN);
1109 +
1110 + crypto_aead_clear_flags(tfm, ~0);
1111 +
1112 + if (template[i].klen > MAX_KEYLEN) {
1113 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1114 + d, i, algo, template[i].klen, MAX_KEYLEN);
1115 + ret = -EINVAL;
1116 + goto out;
1117 + }
1118 + memcpy(key, template[i].key, template[i].klen);
1119 +
1120 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
1121 + if (!ret == template[i].fail) {
1122 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1123 + d, i, algo, crypto_aead_get_flags(tfm));
1124 + goto out;
1125 + } else if (ret)
1126 + continue;
1127 +
1128 + authsize = 20;
1129 + ret = crypto_aead_setauthsize(tfm, authsize);
1130 + if (ret) {
1131 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1132 + d, authsize, i, algo);
1133 + goto out;
1134 + }
1135 +
1136 + k = !!template[i].alen;
1137 + sg_init_table(sg, k + 1);
1138 + sg_set_buf(&sg[0], assoc, template[i].alen);
1139 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1140 + template[i].ilen));
1141 + output = input;
1142 +
1143 + if (diff_dst) {
1144 + sg_init_table(sgout, k + 1);
1145 + sg_set_buf(&sgout[0], assoc, template[i].alen);
1146 +
1147 + output = xoutbuf[0];
1148 + sg_set_buf(&sgout[k], output,
1149 + (enc ? template[i].rlen : template[i].ilen));
1150 + }
1151 +
1152 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1153 + template[i].ilen, iv);
1154 +
1155 + aead_request_set_ad(req, template[i].alen);
1156 +
1157 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1158 +
1159 + switch (ret) {
1160 + case 0:
1161 + if (template[i].novrfy) {
1162 + /* verification was supposed to fail */
1163 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1164 + d, e, i, algo);
1165 + /* so really, we got a bad message */
1166 + ret = -EBADMSG;
1167 + goto out;
1168 + }
1169 + break;
1170 + case -EINPROGRESS:
1171 + case -EBUSY:
1172 + wait_for_completion(&result.completion);
1173 + reinit_completion(&result.completion);
1174 + ret = result.err;
1175 + if (!ret)
1176 + break;
1177 + case -EBADMSG:
1178 + /* verification failure was expected */
1179 + if (template[i].novrfy)
1180 + continue;
1181 + /* fall through */
1182 + default:
1183 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1184 + d, e, i, algo, -ret);
1185 + goto out;
1186 + }
1187 +
1188 + q = output;
1189 + if (memcmp(q, template[i].result, template[i].rlen)) {
1190 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1191 + d, i, e, algo);
1192 + hexdump(q, template[i].rlen);
1193 + pr_err("should be:\n");
1194 + hexdump(template[i].result, template[i].rlen);
1195 + ret = -EINVAL;
1196 + goto out;
1197 + }
1198 + }
1199 +
1200 +out:
1201 + aead_request_free(req);
1202 +
1203 + kfree(sg);
1204 +out_nosg:
1205 + kfree(key);
1206 +out_nokey:
1207 + kfree(iv);
1208 +out_noiv:
1209 + testmgr_free_buf(axbuf);
1210 +out_noaxbuf:
1211 + if (diff_dst)
1212 + testmgr_free_buf(xoutbuf);
1213 +out_nooutbuf:
1214 + testmgr_free_buf(xbuf);
1215 +out_noxbuf:
1216 + return ret;
1217 +}
1218 +
1219 +static int test_tls(struct crypto_aead *tfm, int enc,
1220 + struct tls_testvec *template, unsigned int tcount)
1221 +{
1222 + int ret;
1223 + /* test 'dst == src' case */
1224 + ret = __test_tls(tfm, enc, template, tcount, false);
1225 + if (ret)
1226 + return ret;
1227 + /* test 'dst != src' case */
1228 + return __test_tls(tfm, enc, template, tcount, true);
1229 +}
1230 +
1231 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1232 + u32 type, u32 mask)
1233 +{
1234 + struct crypto_aead *tfm;
1235 + int err = 0;
1236 +
1237 + tfm = crypto_alloc_aead(driver, type, mask);
1238 + if (IS_ERR(tfm)) {
1239 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1240 + driver, PTR_ERR(tfm));
1241 + return PTR_ERR(tfm);
1242 + }
1243 +
1244 + if (desc->suite.tls.enc.vecs) {
1245 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1246 + desc->suite.tls.enc.count);
1247 + if (err)
1248 + goto out;
1249 + }
1250 +
1251 + if (!err && desc->suite.tls.dec.vecs)
1252 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1253 + desc->suite.tls.dec.count);
1254 +
1255 +out:
1256 + crypto_free_aead(tfm);
1257 + return err;
1258 +}
1259 +
1260 static int test_cipher(struct crypto_cipher *tfm, int enc,
1261 - struct cipher_testvec *template, unsigned int tcount)
1262 + const struct cipher_testvec *template,
1263 + unsigned int tcount)
1264 {
1265 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1266 unsigned int i, j, k;
1267 @@ -1066,7 +1306,8 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
1268 }
1269
1270 static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1271 - struct cipher_testvec *template, unsigned int tcount,
1272 + const struct cipher_testvec *template,
1273 + unsigned int tcount,
1274 const bool diff_dst, const int align_offset)
1275 {
1276 const char *algo =
1277 @@ -1330,7 +1571,8 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1278 }
1279
1280 static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1281 - struct cipher_testvec *template, unsigned int tcount)
1282 + const struct cipher_testvec *template,
1283 + unsigned int tcount)
1284 {
1285 unsigned int alignmask;
1286 int ret;
1287 @@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1288 return 0;
1289 }
1290
1291 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1292 - struct comp_testvec *dtemplate, int ctcount, int dtcount)
1293 +static int test_comp(struct crypto_comp *tfm,
1294 + const struct comp_testvec *ctemplate,
1295 + const struct comp_testvec *dtemplate,
1296 + int ctcount, int dtcount)
1297 {
1298 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1299 unsigned int i;
1300 @@ -1442,7 +1686,154 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1301 return ret;
1302 }
1303
1304 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1305 +static int test_acomp(struct crypto_acomp *tfm,
1306 + const struct comp_testvec *ctemplate,
1307 + const struct comp_testvec *dtemplate,
1308 + int ctcount, int dtcount)
1309 +{
1310 + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1311 + unsigned int i;
1312 + char *output;
1313 + int ret;
1314 + struct scatterlist src, dst;
1315 + struct acomp_req *req;
1316 + struct tcrypt_result result;
1317 +
1318 + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1319 + if (!output)
1320 + return -ENOMEM;
1321 +
1322 + for (i = 0; i < ctcount; i++) {
1323 + unsigned int dlen = COMP_BUF_SIZE;
1324 + int ilen = ctemplate[i].inlen;
1325 + void *input_vec;
1326 +
1327 + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1328 + if (!input_vec) {
1329 + ret = -ENOMEM;
1330 + goto out;
1331 + }
1332 +
1333 + memset(output, 0, dlen);
1334 + init_completion(&result.completion);
1335 + sg_init_one(&src, input_vec, ilen);
1336 + sg_init_one(&dst, output, dlen);
1337 +
1338 + req = acomp_request_alloc(tfm);
1339 + if (!req) {
1340 + pr_err("alg: acomp: request alloc failed for %s\n",
1341 + algo);
1342 + kfree(input_vec);
1343 + ret = -ENOMEM;
1344 + goto out;
1345 + }
1346 +
1347 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1348 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1349 + tcrypt_complete, &result);
1350 +
1351 + ret = wait_async_op(&result, crypto_acomp_compress(req));
1352 + if (ret) {
1353 + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1354 + i + 1, algo, -ret);
1355 + kfree(input_vec);
1356 + acomp_request_free(req);
1357 + goto out;
1358 + }
1359 +
1360 + if (req->dlen != ctemplate[i].outlen) {
1361 + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1362 + i + 1, algo, req->dlen);
1363 + ret = -EINVAL;
1364 + kfree(input_vec);
1365 + acomp_request_free(req);
1366 + goto out;
1367 + }
1368 +
1369 + if (memcmp(output, ctemplate[i].output, req->dlen)) {
1370 + pr_err("alg: acomp: Compression test %d failed for %s\n",
1371 + i + 1, algo);
1372 + hexdump(output, req->dlen);
1373 + ret = -EINVAL;
1374 + kfree(input_vec);
1375 + acomp_request_free(req);
1376 + goto out;
1377 + }
1378 +
1379 + kfree(input_vec);
1380 + acomp_request_free(req);
1381 + }
1382 +
1383 + for (i = 0; i < dtcount; i++) {
1384 + unsigned int dlen = COMP_BUF_SIZE;
1385 + int ilen = dtemplate[i].inlen;
1386 + void *input_vec;
1387 +
1388 + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1389 + if (!input_vec) {
1390 + ret = -ENOMEM;
1391 + goto out;
1392 + }
1393 +
1394 + memset(output, 0, dlen);
1395 + init_completion(&result.completion);
1396 + sg_init_one(&src, input_vec, ilen);
1397 + sg_init_one(&dst, output, dlen);
1398 +
1399 + req = acomp_request_alloc(tfm);
1400 + if (!req) {
1401 + pr_err("alg: acomp: request alloc failed for %s\n",
1402 + algo);
1403 + kfree(input_vec);
1404 + ret = -ENOMEM;
1405 + goto out;
1406 + }
1407 +
1408 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1409 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1410 + tcrypt_complete, &result);
1411 +
1412 + ret = wait_async_op(&result, crypto_acomp_decompress(req));
1413 + if (ret) {
1414 + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1415 + i + 1, algo, -ret);
1416 + kfree(input_vec);
1417 + acomp_request_free(req);
1418 + goto out;
1419 + }
1420 +
1421 + if (req->dlen != dtemplate[i].outlen) {
1422 + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1423 + i + 1, algo, req->dlen);
1424 + ret = -EINVAL;
1425 + kfree(input_vec);
1426 + acomp_request_free(req);
1427 + goto out;
1428 + }
1429 +
1430 + if (memcmp(output, dtemplate[i].output, req->dlen)) {
1431 + pr_err("alg: acomp: Decompression test %d failed for %s\n",
1432 + i + 1, algo);
1433 + hexdump(output, req->dlen);
1434 + ret = -EINVAL;
1435 + kfree(input_vec);
1436 + acomp_request_free(req);
1437 + goto out;
1438 + }
1439 +
1440 + kfree(input_vec);
1441 + acomp_request_free(req);
1442 + }
1443 +
1444 + ret = 0;
1445 +
1446 +out:
1447 + kfree(output);
1448 + return ret;
1449 +}
1450 +
1451 +static int test_cprng(struct crypto_rng *tfm,
1452 + const struct cprng_testvec *template,
1453 unsigned int tcount)
1454 {
1455 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1456 @@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
1457 struct crypto_aead *tfm;
1458 int err = 0;
1459
1460 - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1461 + tfm = crypto_alloc_aead(driver, type, mask);
1462 if (IS_ERR(tfm)) {
1463 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1464 "%ld\n", driver, PTR_ERR(tfm));
1465 @@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
1466 struct crypto_cipher *tfm;
1467 int err = 0;
1468
1469 - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1470 + tfm = crypto_alloc_cipher(driver, type, mask);
1471 if (IS_ERR(tfm)) {
1472 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1473 "%s: %ld\n", driver, PTR_ERR(tfm));
1474 @@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
1475 struct crypto_skcipher *tfm;
1476 int err = 0;
1477
1478 - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1479 + tfm = crypto_alloc_skcipher(driver, type, mask);
1480 if (IS_ERR(tfm)) {
1481 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1482 "%s: %ld\n", driver, PTR_ERR(tfm));
1483 @@ -1593,22 +1984,38 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
1484 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1485 u32 type, u32 mask)
1486 {
1487 - struct crypto_comp *tfm;
1488 + struct crypto_comp *comp;
1489 + struct crypto_acomp *acomp;
1490 int err;
1491 + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1492 +
1493 + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1494 + acomp = crypto_alloc_acomp(driver, type, mask);
1495 + if (IS_ERR(acomp)) {
1496 + pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1497 + driver, PTR_ERR(acomp));
1498 + return PTR_ERR(acomp);
1499 + }
1500 + err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1501 + desc->suite.comp.decomp.vecs,
1502 + desc->suite.comp.comp.count,
1503 + desc->suite.comp.decomp.count);
1504 + crypto_free_acomp(acomp);
1505 + } else {
1506 + comp = crypto_alloc_comp(driver, type, mask);
1507 + if (IS_ERR(comp)) {
1508 + pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1509 + driver, PTR_ERR(comp));
1510 + return PTR_ERR(comp);
1511 + }
1512
1513 - tfm = crypto_alloc_comp(driver, type, mask);
1514 - if (IS_ERR(tfm)) {
1515 - printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1516 - "%ld\n", driver, PTR_ERR(tfm));
1517 - return PTR_ERR(tfm);
1518 - }
1519 -
1520 - err = test_comp(tfm, desc->suite.comp.comp.vecs,
1521 - desc->suite.comp.decomp.vecs,
1522 - desc->suite.comp.comp.count,
1523 - desc->suite.comp.decomp.count);
1524 + err = test_comp(comp, desc->suite.comp.comp.vecs,
1525 + desc->suite.comp.decomp.vecs,
1526 + desc->suite.comp.comp.count,
1527 + desc->suite.comp.decomp.count);
1528
1529 - crypto_free_comp(tfm);
1530 + crypto_free_comp(comp);
1531 + }
1532 return err;
1533 }
1534
1535 @@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1536 struct crypto_ahash *tfm;
1537 int err;
1538
1539 - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1540 + tfm = crypto_alloc_ahash(driver, type, mask);
1541 if (IS_ERR(tfm)) {
1542 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1543 "%ld\n", driver, PTR_ERR(tfm));
1544 @@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
1545 if (err)
1546 goto out;
1547
1548 - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1549 + tfm = crypto_alloc_shash(driver, type, mask);
1550 if (IS_ERR(tfm)) {
1551 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1552 "%ld\n", driver, PTR_ERR(tfm));
1553 @@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1554 struct crypto_rng *rng;
1555 int err;
1556
1557 - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1558 + rng = crypto_alloc_rng(driver, type, mask);
1559 if (IS_ERR(rng)) {
1560 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1561 "%ld\n", driver, PTR_ERR(rng));
1562 @@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1563 }
1564
1565
1566 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1567 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1568 const char *driver, u32 type, u32 mask)
1569 {
1570 int ret = -EAGAIN;
1571 @@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1572 if (!buf)
1573 return -ENOMEM;
1574
1575 - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1576 + drng = crypto_alloc_rng(driver, type, mask);
1577 if (IS_ERR(drng)) {
1578 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1579 "%s\n", driver);
1580 @@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1581 int err = 0;
1582 int pr = 0;
1583 int i = 0;
1584 - struct drbg_testvec *template = desc->suite.drbg.vecs;
1585 + const struct drbg_testvec *template = desc->suite.drbg.vecs;
1586 unsigned int tcount = desc->suite.drbg.count;
1587
1588 if (0 == memcmp(driver, "drbg_pr_", 8))
1589 @@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1590
1591 }
1592
1593 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1594 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1595 const char *alg)
1596 {
1597 struct kpp_request *req;
1598 @@ -1888,7 +2295,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1599 }
1600
1601 static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1602 - struct kpp_testvec *vecs, unsigned int tcount)
1603 + const struct kpp_testvec *vecs, unsigned int tcount)
1604 {
1605 int ret, i;
1606
1607 @@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
1608 struct crypto_kpp *tfm;
1609 int err = 0;
1610
1611 - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1612 + tfm = crypto_alloc_kpp(driver, type, mask);
1613 if (IS_ERR(tfm)) {
1614 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1615 driver, PTR_ERR(tfm));
1616 @@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
1617 }
1618
1619 static int test_akcipher_one(struct crypto_akcipher *tfm,
1620 - struct akcipher_testvec *vecs)
1621 + const struct akcipher_testvec *vecs)
1622 {
1623 char *xbuf[XBUFSIZE];
1624 struct akcipher_request *req;
1625 @@ -2044,7 +2451,8 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
1626 }
1627
1628 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1629 - struct akcipher_testvec *vecs, unsigned int tcount)
1630 + const struct akcipher_testvec *vecs,
1631 + unsigned int tcount)
1632 {
1633 const char *algo =
1634 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1635 @@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
1636 struct crypto_akcipher *tfm;
1637 int err = 0;
1638
1639 - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1640 + tfm = crypto_alloc_akcipher(driver, type, mask);
1641 if (IS_ERR(tfm)) {
1642 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1643 driver, PTR_ERR(tfm));
1644 @@ -2088,112 +2496,23 @@ static int alg_test_null(const struct alg_test_desc *desc,
1645 return 0;
1646 }
1647
1648 +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
1649 +
1650 /* Please keep this list sorted by algorithm name. */
1651 static const struct alg_test_desc alg_test_descs[] = {
1652 {
1653 - .alg = "__cbc-cast5-avx",
1654 - .test = alg_test_null,
1655 - }, {
1656 - .alg = "__cbc-cast6-avx",
1657 - .test = alg_test_null,
1658 - }, {
1659 - .alg = "__cbc-serpent-avx",
1660 - .test = alg_test_null,
1661 - }, {
1662 - .alg = "__cbc-serpent-avx2",
1663 - .test = alg_test_null,
1664 - }, {
1665 - .alg = "__cbc-serpent-sse2",
1666 - .test = alg_test_null,
1667 - }, {
1668 - .alg = "__cbc-twofish-avx",
1669 - .test = alg_test_null,
1670 - }, {
1671 - .alg = "__driver-cbc-aes-aesni",
1672 - .test = alg_test_null,
1673 - .fips_allowed = 1,
1674 - }, {
1675 - .alg = "__driver-cbc-camellia-aesni",
1676 - .test = alg_test_null,
1677 - }, {
1678 - .alg = "__driver-cbc-camellia-aesni-avx2",
1679 - .test = alg_test_null,
1680 - }, {
1681 - .alg = "__driver-cbc-cast5-avx",
1682 - .test = alg_test_null,
1683 - }, {
1684 - .alg = "__driver-cbc-cast6-avx",
1685 - .test = alg_test_null,
1686 - }, {
1687 - .alg = "__driver-cbc-serpent-avx",
1688 - .test = alg_test_null,
1689 - }, {
1690 - .alg = "__driver-cbc-serpent-avx2",
1691 - .test = alg_test_null,
1692 - }, {
1693 - .alg = "__driver-cbc-serpent-sse2",
1694 - .test = alg_test_null,
1695 - }, {
1696 - .alg = "__driver-cbc-twofish-avx",
1697 - .test = alg_test_null,
1698 - }, {
1699 - .alg = "__driver-ecb-aes-aesni",
1700 - .test = alg_test_null,
1701 - .fips_allowed = 1,
1702 - }, {
1703 - .alg = "__driver-ecb-camellia-aesni",
1704 - .test = alg_test_null,
1705 - }, {
1706 - .alg = "__driver-ecb-camellia-aesni-avx2",
1707 - .test = alg_test_null,
1708 - }, {
1709 - .alg = "__driver-ecb-cast5-avx",
1710 - .test = alg_test_null,
1711 - }, {
1712 - .alg = "__driver-ecb-cast6-avx",
1713 - .test = alg_test_null,
1714 - }, {
1715 - .alg = "__driver-ecb-serpent-avx",
1716 - .test = alg_test_null,
1717 - }, {
1718 - .alg = "__driver-ecb-serpent-avx2",
1719 - .test = alg_test_null,
1720 - }, {
1721 - .alg = "__driver-ecb-serpent-sse2",
1722 - .test = alg_test_null,
1723 - }, {
1724 - .alg = "__driver-ecb-twofish-avx",
1725 - .test = alg_test_null,
1726 - }, {
1727 - .alg = "__driver-gcm-aes-aesni",
1728 - .test = alg_test_null,
1729 - .fips_allowed = 1,
1730 - }, {
1731 - .alg = "__ghash-pclmulqdqni",
1732 - .test = alg_test_null,
1733 - .fips_allowed = 1,
1734 - }, {
1735 .alg = "ansi_cprng",
1736 .test = alg_test_cprng,
1737 .suite = {
1738 - .cprng = {
1739 - .vecs = ansi_cprng_aes_tv_template,
1740 - .count = ANSI_CPRNG_AES_TEST_VECTORS
1741 - }
1742 + .cprng = __VECS(ansi_cprng_aes_tv_template)
1743 }
1744 }, {
1745 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1746 .test = alg_test_aead,
1747 .suite = {
1748 .aead = {
1749 - .enc = {
1750 - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1751 - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1752 - },
1753 - .dec = {
1754 - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1755 - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1756 - }
1757 + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1758 + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1759 }
1760 }
1761 }, {
1762 @@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1763 .test = alg_test_aead,
1764 .suite = {
1765 .aead = {
1766 - .enc = {
1767 - .vecs =
1768 - hmac_sha1_aes_cbc_enc_tv_temp,
1769 - .count =
1770 - HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1771 - }
1772 + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1773 }
1774 }
1775 }, {
1776 @@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1777 .test = alg_test_aead,
1778 .suite = {
1779 .aead = {
1780 - .enc = {
1781 - .vecs =
1782 - hmac_sha1_des_cbc_enc_tv_temp,
1783 - .count =
1784 - HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1785 - }
1786 + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1787 }
1788 }
1789 }, {
1790 @@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1791 .fips_allowed = 1,
1792 .suite = {
1793 .aead = {
1794 - .enc = {
1795 - .vecs =
1796 - hmac_sha1_des3_ede_cbc_enc_tv_temp,
1797 - .count =
1798 - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1799 - }
1800 + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1801 }
1802 }
1803 }, {
1804 @@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_test_descs[] = {
1805 .test = alg_test_aead,
1806 .suite = {
1807 .aead = {
1808 - .enc = {
1809 - .vecs =
1810 - hmac_sha1_ecb_cipher_null_enc_tv_temp,
1811 - .count =
1812 - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1813 - },
1814 - .dec = {
1815 - .vecs =
1816 - hmac_sha1_ecb_cipher_null_dec_tv_temp,
1817 - .count =
1818 - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1819 - }
1820 + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1821 + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1822 }
1823 }
1824 }, {
1825 @@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1826 .test = alg_test_aead,
1827 .suite = {
1828 .aead = {
1829 - .enc = {
1830 - .vecs =
1831 - hmac_sha224_des_cbc_enc_tv_temp,
1832 - .count =
1833 - HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1834 - }
1835 + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1836 }
1837 }
1838 }, {
1839 @@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1840 .fips_allowed = 1,
1841 .suite = {
1842 .aead = {
1843 - .enc = {
1844 - .vecs =
1845 - hmac_sha224_des3_ede_cbc_enc_tv_temp,
1846 - .count =
1847 - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1848 - }
1849 + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1850 }
1851 }
1852 }, {
1853 @@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1854 .fips_allowed = 1,
1855 .suite = {
1856 .aead = {
1857 - .enc = {
1858 - .vecs =
1859 - hmac_sha256_aes_cbc_enc_tv_temp,
1860 - .count =
1861 - HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1862 - }
1863 + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1864 }
1865 }
1866 }, {
1867 @@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1868 .test = alg_test_aead,
1869 .suite = {
1870 .aead = {
1871 - .enc = {
1872 - .vecs =
1873 - hmac_sha256_des_cbc_enc_tv_temp,
1874 - .count =
1875 - HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1876 - }
1877 + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1878 }
1879 }
1880 }, {
1881 @@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1882 .fips_allowed = 1,
1883 .suite = {
1884 .aead = {
1885 - .enc = {
1886 - .vecs =
1887 - hmac_sha256_des3_ede_cbc_enc_tv_temp,
1888 - .count =
1889 - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1890 - }
1891 + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1892 }
1893 }
1894 }, {
1895 @@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1896 .test = alg_test_aead,
1897 .suite = {
1898 .aead = {
1899 - .enc = {
1900 - .vecs =
1901 - hmac_sha384_des_cbc_enc_tv_temp,
1902 - .count =
1903 - HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1904 - }
1905 + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1906 }
1907 }
1908 }, {
1909 @@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1910 .fips_allowed = 1,
1911 .suite = {
1912 .aead = {
1913 - .enc = {
1914 - .vecs =
1915 - hmac_sha384_des3_ede_cbc_enc_tv_temp,
1916 - .count =
1917 - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1918 - }
1919 + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1920 }
1921 }
1922 }, {
1923 @@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1924 .test = alg_test_aead,
1925 .suite = {
1926 .aead = {
1927 - .enc = {
1928 - .vecs =
1929 - hmac_sha512_aes_cbc_enc_tv_temp,
1930 - .count =
1931 - HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1932 - }
1933 + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1934 }
1935 }
1936 }, {
1937 @@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1938 .test = alg_test_aead,
1939 .suite = {
1940 .aead = {
1941 - .enc = {
1942 - .vecs =
1943 - hmac_sha512_des_cbc_enc_tv_temp,
1944 - .count =
1945 - HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1946 - }
1947 + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1948 }
1949 }
1950 }, {
1951 @@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1952 .fips_allowed = 1,
1953 .suite = {
1954 .aead = {
1955 - .enc = {
1956 - .vecs =
1957 - hmac_sha512_des3_ede_cbc_enc_tv_temp,
1958 - .count =
1959 - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1960 - }
1961 + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1962 }
1963 }
1964 }, {
1965 @@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_test_descs[] = {
1966 .fips_allowed = 1,
1967 .suite = {
1968 .cipher = {
1969 - .enc = {
1970 - .vecs = aes_cbc_enc_tv_template,
1971 - .count = AES_CBC_ENC_TEST_VECTORS
1972 - },
1973 - .dec = {
1974 - .vecs = aes_cbc_dec_tv_template,
1975 - .count = AES_CBC_DEC_TEST_VECTORS
1976 - }
1977 + .enc = __VECS(aes_cbc_enc_tv_template),
1978 + .dec = __VECS(aes_cbc_dec_tv_template)
1979 }
1980 }
1981 }, {
1982 @@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_test_descs[] = {
1983 .test = alg_test_skcipher,
1984 .suite = {
1985 .cipher = {
1986 - .enc = {
1987 - .vecs = anubis_cbc_enc_tv_template,
1988 - .count = ANUBIS_CBC_ENC_TEST_VECTORS
1989 - },
1990 - .dec = {
1991 - .vecs = anubis_cbc_dec_tv_template,
1992 - .count = ANUBIS_CBC_DEC_TEST_VECTORS
1993 - }
1994 + .enc = __VECS(anubis_cbc_enc_tv_template),
1995 + .dec = __VECS(anubis_cbc_dec_tv_template)
1996 }
1997 }
1998 }, {
1999 @@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2000 .test = alg_test_skcipher,
2001 .suite = {
2002 .cipher = {
2003 - .enc = {
2004 - .vecs = bf_cbc_enc_tv_template,
2005 - .count = BF_CBC_ENC_TEST_VECTORS
2006 - },
2007 - .dec = {
2008 - .vecs = bf_cbc_dec_tv_template,
2009 - .count = BF_CBC_DEC_TEST_VECTORS
2010 - }
2011 + .enc = __VECS(bf_cbc_enc_tv_template),
2012 + .dec = __VECS(bf_cbc_dec_tv_template)
2013 }
2014 }
2015 }, {
2016 @@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2017 .test = alg_test_skcipher,
2018 .suite = {
2019 .cipher = {
2020 - .enc = {
2021 - .vecs = camellia_cbc_enc_tv_template,
2022 - .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2023 - },
2024 - .dec = {
2025 - .vecs = camellia_cbc_dec_tv_template,
2026 - .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2027 - }
2028 + .enc = __VECS(camellia_cbc_enc_tv_template),
2029 + .dec = __VECS(camellia_cbc_dec_tv_template)
2030 }
2031 }
2032 }, {
2033 @@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2034 .test = alg_test_skcipher,
2035 .suite = {
2036 .cipher = {
2037 - .enc = {
2038 - .vecs = cast5_cbc_enc_tv_template,
2039 - .count = CAST5_CBC_ENC_TEST_VECTORS
2040 - },
2041 - .dec = {
2042 - .vecs = cast5_cbc_dec_tv_template,
2043 - .count = CAST5_CBC_DEC_TEST_VECTORS
2044 - }
2045 + .enc = __VECS(cast5_cbc_enc_tv_template),
2046 + .dec = __VECS(cast5_cbc_dec_tv_template)
2047 }
2048 }
2049 }, {
2050 @@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2051 .test = alg_test_skcipher,
2052 .suite = {
2053 .cipher = {
2054 - .enc = {
2055 - .vecs = cast6_cbc_enc_tv_template,
2056 - .count = CAST6_CBC_ENC_TEST_VECTORS
2057 - },
2058 - .dec = {
2059 - .vecs = cast6_cbc_dec_tv_template,
2060 - .count = CAST6_CBC_DEC_TEST_VECTORS
2061 - }
2062 + .enc = __VECS(cast6_cbc_enc_tv_template),
2063 + .dec = __VECS(cast6_cbc_dec_tv_template)
2064 }
2065 }
2066 }, {
2067 @@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2068 .test = alg_test_skcipher,
2069 .suite = {
2070 .cipher = {
2071 - .enc = {
2072 - .vecs = des_cbc_enc_tv_template,
2073 - .count = DES_CBC_ENC_TEST_VECTORS
2074 - },
2075 - .dec = {
2076 - .vecs = des_cbc_dec_tv_template,
2077 - .count = DES_CBC_DEC_TEST_VECTORS
2078 - }
2079 + .enc = __VECS(des_cbc_enc_tv_template),
2080 + .dec = __VECS(des_cbc_dec_tv_template)
2081 }
2082 }
2083 }, {
2084 @@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2085 .fips_allowed = 1,
2086 .suite = {
2087 .cipher = {
2088 - .enc = {
2089 - .vecs = des3_ede_cbc_enc_tv_template,
2090 - .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2091 - },
2092 - .dec = {
2093 - .vecs = des3_ede_cbc_dec_tv_template,
2094 - .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2095 - }
2096 + .enc = __VECS(des3_ede_cbc_enc_tv_template),
2097 + .dec = __VECS(des3_ede_cbc_dec_tv_template)
2098 }
2099 }
2100 }, {
2101 @@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2102 .test = alg_test_skcipher,
2103 .suite = {
2104 .cipher = {
2105 - .enc = {
2106 - .vecs = serpent_cbc_enc_tv_template,
2107 - .count = SERPENT_CBC_ENC_TEST_VECTORS
2108 - },
2109 - .dec = {
2110 - .vecs = serpent_cbc_dec_tv_template,
2111 - .count = SERPENT_CBC_DEC_TEST_VECTORS
2112 - }
2113 + .enc = __VECS(serpent_cbc_enc_tv_template),
2114 + .dec = __VECS(serpent_cbc_dec_tv_template)
2115 }
2116 }
2117 }, {
2118 @@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_test_descs[] = {
2119 .test = alg_test_skcipher,
2120 .suite = {
2121 .cipher = {
2122 - .enc = {
2123 - .vecs = tf_cbc_enc_tv_template,
2124 - .count = TF_CBC_ENC_TEST_VECTORS
2125 - },
2126 - .dec = {
2127 - .vecs = tf_cbc_dec_tv_template,
2128 - .count = TF_CBC_DEC_TEST_VECTORS
2129 - }
2130 + .enc = __VECS(tf_cbc_enc_tv_template),
2131 + .dec = __VECS(tf_cbc_dec_tv_template)
2132 }
2133 }
2134 + }, {
2135 + .alg = "cbcmac(aes)",
2136 + .fips_allowed = 1,
2137 + .test = alg_test_hash,
2138 + .suite = {
2139 + .hash = __VECS(aes_cbcmac_tv_template)
2140 + }
2141 }, {
2142 .alg = "ccm(aes)",
2143 .test = alg_test_aead,
2144 .fips_allowed = 1,
2145 .suite = {
2146 .aead = {
2147 - .enc = {
2148 - .vecs = aes_ccm_enc_tv_template,
2149 - .count = AES_CCM_ENC_TEST_VECTORS
2150 - },
2151 - .dec = {
2152 - .vecs = aes_ccm_dec_tv_template,
2153 - .count = AES_CCM_DEC_TEST_VECTORS
2154 - }
2155 + .enc = __VECS(aes_ccm_enc_tv_template),
2156 + .dec = __VECS(aes_ccm_dec_tv_template)
2157 }
2158 }
2159 }, {
2160 @@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2161 .test = alg_test_skcipher,
2162 .suite = {
2163 .cipher = {
2164 - .enc = {
2165 - .vecs = chacha20_enc_tv_template,
2166 - .count = CHACHA20_ENC_TEST_VECTORS
2167 - },
2168 - .dec = {
2169 - .vecs = chacha20_enc_tv_template,
2170 - .count = CHACHA20_ENC_TEST_VECTORS
2171 - },
2172 + .enc = __VECS(chacha20_enc_tv_template),
2173 + .dec = __VECS(chacha20_enc_tv_template),
2174 }
2175 }
2176 }, {
2177 @@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_test_descs[] = {
2178 .fips_allowed = 1,
2179 .test = alg_test_hash,
2180 .suite = {
2181 - .hash = {
2182 - .vecs = aes_cmac128_tv_template,
2183 - .count = CMAC_AES_TEST_VECTORS
2184 - }
2185 + .hash = __VECS(aes_cmac128_tv_template)
2186 }
2187 }, {
2188 .alg = "cmac(des3_ede)",
2189 .fips_allowed = 1,
2190 .test = alg_test_hash,
2191 .suite = {
2192 - .hash = {
2193 - .vecs = des3_ede_cmac64_tv_template,
2194 - .count = CMAC_DES3_EDE_TEST_VECTORS
2195 - }
2196 + .hash = __VECS(des3_ede_cmac64_tv_template)
2197 }
2198 }, {
2199 .alg = "compress_null",
2200 @@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_test_descs[] = {
2201 .alg = "crc32",
2202 .test = alg_test_hash,
2203 .suite = {
2204 - .hash = {
2205 - .vecs = crc32_tv_template,
2206 - .count = CRC32_TEST_VECTORS
2207 - }
2208 + .hash = __VECS(crc32_tv_template)
2209 }
2210 }, {
2211 .alg = "crc32c",
2212 .test = alg_test_crc32c,
2213 .fips_allowed = 1,
2214 .suite = {
2215 - .hash = {
2216 - .vecs = crc32c_tv_template,
2217 - .count = CRC32C_TEST_VECTORS
2218 - }
2219 + .hash = __VECS(crc32c_tv_template)
2220 }
2221 }, {
2222 .alg = "crct10dif",
2223 .test = alg_test_hash,
2224 .fips_allowed = 1,
2225 .suite = {
2226 - .hash = {
2227 - .vecs = crct10dif_tv_template,
2228 - .count = CRCT10DIF_TEST_VECTORS
2229 - }
2230 + .hash = __VECS(crct10dif_tv_template)
2231 }
2232 - }, {
2233 - .alg = "cryptd(__driver-cbc-aes-aesni)",
2234 - .test = alg_test_null,
2235 - .fips_allowed = 1,
2236 - }, {
2237 - .alg = "cryptd(__driver-cbc-camellia-aesni)",
2238 - .test = alg_test_null,
2239 - }, {
2240 - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2241 - .test = alg_test_null,
2242 - }, {
2243 - .alg = "cryptd(__driver-cbc-serpent-avx2)",
2244 - .test = alg_test_null,
2245 - }, {
2246 - .alg = "cryptd(__driver-ecb-aes-aesni)",
2247 - .test = alg_test_null,
2248 - .fips_allowed = 1,
2249 - }, {
2250 - .alg = "cryptd(__driver-ecb-camellia-aesni)",
2251 - .test = alg_test_null,
2252 - }, {
2253 - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2254 - .test = alg_test_null,
2255 - }, {
2256 - .alg = "cryptd(__driver-ecb-cast5-avx)",
2257 - .test = alg_test_null,
2258 - }, {
2259 - .alg = "cryptd(__driver-ecb-cast6-avx)",
2260 - .test = alg_test_null,
2261 - }, {
2262 - .alg = "cryptd(__driver-ecb-serpent-avx)",
2263 - .test = alg_test_null,
2264 - }, {
2265 - .alg = "cryptd(__driver-ecb-serpent-avx2)",
2266 - .test = alg_test_null,
2267 - }, {
2268 - .alg = "cryptd(__driver-ecb-serpent-sse2)",
2269 - .test = alg_test_null,
2270 - }, {
2271 - .alg = "cryptd(__driver-ecb-twofish-avx)",
2272 - .test = alg_test_null,
2273 - }, {
2274 - .alg = "cryptd(__driver-gcm-aes-aesni)",
2275 - .test = alg_test_null,
2276 - .fips_allowed = 1,
2277 - }, {
2278 - .alg = "cryptd(__ghash-pclmulqdqni)",
2279 - .test = alg_test_null,
2280 - .fips_allowed = 1,
2281 }, {
2282 .alg = "ctr(aes)",
2283 .test = alg_test_skcipher,
2284 .fips_allowed = 1,
2285 .suite = {
2286 .cipher = {
2287 - .enc = {
2288 - .vecs = aes_ctr_enc_tv_template,
2289 - .count = AES_CTR_ENC_TEST_VECTORS
2290 - },
2291 - .dec = {
2292 - .vecs = aes_ctr_dec_tv_template,
2293 - .count = AES_CTR_DEC_TEST_VECTORS
2294 - }
2295 + .enc = __VECS(aes_ctr_enc_tv_template),
2296 + .dec = __VECS(aes_ctr_dec_tv_template)
2297 }
2298 }
2299 }, {
2300 @@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2301 .test = alg_test_skcipher,
2302 .suite = {
2303 .cipher = {
2304 - .enc = {
2305 - .vecs = bf_ctr_enc_tv_template,
2306 - .count = BF_CTR_ENC_TEST_VECTORS
2307 - },
2308 - .dec = {
2309 - .vecs = bf_ctr_dec_tv_template,
2310 - .count = BF_CTR_DEC_TEST_VECTORS
2311 - }
2312 + .enc = __VECS(bf_ctr_enc_tv_template),
2313 + .dec = __VECS(bf_ctr_dec_tv_template)
2314 }
2315 }
2316 }, {
2317 @@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2318 .test = alg_test_skcipher,
2319 .suite = {
2320 .cipher = {
2321 - .enc = {
2322 - .vecs = camellia_ctr_enc_tv_template,
2323 - .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2324 - },
2325 - .dec = {
2326 - .vecs = camellia_ctr_dec_tv_template,
2327 - .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2328 - }
2329 + .enc = __VECS(camellia_ctr_enc_tv_template),
2330 + .dec = __VECS(camellia_ctr_dec_tv_template)
2331 }
2332 }
2333 }, {
2334 @@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2335 .test = alg_test_skcipher,
2336 .suite = {
2337 .cipher = {
2338 - .enc = {
2339 - .vecs = cast5_ctr_enc_tv_template,
2340 - .count = CAST5_CTR_ENC_TEST_VECTORS
2341 - },
2342 - .dec = {
2343 - .vecs = cast5_ctr_dec_tv_template,
2344 - .count = CAST5_CTR_DEC_TEST_VECTORS
2345 - }
2346 + .enc = __VECS(cast5_ctr_enc_tv_template),
2347 + .dec = __VECS(cast5_ctr_dec_tv_template)
2348 }
2349 }
2350 }, {
2351 @@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2352 .test = alg_test_skcipher,
2353 .suite = {
2354 .cipher = {
2355 - .enc = {
2356 - .vecs = cast6_ctr_enc_tv_template,
2357 - .count = CAST6_CTR_ENC_TEST_VECTORS
2358 - },
2359 - .dec = {
2360 - .vecs = cast6_ctr_dec_tv_template,
2361 - .count = CAST6_CTR_DEC_TEST_VECTORS
2362 - }
2363 + .enc = __VECS(cast6_ctr_enc_tv_template),
2364 + .dec = __VECS(cast6_ctr_dec_tv_template)
2365 }
2366 }
2367 }, {
2368 @@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_test_descs[] = {
2369 .test = alg_test_skcipher,
2370 .suite = {
2371 .cipher = {
2372 - .enc = {
2373 - .vecs = des_ctr_enc_tv_template,
2374 - .count = DES_CTR_ENC_TEST_VECTORS
2375 - },
2376 - .dec = {
2377 - .vecs = des_ctr_dec_tv_template,
2378 - .count = DES_CTR_DEC_TEST_VECTORS
2379 - }
2380 + .enc = __VECS(des_ctr_enc_tv_template),
2381 + .dec = __VECS(des_ctr_dec_tv_template)
2382 }
2383 }
2384 }, {
2385 .alg = "ctr(des3_ede)",
2386 .test = alg_test_skcipher,
2387 + .fips_allowed = 1,
2388 .suite = {
2389 .cipher = {
2390 - .enc = {
2391 - .vecs = des3_ede_ctr_enc_tv_template,
2392 - .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2393 - },
2394 - .dec = {
2395 - .vecs = des3_ede_ctr_dec_tv_template,
2396 - .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2397 - }
2398 + .enc = __VECS(des3_ede_ctr_enc_tv_template),
2399 + .dec = __VECS(des3_ede_ctr_dec_tv_template)
2400 }
2401 }
2402 }, {
2403 @@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2404 .test = alg_test_skcipher,
2405 .suite = {
2406 .cipher = {
2407 - .enc = {
2408 - .vecs = serpent_ctr_enc_tv_template,
2409 - .count = SERPENT_CTR_ENC_TEST_VECTORS
2410 - },
2411 - .dec = {
2412 - .vecs = serpent_ctr_dec_tv_template,
2413 - .count = SERPENT_CTR_DEC_TEST_VECTORS
2414 - }
2415 + .enc = __VECS(serpent_ctr_enc_tv_template),
2416 + .dec = __VECS(serpent_ctr_dec_tv_template)
2417 }
2418 }
2419 }, {
2420 @@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2421 .test = alg_test_skcipher,
2422 .suite = {
2423 .cipher = {
2424 - .enc = {
2425 - .vecs = tf_ctr_enc_tv_template,
2426 - .count = TF_CTR_ENC_TEST_VECTORS
2427 - },
2428 - .dec = {
2429 - .vecs = tf_ctr_dec_tv_template,
2430 - .count = TF_CTR_DEC_TEST_VECTORS
2431 - }
2432 + .enc = __VECS(tf_ctr_enc_tv_template),
2433 + .dec = __VECS(tf_ctr_dec_tv_template)
2434 }
2435 }
2436 }, {
2437 @@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2438 .test = alg_test_skcipher,
2439 .suite = {
2440 .cipher = {
2441 - .enc = {
2442 - .vecs = cts_mode_enc_tv_template,
2443 - .count = CTS_MODE_ENC_TEST_VECTORS
2444 - },
2445 - .dec = {
2446 - .vecs = cts_mode_dec_tv_template,
2447 - .count = CTS_MODE_DEC_TEST_VECTORS
2448 - }
2449 + .enc = __VECS(cts_mode_enc_tv_template),
2450 + .dec = __VECS(cts_mode_dec_tv_template)
2451 }
2452 }
2453 }, {
2454 @@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2455 .fips_allowed = 1,
2456 .suite = {
2457 .comp = {
2458 - .comp = {
2459 - .vecs = deflate_comp_tv_template,
2460 - .count = DEFLATE_COMP_TEST_VECTORS
2461 - },
2462 - .decomp = {
2463 - .vecs = deflate_decomp_tv_template,
2464 - .count = DEFLATE_DECOMP_TEST_VECTORS
2465 - }
2466 + .comp = __VECS(deflate_comp_tv_template),
2467 + .decomp = __VECS(deflate_decomp_tv_template)
2468 }
2469 }
2470 }, {
2471 @@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2472 .test = alg_test_kpp,
2473 .fips_allowed = 1,
2474 .suite = {
2475 - .kpp = {
2476 - .vecs = dh_tv_template,
2477 - .count = DH_TEST_VECTORS
2478 - }
2479 + .kpp = __VECS(dh_tv_template)
2480 }
2481 }, {
2482 .alg = "digest_null",
2483 @@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_test_descs[] = {
2484 .test = alg_test_drbg,
2485 .fips_allowed = 1,
2486 .suite = {
2487 - .drbg = {
2488 - .vecs = drbg_nopr_ctr_aes128_tv_template,
2489 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2490 - }
2491 + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2492 }
2493 }, {
2494 .alg = "drbg_nopr_ctr_aes192",
2495 .test = alg_test_drbg,
2496 .fips_allowed = 1,
2497 .suite = {
2498 - .drbg = {
2499 - .vecs = drbg_nopr_ctr_aes192_tv_template,
2500 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2501 - }
2502 + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2503 }
2504 }, {
2505 .alg = "drbg_nopr_ctr_aes256",
2506 .test = alg_test_drbg,
2507 .fips_allowed = 1,
2508 .suite = {
2509 - .drbg = {
2510 - .vecs = drbg_nopr_ctr_aes256_tv_template,
2511 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2512 - }
2513 + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2514 }
2515 }, {
2516 /*
2517 @@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2518 .test = alg_test_drbg,
2519 .fips_allowed = 1,
2520 .suite = {
2521 - .drbg = {
2522 - .vecs = drbg_nopr_hmac_sha256_tv_template,
2523 - .count =
2524 - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2525 - }
2526 + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2527 }
2528 }, {
2529 /* covered by drbg_nopr_hmac_sha256 test */
2530 @@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2531 .test = alg_test_drbg,
2532 .fips_allowed = 1,
2533 .suite = {
2534 - .drbg = {
2535 - .vecs = drbg_nopr_sha256_tv_template,
2536 - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2537 - }
2538 + .drbg = __VECS(drbg_nopr_sha256_tv_template)
2539 }
2540 }, {
2541 /* covered by drbg_nopr_sha256 test */
2542 @@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2543 .test = alg_test_drbg,
2544 .fips_allowed = 1,
2545 .suite = {
2546 - .drbg = {
2547 - .vecs = drbg_pr_ctr_aes128_tv_template,
2548 - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2549 - }
2550 + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2551 }
2552 }, {
2553 /* covered by drbg_pr_ctr_aes128 test */
2554 @@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2555 .test = alg_test_drbg,
2556 .fips_allowed = 1,
2557 .suite = {
2558 - .drbg = {
2559 - .vecs = drbg_pr_hmac_sha256_tv_template,
2560 - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2561 - }
2562 + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2563 }
2564 }, {
2565 /* covered by drbg_pr_hmac_sha256 test */
2566 @@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2567 .test = alg_test_drbg,
2568 .fips_allowed = 1,
2569 .suite = {
2570 - .drbg = {
2571 - .vecs = drbg_pr_sha256_tv_template,
2572 - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2573 - }
2574 + .drbg = __VECS(drbg_pr_sha256_tv_template)
2575 }
2576 }, {
2577 /* covered by drbg_pr_sha256 test */
2578 @@ -3033,24 +3055,14 @@ static const struct alg_test_desc alg_test_descs[] = {
2579 .alg = "drbg_pr_sha512",
2580 .fips_allowed = 1,
2581 .test = alg_test_null,
2582 - }, {
2583 - .alg = "ecb(__aes-aesni)",
2584 - .test = alg_test_null,
2585 - .fips_allowed = 1,
2586 }, {
2587 .alg = "ecb(aes)",
2588 .test = alg_test_skcipher,
2589 .fips_allowed = 1,
2590 .suite = {
2591 .cipher = {
2592 - .enc = {
2593 - .vecs = aes_enc_tv_template,
2594 - .count = AES_ENC_TEST_VECTORS
2595 - },
2596 - .dec = {
2597 - .vecs = aes_dec_tv_template,
2598 - .count = AES_DEC_TEST_VECTORS
2599 - }
2600 + .enc = __VECS(aes_enc_tv_template),
2601 + .dec = __VECS(aes_dec_tv_template)
2602 }
2603 }
2604 }, {
2605 @@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2606 .test = alg_test_skcipher,
2607 .suite = {
2608 .cipher = {
2609 - .enc = {
2610 - .vecs = anubis_enc_tv_template,
2611 - .count = ANUBIS_ENC_TEST_VECTORS
2612 - },
2613 - .dec = {
2614 - .vecs = anubis_dec_tv_template,
2615 - .count = ANUBIS_DEC_TEST_VECTORS
2616 - }
2617 + .enc = __VECS(anubis_enc_tv_template),
2618 + .dec = __VECS(anubis_dec_tv_template)
2619 }
2620 }
2621 }, {
2622 @@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2623 .test = alg_test_skcipher,
2624 .suite = {
2625 .cipher = {
2626 - .enc = {
2627 - .vecs = arc4_enc_tv_template,
2628 - .count = ARC4_ENC_TEST_VECTORS
2629 - },
2630 - .dec = {
2631 - .vecs = arc4_dec_tv_template,
2632 - .count = ARC4_DEC_TEST_VECTORS
2633 - }
2634 + .enc = __VECS(arc4_enc_tv_template),
2635 + .dec = __VECS(arc4_dec_tv_template)
2636 }
2637 }
2638 }, {
2639 @@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2640 .test = alg_test_skcipher,
2641 .suite = {
2642 .cipher = {
2643 - .enc = {
2644 - .vecs = bf_enc_tv_template,
2645 - .count = BF_ENC_TEST_VECTORS
2646 - },
2647 - .dec = {
2648 - .vecs = bf_dec_tv_template,
2649 - .count = BF_DEC_TEST_VECTORS
2650 - }
2651 + .enc = __VECS(bf_enc_tv_template),
2652 + .dec = __VECS(bf_dec_tv_template)
2653 }
2654 }
2655 }, {
2656 @@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2657 .test = alg_test_skcipher,
2658 .suite = {
2659 .cipher = {
2660 - .enc = {
2661 - .vecs = camellia_enc_tv_template,
2662 - .count = CAMELLIA_ENC_TEST_VECTORS
2663 - },
2664 - .dec = {
2665 - .vecs = camellia_dec_tv_template,
2666 - .count = CAMELLIA_DEC_TEST_VECTORS
2667 - }
2668 + .enc = __VECS(camellia_enc_tv_template),
2669 + .dec = __VECS(camellia_dec_tv_template)
2670 }
2671 }
2672 }, {
2673 @@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2674 .test = alg_test_skcipher,
2675 .suite = {
2676 .cipher = {
2677 - .enc = {
2678 - .vecs = cast5_enc_tv_template,
2679 - .count = CAST5_ENC_TEST_VECTORS
2680 - },
2681 - .dec = {
2682 - .vecs = cast5_dec_tv_template,
2683 - .count = CAST5_DEC_TEST_VECTORS
2684 - }
2685 + .enc = __VECS(cast5_enc_tv_template),
2686 + .dec = __VECS(cast5_dec_tv_template)
2687 }
2688 }
2689 }, {
2690 @@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2691 .test = alg_test_skcipher,
2692 .suite = {
2693 .cipher = {
2694 - .enc = {
2695 - .vecs = cast6_enc_tv_template,
2696 - .count = CAST6_ENC_TEST_VECTORS
2697 - },
2698 - .dec = {
2699 - .vecs = cast6_dec_tv_template,
2700 - .count = CAST6_DEC_TEST_VECTORS
2701 - }
2702 + .enc = __VECS(cast6_enc_tv_template),
2703 + .dec = __VECS(cast6_dec_tv_template)
2704 }
2705 }
2706 }, {
2707 @@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2708 .test = alg_test_skcipher,
2709 .suite = {
2710 .cipher = {
2711 - .enc = {
2712 - .vecs = des_enc_tv_template,
2713 - .count = DES_ENC_TEST_VECTORS
2714 - },
2715 - .dec = {
2716 - .vecs = des_dec_tv_template,
2717 - .count = DES_DEC_TEST_VECTORS
2718 - }
2719 + .enc = __VECS(des_enc_tv_template),
2720 + .dec = __VECS(des_dec_tv_template)
2721 }
2722 }
2723 }, {
2724 @@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2725 .fips_allowed = 1,
2726 .suite = {
2727 .cipher = {
2728 - .enc = {
2729 - .vecs = des3_ede_enc_tv_template,
2730 - .count = DES3_EDE_ENC_TEST_VECTORS
2731 - },
2732 - .dec = {
2733 - .vecs = des3_ede_dec_tv_template,
2734 - .count = DES3_EDE_DEC_TEST_VECTORS
2735 - }
2736 + .enc = __VECS(des3_ede_enc_tv_template),
2737 + .dec = __VECS(des3_ede_dec_tv_template)
2738 }
2739 }
2740 }, {
2741 @@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2742 .test = alg_test_skcipher,
2743 .suite = {
2744 .cipher = {
2745 - .enc = {
2746 - .vecs = khazad_enc_tv_template,
2747 - .count = KHAZAD_ENC_TEST_VECTORS
2748 - },
2749 - .dec = {
2750 - .vecs = khazad_dec_tv_template,
2751 - .count = KHAZAD_DEC_TEST_VECTORS
2752 - }
2753 + .enc = __VECS(khazad_enc_tv_template),
2754 + .dec = __VECS(khazad_dec_tv_template)
2755 }
2756 }
2757 }, {
2758 @@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2759 .test = alg_test_skcipher,
2760 .suite = {
2761 .cipher = {
2762 - .enc = {
2763 - .vecs = seed_enc_tv_template,
2764 - .count = SEED_ENC_TEST_VECTORS
2765 - },
2766 - .dec = {
2767 - .vecs = seed_dec_tv_template,
2768 - .count = SEED_DEC_TEST_VECTORS
2769 - }
2770 + .enc = __VECS(seed_enc_tv_template),
2771 + .dec = __VECS(seed_dec_tv_template)
2772 }
2773 }
2774 }, {
2775 @@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2776 .test = alg_test_skcipher,
2777 .suite = {
2778 .cipher = {
2779 - .enc = {
2780 - .vecs = serpent_enc_tv_template,
2781 - .count = SERPENT_ENC_TEST_VECTORS
2782 - },
2783 - .dec = {
2784 - .vecs = serpent_dec_tv_template,
2785 - .count = SERPENT_DEC_TEST_VECTORS
2786 - }
2787 + .enc = __VECS(serpent_enc_tv_template),
2788 + .dec = __VECS(serpent_dec_tv_template)
2789 }
2790 }
2791 }, {
2792 @@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2793 .test = alg_test_skcipher,
2794 .suite = {
2795 .cipher = {
2796 - .enc = {
2797 - .vecs = tea_enc_tv_template,
2798 - .count = TEA_ENC_TEST_VECTORS
2799 - },
2800 - .dec = {
2801 - .vecs = tea_dec_tv_template,
2802 - .count = TEA_DEC_TEST_VECTORS
2803 - }
2804 + .enc = __VECS(tea_enc_tv_template),
2805 + .dec = __VECS(tea_dec_tv_template)
2806 }
2807 }
2808 }, {
2809 @@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2810 .test = alg_test_skcipher,
2811 .suite = {
2812 .cipher = {
2813 - .enc = {
2814 - .vecs = tnepres_enc_tv_template,
2815 - .count = TNEPRES_ENC_TEST_VECTORS
2816 - },
2817 - .dec = {
2818 - .vecs = tnepres_dec_tv_template,
2819 - .count = TNEPRES_DEC_TEST_VECTORS
2820 - }
2821 + .enc = __VECS(tnepres_enc_tv_template),
2822 + .dec = __VECS(tnepres_dec_tv_template)
2823 }
2824 }
2825 }, {
2826 @@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2827 .test = alg_test_skcipher,
2828 .suite = {
2829 .cipher = {
2830 - .enc = {
2831 - .vecs = tf_enc_tv_template,
2832 - .count = TF_ENC_TEST_VECTORS
2833 - },
2834 - .dec = {
2835 - .vecs = tf_dec_tv_template,
2836 - .count = TF_DEC_TEST_VECTORS
2837 - }
2838 + .enc = __VECS(tf_enc_tv_template),
2839 + .dec = __VECS(tf_dec_tv_template)
2840 }
2841 }
2842 }, {
2843 @@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2844 .test = alg_test_skcipher,
2845 .suite = {
2846 .cipher = {
2847 - .enc = {
2848 - .vecs = xeta_enc_tv_template,
2849 - .count = XETA_ENC_TEST_VECTORS
2850 - },
2851 - .dec = {
2852 - .vecs = xeta_dec_tv_template,
2853 - .count = XETA_DEC_TEST_VECTORS
2854 - }
2855 + .enc = __VECS(xeta_enc_tv_template),
2856 + .dec = __VECS(xeta_dec_tv_template)
2857 }
2858 }
2859 }, {
2860 @@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2861 .test = alg_test_skcipher,
2862 .suite = {
2863 .cipher = {
2864 - .enc = {
2865 - .vecs = xtea_enc_tv_template,
2866 - .count = XTEA_ENC_TEST_VECTORS
2867 - },
2868 - .dec = {
2869 - .vecs = xtea_dec_tv_template,
2870 - .count = XTEA_DEC_TEST_VECTORS
2871 - }
2872 + .enc = __VECS(xtea_enc_tv_template),
2873 + .dec = __VECS(xtea_dec_tv_template)
2874 }
2875 }
2876 }, {
2877 @@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2878 .test = alg_test_kpp,
2879 .fips_allowed = 1,
2880 .suite = {
2881 - .kpp = {
2882 - .vecs = ecdh_tv_template,
2883 - .count = ECDH_TEST_VECTORS
2884 - }
2885 + .kpp = __VECS(ecdh_tv_template)
2886 }
2887 }, {
2888 .alg = "gcm(aes)",
2889 @@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2890 .fips_allowed = 1,
2891 .suite = {
2892 .aead = {
2893 - .enc = {
2894 - .vecs = aes_gcm_enc_tv_template,
2895 - .count = AES_GCM_ENC_TEST_VECTORS
2896 - },
2897 - .dec = {
2898 - .vecs = aes_gcm_dec_tv_template,
2899 - .count = AES_GCM_DEC_TEST_VECTORS
2900 - }
2901 + .enc = __VECS(aes_gcm_enc_tv_template),
2902 + .dec = __VECS(aes_gcm_dec_tv_template)
2903 }
2904 }
2905 }, {
2906 @@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_test_descs[] = {
2907 .test = alg_test_hash,
2908 .fips_allowed = 1,
2909 .suite = {
2910 - .hash = {
2911 - .vecs = ghash_tv_template,
2912 - .count = GHASH_TEST_VECTORS
2913 - }
2914 + .hash = __VECS(ghash_tv_template)
2915 }
2916 }, {
2917 .alg = "hmac(crc32)",
2918 .test = alg_test_hash,
2919 .suite = {
2920 - .hash = {
2921 - .vecs = bfin_crc_tv_template,
2922 - .count = BFIN_CRC_TEST_VECTORS
2923 - }
2924 + .hash = __VECS(bfin_crc_tv_template)
2925 }
2926 }, {
2927 .alg = "hmac(md5)",
2928 .test = alg_test_hash,
2929 .suite = {
2930 - .hash = {
2931 - .vecs = hmac_md5_tv_template,
2932 - .count = HMAC_MD5_TEST_VECTORS
2933 - }
2934 + .hash = __VECS(hmac_md5_tv_template)
2935 }
2936 }, {
2937 .alg = "hmac(rmd128)",
2938 .test = alg_test_hash,
2939 .suite = {
2940 - .hash = {
2941 - .vecs = hmac_rmd128_tv_template,
2942 - .count = HMAC_RMD128_TEST_VECTORS
2943 - }
2944 + .hash = __VECS(hmac_rmd128_tv_template)
2945 }
2946 }, {
2947 .alg = "hmac(rmd160)",
2948 .test = alg_test_hash,
2949 .suite = {
2950 - .hash = {
2951 - .vecs = hmac_rmd160_tv_template,
2952 - .count = HMAC_RMD160_TEST_VECTORS
2953 - }
2954 + .hash = __VECS(hmac_rmd160_tv_template)
2955 }
2956 }, {
2957 .alg = "hmac(sha1)",
2958 .test = alg_test_hash,
2959 .fips_allowed = 1,
2960 .suite = {
2961 - .hash = {
2962 - .vecs = hmac_sha1_tv_template,
2963 - .count = HMAC_SHA1_TEST_VECTORS
2964 - }
2965 + .hash = __VECS(hmac_sha1_tv_template)
2966 }
2967 }, {
2968 .alg = "hmac(sha224)",
2969 .test = alg_test_hash,
2970 .fips_allowed = 1,
2971 .suite = {
2972 - .hash = {
2973 - .vecs = hmac_sha224_tv_template,
2974 - .count = HMAC_SHA224_TEST_VECTORS
2975 - }
2976 + .hash = __VECS(hmac_sha224_tv_template)
2977 }
2978 }, {
2979 .alg = "hmac(sha256)",
2980 .test = alg_test_hash,
2981 .fips_allowed = 1,
2982 .suite = {
2983 - .hash = {
2984 - .vecs = hmac_sha256_tv_template,
2985 - .count = HMAC_SHA256_TEST_VECTORS
2986 - }
2987 + .hash = __VECS(hmac_sha256_tv_template)
2988 }
2989 }, {
2990 .alg = "hmac(sha3-224)",
2991 .test = alg_test_hash,
2992 .fips_allowed = 1,
2993 .suite = {
2994 - .hash = {
2995 - .vecs = hmac_sha3_224_tv_template,
2996 - .count = HMAC_SHA3_224_TEST_VECTORS
2997 - }
2998 + .hash = __VECS(hmac_sha3_224_tv_template)
2999 }
3000 }, {
3001 .alg = "hmac(sha3-256)",
3002 .test = alg_test_hash,
3003 .fips_allowed = 1,
3004 .suite = {
3005 - .hash = {
3006 - .vecs = hmac_sha3_256_tv_template,
3007 - .count = HMAC_SHA3_256_TEST_VECTORS
3008 - }
3009 + .hash = __VECS(hmac_sha3_256_tv_template)
3010 }
3011 }, {
3012 .alg = "hmac(sha3-384)",
3013 .test = alg_test_hash,
3014 .fips_allowed = 1,
3015 .suite = {
3016 - .hash = {
3017 - .vecs = hmac_sha3_384_tv_template,
3018 - .count = HMAC_SHA3_384_TEST_VECTORS
3019 - }
3020 + .hash = __VECS(hmac_sha3_384_tv_template)
3021 }
3022 }, {
3023 .alg = "hmac(sha3-512)",
3024 .test = alg_test_hash,
3025 .fips_allowed = 1,
3026 .suite = {
3027 - .hash = {
3028 - .vecs = hmac_sha3_512_tv_template,
3029 - .count = HMAC_SHA3_512_TEST_VECTORS
3030 - }
3031 + .hash = __VECS(hmac_sha3_512_tv_template)
3032 }
3033 }, {
3034 .alg = "hmac(sha384)",
3035 .test = alg_test_hash,
3036 .fips_allowed = 1,
3037 .suite = {
3038 - .hash = {
3039 - .vecs = hmac_sha384_tv_template,
3040 - .count = HMAC_SHA384_TEST_VECTORS
3041 - }
3042 + .hash = __VECS(hmac_sha384_tv_template)
3043 }
3044 }, {
3045 .alg = "hmac(sha512)",
3046 .test = alg_test_hash,
3047 .fips_allowed = 1,
3048 .suite = {
3049 - .hash = {
3050 - .vecs = hmac_sha512_tv_template,
3051 - .count = HMAC_SHA512_TEST_VECTORS
3052 - }
3053 + .hash = __VECS(hmac_sha512_tv_template)
3054 }
3055 }, {
3056 .alg = "jitterentropy_rng",
3057 @@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3058 .fips_allowed = 1,
3059 .suite = {
3060 .cipher = {
3061 - .enc = {
3062 - .vecs = aes_kw_enc_tv_template,
3063 - .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3064 - },
3065 - .dec = {
3066 - .vecs = aes_kw_dec_tv_template,
3067 - .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3068 - }
3069 + .enc = __VECS(aes_kw_enc_tv_template),
3070 + .dec = __VECS(aes_kw_dec_tv_template)
3071 }
3072 }
3073 }, {
3074 @@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3075 .test = alg_test_skcipher,
3076 .suite = {
3077 .cipher = {
3078 - .enc = {
3079 - .vecs = aes_lrw_enc_tv_template,
3080 - .count = AES_LRW_ENC_TEST_VECTORS
3081 - },
3082 - .dec = {
3083 - .vecs = aes_lrw_dec_tv_template,
3084 - .count = AES_LRW_DEC_TEST_VECTORS
3085 - }
3086 + .enc = __VECS(aes_lrw_enc_tv_template),
3087 + .dec = __VECS(aes_lrw_dec_tv_template)
3088 }
3089 }
3090 }, {
3091 @@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3092 .test = alg_test_skcipher,
3093 .suite = {
3094 .cipher = {
3095 - .enc = {
3096 - .vecs = camellia_lrw_enc_tv_template,
3097 - .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3098 - },
3099 - .dec = {
3100 - .vecs = camellia_lrw_dec_tv_template,
3101 - .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3102 - }
3103 + .enc = __VECS(camellia_lrw_enc_tv_template),
3104 + .dec = __VECS(camellia_lrw_dec_tv_template)
3105 }
3106 }
3107 }, {
3108 @@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3109 .test = alg_test_skcipher,
3110 .suite = {
3111 .cipher = {
3112 - .enc = {
3113 - .vecs = cast6_lrw_enc_tv_template,
3114 - .count = CAST6_LRW_ENC_TEST_VECTORS
3115 - },
3116 - .dec = {
3117 - .vecs = cast6_lrw_dec_tv_template,
3118 - .count = CAST6_LRW_DEC_TEST_VECTORS
3119 - }
3120 + .enc = __VECS(cast6_lrw_enc_tv_template),
3121 + .dec = __VECS(cast6_lrw_dec_tv_template)
3122 }
3123 }
3124 }, {
3125 @@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3126 .test = alg_test_skcipher,
3127 .suite = {
3128 .cipher = {
3129 - .enc = {
3130 - .vecs = serpent_lrw_enc_tv_template,
3131 - .count = SERPENT_LRW_ENC_TEST_VECTORS
3132 - },
3133 - .dec = {
3134 - .vecs = serpent_lrw_dec_tv_template,
3135 - .count = SERPENT_LRW_DEC_TEST_VECTORS
3136 - }
3137 + .enc = __VECS(serpent_lrw_enc_tv_template),
3138 + .dec = __VECS(serpent_lrw_dec_tv_template)
3139 }
3140 }
3141 }, {
3142 @@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3143 .test = alg_test_skcipher,
3144 .suite = {
3145 .cipher = {
3146 - .enc = {
3147 - .vecs = tf_lrw_enc_tv_template,
3148 - .count = TF_LRW_ENC_TEST_VECTORS
3149 - },
3150 - .dec = {
3151 - .vecs = tf_lrw_dec_tv_template,
3152 - .count = TF_LRW_DEC_TEST_VECTORS
3153 - }
3154 + .enc = __VECS(tf_lrw_enc_tv_template),
3155 + .dec = __VECS(tf_lrw_dec_tv_template)
3156 }
3157 }
3158 }, {
3159 @@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3160 .fips_allowed = 1,
3161 .suite = {
3162 .comp = {
3163 - .comp = {
3164 - .vecs = lz4_comp_tv_template,
3165 - .count = LZ4_COMP_TEST_VECTORS
3166 - },
3167 - .decomp = {
3168 - .vecs = lz4_decomp_tv_template,
3169 - .count = LZ4_DECOMP_TEST_VECTORS
3170 - }
3171 + .comp = __VECS(lz4_comp_tv_template),
3172 + .decomp = __VECS(lz4_decomp_tv_template)
3173 }
3174 }
3175 }, {
3176 @@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3177 .fips_allowed = 1,
3178 .suite = {
3179 .comp = {
3180 - .comp = {
3181 - .vecs = lz4hc_comp_tv_template,
3182 - .count = LZ4HC_COMP_TEST_VECTORS
3183 - },
3184 - .decomp = {
3185 - .vecs = lz4hc_decomp_tv_template,
3186 - .count = LZ4HC_DECOMP_TEST_VECTORS
3187 - }
3188 + .comp = __VECS(lz4hc_comp_tv_template),
3189 + .decomp = __VECS(lz4hc_decomp_tv_template)
3190 }
3191 }
3192 }, {
3193 @@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_test_descs[] = {
3194 .fips_allowed = 1,
3195 .suite = {
3196 .comp = {
3197 - .comp = {
3198 - .vecs = lzo_comp_tv_template,
3199 - .count = LZO_COMP_TEST_VECTORS
3200 - },
3201 - .decomp = {
3202 - .vecs = lzo_decomp_tv_template,
3203 - .count = LZO_DECOMP_TEST_VECTORS
3204 - }
3205 + .comp = __VECS(lzo_comp_tv_template),
3206 + .decomp = __VECS(lzo_decomp_tv_template)
3207 }
3208 }
3209 }, {
3210 .alg = "md4",
3211 .test = alg_test_hash,
3212 .suite = {
3213 - .hash = {
3214 - .vecs = md4_tv_template,
3215 - .count = MD4_TEST_VECTORS
3216 - }
3217 + .hash = __VECS(md4_tv_template)
3218 }
3219 }, {
3220 .alg = "md5",
3221 .test = alg_test_hash,
3222 .suite = {
3223 - .hash = {
3224 - .vecs = md5_tv_template,
3225 - .count = MD5_TEST_VECTORS
3226 - }
3227 + .hash = __VECS(md5_tv_template)
3228 }
3229 }, {
3230 .alg = "michael_mic",
3231 .test = alg_test_hash,
3232 .suite = {
3233 - .hash = {
3234 - .vecs = michael_mic_tv_template,
3235 - .count = MICHAEL_MIC_TEST_VECTORS
3236 - }
3237 + .hash = __VECS(michael_mic_tv_template)
3238 }
3239 }, {
3240 .alg = "ofb(aes)",
3241 @@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3242 .fips_allowed = 1,
3243 .suite = {
3244 .cipher = {
3245 - .enc = {
3246 - .vecs = aes_ofb_enc_tv_template,
3247 - .count = AES_OFB_ENC_TEST_VECTORS
3248 - },
3249 - .dec = {
3250 - .vecs = aes_ofb_dec_tv_template,
3251 - .count = AES_OFB_DEC_TEST_VECTORS
3252 - }
3253 + .enc = __VECS(aes_ofb_enc_tv_template),
3254 + .dec = __VECS(aes_ofb_dec_tv_template)
3255 }
3256 }
3257 }, {
3258 @@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_test_descs[] = {
3259 .test = alg_test_skcipher,
3260 .suite = {
3261 .cipher = {
3262 - .enc = {
3263 - .vecs = fcrypt_pcbc_enc_tv_template,
3264 - .count = FCRYPT_ENC_TEST_VECTORS
3265 - },
3266 - .dec = {
3267 - .vecs = fcrypt_pcbc_dec_tv_template,
3268 - .count = FCRYPT_DEC_TEST_VECTORS
3269 - }
3270 + .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3271 + .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3272 }
3273 }
3274 }, {
3275 .alg = "poly1305",
3276 .test = alg_test_hash,
3277 .suite = {
3278 - .hash = {
3279 - .vecs = poly1305_tv_template,
3280 - .count = POLY1305_TEST_VECTORS
3281 - }
3282 + .hash = __VECS(poly1305_tv_template)
3283 }
3284 }, {
3285 .alg = "rfc3686(ctr(aes))",
3286 @@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3287 .fips_allowed = 1,
3288 .suite = {
3289 .cipher = {
3290 - .enc = {
3291 - .vecs = aes_ctr_rfc3686_enc_tv_template,
3292 - .count = AES_CTR_3686_ENC_TEST_VECTORS
3293 - },
3294 - .dec = {
3295 - .vecs = aes_ctr_rfc3686_dec_tv_template,
3296 - .count = AES_CTR_3686_DEC_TEST_VECTORS
3297 - }
3298 + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3299 + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3300 }
3301 }
3302 }, {
3303 @@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3304 .fips_allowed = 1,
3305 .suite = {
3306 .aead = {
3307 - .enc = {
3308 - .vecs = aes_gcm_rfc4106_enc_tv_template,
3309 - .count = AES_GCM_4106_ENC_TEST_VECTORS
3310 - },
3311 - .dec = {
3312 - .vecs = aes_gcm_rfc4106_dec_tv_template,
3313 - .count = AES_GCM_4106_DEC_TEST_VECTORS
3314 - }
3315 + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3316 + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3317 }
3318 }
3319 }, {
3320 @@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3321 .fips_allowed = 1,
3322 .suite = {
3323 .aead = {
3324 - .enc = {
3325 - .vecs = aes_ccm_rfc4309_enc_tv_template,
3326 - .count = AES_CCM_4309_ENC_TEST_VECTORS
3327 - },
3328 - .dec = {
3329 - .vecs = aes_ccm_rfc4309_dec_tv_template,
3330 - .count = AES_CCM_4309_DEC_TEST_VECTORS
3331 - }
3332 + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3333 + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3334 }
3335 }
3336 }, {
3337 @@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3338 .test = alg_test_aead,
3339 .suite = {
3340 .aead = {
3341 - .enc = {
3342 - .vecs = aes_gcm_rfc4543_enc_tv_template,
3343 - .count = AES_GCM_4543_ENC_TEST_VECTORS
3344 - },
3345 - .dec = {
3346 - .vecs = aes_gcm_rfc4543_dec_tv_template,
3347 - .count = AES_GCM_4543_DEC_TEST_VECTORS
3348 - },
3349 + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3350 + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3351 }
3352 }
3353 }, {
3354 @@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3355 .test = alg_test_aead,
3356 .suite = {
3357 .aead = {
3358 - .enc = {
3359 - .vecs = rfc7539_enc_tv_template,
3360 - .count = RFC7539_ENC_TEST_VECTORS
3361 - },
3362 - .dec = {
3363 - .vecs = rfc7539_dec_tv_template,
3364 - .count = RFC7539_DEC_TEST_VECTORS
3365 - },
3366 + .enc = __VECS(rfc7539_enc_tv_template),
3367 + .dec = __VECS(rfc7539_dec_tv_template),
3368 }
3369 }
3370 }, {
3371 @@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_test_descs[] = {
3372 .test = alg_test_aead,
3373 .suite = {
3374 .aead = {
3375 - .enc = {
3376 - .vecs = rfc7539esp_enc_tv_template,
3377 - .count = RFC7539ESP_ENC_TEST_VECTORS
3378 - },
3379 - .dec = {
3380 - .vecs = rfc7539esp_dec_tv_template,
3381 - .count = RFC7539ESP_DEC_TEST_VECTORS
3382 - },
3383 + .enc = __VECS(rfc7539esp_enc_tv_template),
3384 + .dec = __VECS(rfc7539esp_dec_tv_template),
3385 }
3386 }
3387 }, {
3388 .alg = "rmd128",
3389 .test = alg_test_hash,
3390 .suite = {
3391 - .hash = {
3392 - .vecs = rmd128_tv_template,
3393 - .count = RMD128_TEST_VECTORS
3394 - }
3395 + .hash = __VECS(rmd128_tv_template)
3396 }
3397 }, {
3398 .alg = "rmd160",
3399 .test = alg_test_hash,
3400 .suite = {
3401 - .hash = {
3402 - .vecs = rmd160_tv_template,
3403 - .count = RMD160_TEST_VECTORS
3404 - }
3405 + .hash = __VECS(rmd160_tv_template)
3406 }
3407 }, {
3408 .alg = "rmd256",
3409 .test = alg_test_hash,
3410 .suite = {
3411 - .hash = {
3412 - .vecs = rmd256_tv_template,
3413 - .count = RMD256_TEST_VECTORS
3414 - }
3415 + .hash = __VECS(rmd256_tv_template)
3416 }
3417 }, {
3418 .alg = "rmd320",
3419 .test = alg_test_hash,
3420 .suite = {
3421 - .hash = {
3422 - .vecs = rmd320_tv_template,
3423 - .count = RMD320_TEST_VECTORS
3424 - }
3425 + .hash = __VECS(rmd320_tv_template)
3426 }
3427 }, {
3428 .alg = "rsa",
3429 .test = alg_test_akcipher,
3430 .fips_allowed = 1,
3431 .suite = {
3432 - .akcipher = {
3433 - .vecs = rsa_tv_template,
3434 - .count = RSA_TEST_VECTORS
3435 - }
3436 + .akcipher = __VECS(rsa_tv_template)
3437 }
3438 }, {
3439 .alg = "salsa20",
3440 .test = alg_test_skcipher,
3441 .suite = {
3442 .cipher = {
3443 - .enc = {
3444 - .vecs = salsa20_stream_enc_tv_template,
3445 - .count = SALSA20_STREAM_ENC_TEST_VECTORS
3446 - }
3447 + .enc = __VECS(salsa20_stream_enc_tv_template)
3448 }
3449 }
3450 }, {
3451 @@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_test_descs[] = {
3452 .test = alg_test_hash,
3453 .fips_allowed = 1,
3454 .suite = {
3455 - .hash = {
3456 - .vecs = sha1_tv_template,
3457 - .count = SHA1_TEST_VECTORS
3458 - }
3459 + .hash = __VECS(sha1_tv_template)
3460 }
3461 }, {
3462 .alg = "sha224",
3463 .test = alg_test_hash,
3464 .fips_allowed = 1,
3465 .suite = {
3466 - .hash = {
3467 - .vecs = sha224_tv_template,
3468 - .count = SHA224_TEST_VECTORS
3469 - }
3470 + .hash = __VECS(sha224_tv_template)
3471 }
3472 }, {
3473 .alg = "sha256",
3474 .test = alg_test_hash,
3475 .fips_allowed = 1,
3476 .suite = {
3477 - .hash = {
3478 - .vecs = sha256_tv_template,
3479 - .count = SHA256_TEST_VECTORS
3480 - }
3481 + .hash = __VECS(sha256_tv_template)
3482 }
3483 }, {
3484 .alg = "sha3-224",
3485 .test = alg_test_hash,
3486 .fips_allowed = 1,
3487 .suite = {
3488 - .hash = {
3489 - .vecs = sha3_224_tv_template,
3490 - .count = SHA3_224_TEST_VECTORS
3491 - }
3492 + .hash = __VECS(sha3_224_tv_template)
3493 }
3494 }, {
3495 .alg = "sha3-256",
3496 .test = alg_test_hash,
3497 .fips_allowed = 1,
3498 .suite = {
3499 - .hash = {
3500 - .vecs = sha3_256_tv_template,
3501 - .count = SHA3_256_TEST_VECTORS
3502 - }
3503 + .hash = __VECS(sha3_256_tv_template)
3504 }
3505 }, {
3506 .alg = "sha3-384",
3507 .test = alg_test_hash,
3508 .fips_allowed = 1,
3509 .suite = {
3510 - .hash = {
3511 - .vecs = sha3_384_tv_template,
3512 - .count = SHA3_384_TEST_VECTORS
3513 - }
3514 + .hash = __VECS(sha3_384_tv_template)
3515 }
3516 }, {
3517 .alg = "sha3-512",
3518 .test = alg_test_hash,
3519 .fips_allowed = 1,
3520 .suite = {
3521 - .hash = {
3522 - .vecs = sha3_512_tv_template,
3523 - .count = SHA3_512_TEST_VECTORS
3524 - }
3525 + .hash = __VECS(sha3_512_tv_template)
3526 }
3527 }, {
3528 .alg = "sha384",
3529 .test = alg_test_hash,
3530 .fips_allowed = 1,
3531 .suite = {
3532 - .hash = {
3533 - .vecs = sha384_tv_template,
3534 - .count = SHA384_TEST_VECTORS
3535 - }
3536 + .hash = __VECS(sha384_tv_template)
3537 }
3538 }, {
3539 .alg = "sha512",
3540 .test = alg_test_hash,
3541 .fips_allowed = 1,
3542 .suite = {
3543 - .hash = {
3544 - .vecs = sha512_tv_template,
3545 - .count = SHA512_TEST_VECTORS
3546 - }
3547 + .hash = __VECS(sha512_tv_template)
3548 }
3549 }, {
3550 .alg = "tgr128",
3551 .test = alg_test_hash,
3552 .suite = {
3553 - .hash = {
3554 - .vecs = tgr128_tv_template,
3555 - .count = TGR128_TEST_VECTORS
3556 - }
3557 + .hash = __VECS(tgr128_tv_template)
3558 }
3559 }, {
3560 .alg = "tgr160",
3561 .test = alg_test_hash,
3562 .suite = {
3563 - .hash = {
3564 - .vecs = tgr160_tv_template,
3565 - .count = TGR160_TEST_VECTORS
3566 - }
3567 + .hash = __VECS(tgr160_tv_template)
3568 }
3569 }, {
3570 .alg = "tgr192",
3571 .test = alg_test_hash,
3572 .suite = {
3573 - .hash = {
3574 - .vecs = tgr192_tv_template,
3575 - .count = TGR192_TEST_VECTORS
3576 + .hash = __VECS(tgr192_tv_template)
3577 + }
3578 + }, {
3579 + .alg = "tls10(hmac(sha1),cbc(aes))",
3580 + .test = alg_test_tls,
3581 + .suite = {
3582 + .tls = {
3583 + .enc = __VECS(tls_enc_tv_template),
3584 + .dec = __VECS(tls_dec_tv_template)
3585 }
3586 }
3587 }, {
3588 .alg = "vmac(aes)",
3589 .test = alg_test_hash,
3590 .suite = {
3591 - .hash = {
3592 - .vecs = aes_vmac128_tv_template,
3593 - .count = VMAC_AES_TEST_VECTORS
3594 - }
3595 + .hash = __VECS(aes_vmac128_tv_template)
3596 }
3597 }, {
3598 .alg = "wp256",
3599 .test = alg_test_hash,
3600 .suite = {
3601 - .hash = {
3602 - .vecs = wp256_tv_template,
3603 - .count = WP256_TEST_VECTORS
3604 - }
3605 + .hash = __VECS(wp256_tv_template)
3606 }
3607 }, {
3608 .alg = "wp384",
3609 .test = alg_test_hash,
3610 .suite = {
3611 - .hash = {
3612 - .vecs = wp384_tv_template,
3613 - .count = WP384_TEST_VECTORS
3614 - }
3615 + .hash = __VECS(wp384_tv_template)
3616 }
3617 }, {
3618 .alg = "wp512",
3619 .test = alg_test_hash,
3620 .suite = {
3621 - .hash = {
3622 - .vecs = wp512_tv_template,
3623 - .count = WP512_TEST_VECTORS
3624 - }
3625 + .hash = __VECS(wp512_tv_template)
3626 }
3627 }, {
3628 .alg = "xcbc(aes)",
3629 .test = alg_test_hash,
3630 .suite = {
3631 - .hash = {
3632 - .vecs = aes_xcbc128_tv_template,
3633 - .count = XCBC_AES_TEST_VECTORS
3634 - }
3635 + .hash = __VECS(aes_xcbc128_tv_template)
3636 }
3637 }, {
3638 .alg = "xts(aes)",
3639 @@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3640 .fips_allowed = 1,
3641 .suite = {
3642 .cipher = {
3643 - .enc = {
3644 - .vecs = aes_xts_enc_tv_template,
3645 - .count = AES_XTS_ENC_TEST_VECTORS
3646 - },
3647 - .dec = {
3648 - .vecs = aes_xts_dec_tv_template,
3649 - .count = AES_XTS_DEC_TEST_VECTORS
3650 - }
3651 + .enc = __VECS(aes_xts_enc_tv_template),
3652 + .dec = __VECS(aes_xts_dec_tv_template)
3653 }
3654 }
3655 }, {
3656 @@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3657 .test = alg_test_skcipher,
3658 .suite = {
3659 .cipher = {
3660 - .enc = {
3661 - .vecs = camellia_xts_enc_tv_template,
3662 - .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3663 - },
3664 - .dec = {
3665 - .vecs = camellia_xts_dec_tv_template,
3666 - .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3667 - }
3668 + .enc = __VECS(camellia_xts_enc_tv_template),
3669 + .dec = __VECS(camellia_xts_dec_tv_template)
3670 }
3671 }
3672 }, {
3673 @@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3674 .test = alg_test_skcipher,
3675 .suite = {
3676 .cipher = {
3677 - .enc = {
3678 - .vecs = cast6_xts_enc_tv_template,
3679 - .count = CAST6_XTS_ENC_TEST_VECTORS
3680 - },
3681 - .dec = {
3682 - .vecs = cast6_xts_dec_tv_template,
3683 - .count = CAST6_XTS_DEC_TEST_VECTORS
3684 - }
3685 + .enc = __VECS(cast6_xts_enc_tv_template),
3686 + .dec = __VECS(cast6_xts_dec_tv_template)
3687 }
3688 }
3689 }, {
3690 @@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3691 .test = alg_test_skcipher,
3692 .suite = {
3693 .cipher = {
3694 - .enc = {
3695 - .vecs = serpent_xts_enc_tv_template,
3696 - .count = SERPENT_XTS_ENC_TEST_VECTORS
3697 - },
3698 - .dec = {
3699 - .vecs = serpent_xts_dec_tv_template,
3700 - .count = SERPENT_XTS_DEC_TEST_VECTORS
3701 - }
3702 + .enc = __VECS(serpent_xts_enc_tv_template),
3703 + .dec = __VECS(serpent_xts_dec_tv_template)
3704 }
3705 }
3706 }, {
3707 @@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3708 .test = alg_test_skcipher,
3709 .suite = {
3710 .cipher = {
3711 - .enc = {
3712 - .vecs = tf_xts_enc_tv_template,
3713 - .count = TF_XTS_ENC_TEST_VECTORS
3714 - },
3715 - .dec = {
3716 - .vecs = tf_xts_dec_tv_template,
3717 - .count = TF_XTS_DEC_TEST_VECTORS
3718 - }
3719 + .enc = __VECS(tf_xts_enc_tv_template),
3720 + .dec = __VECS(tf_xts_dec_tv_template)
3721 }
3722 }
3723 }
3724 diff --git a/crypto/testmgr.h b/crypto/testmgr.h
3725 index 9033088c..ce9f4334 100644
3726 --- a/crypto/testmgr.h
3727 +++ b/crypto/testmgr.h
3728 @@ -34,9 +34,9 @@
3729
3730 struct hash_testvec {
3731 /* only used with keyed hash algorithms */
3732 - char *key;
3733 - char *plaintext;
3734 - char *digest;
3735 + const char *key;
3736 + const char *plaintext;
3737 + const char *digest;
3738 unsigned char tap[MAX_TAP];
3739 unsigned short psize;
3740 unsigned char np;
3741 @@ -63,11 +63,11 @@ struct hash_testvec {
3742 */
3743
3744 struct cipher_testvec {
3745 - char *key;
3746 - char *iv;
3747 - char *iv_out;
3748 - char *input;
3749 - char *result;
3750 + const char *key;
3751 + const char *iv;
3752 + const char *iv_out;
3753 + const char *input;
3754 + const char *result;
3755 unsigned short tap[MAX_TAP];
3756 int np;
3757 unsigned char also_non_np;
3758 @@ -80,11 +80,11 @@ struct cipher_testvec {
3759 };
3760
3761 struct aead_testvec {
3762 - char *key;
3763 - char *iv;
3764 - char *input;
3765 - char *assoc;
3766 - char *result;
3767 + const char *key;
3768 + const char *iv;
3769 + const char *input;
3770 + const char *assoc;
3771 + const char *result;
3772 unsigned char tap[MAX_TAP];
3773 unsigned char atap[MAX_TAP];
3774 int np;
3775 @@ -99,10 +99,10 @@ struct aead_testvec {
3776 };
3777
3778 struct cprng_testvec {
3779 - char *key;
3780 - char *dt;
3781 - char *v;
3782 - char *result;
3783 + const char *key;
3784 + const char *dt;
3785 + const char *v;
3786 + const char *result;
3787 unsigned char klen;
3788 unsigned short dtlen;
3789 unsigned short vlen;
3790 @@ -111,24 +111,38 @@ struct cprng_testvec {
3791 };
3792
3793 struct drbg_testvec {
3794 - unsigned char *entropy;
3795 + const unsigned char *entropy;
3796 size_t entropylen;
3797 - unsigned char *entpra;
3798 - unsigned char *entprb;
3799 + const unsigned char *entpra;
3800 + const unsigned char *entprb;
3801 size_t entprlen;
3802 - unsigned char *addtla;
3803 - unsigned char *addtlb;
3804 + const unsigned char *addtla;
3805 + const unsigned char *addtlb;
3806 size_t addtllen;
3807 - unsigned char *pers;
3808 + const unsigned char *pers;
3809 size_t perslen;
3810 - unsigned char *expected;
3811 + const unsigned char *expected;
3812 size_t expectedlen;
3813 };
3814
3815 +struct tls_testvec {
3816 + char *key; /* wrapped keys for encryption and authentication */
3817 + char *iv; /* initialization vector */
3818 + char *input; /* input data */
3819 + char *assoc; /* associated data: seq num, type, version, input len */
3820 + char *result; /* result data */
3821 + unsigned char fail; /* the test failure is expected */
3822 + unsigned char novrfy; /* dec verification failure expected */
3823 + unsigned char klen; /* key length */
3824 + unsigned short ilen; /* input data length */
3825 + unsigned short alen; /* associated data length */
3826 + unsigned short rlen; /* result length */
3827 +};
3828 +
3829 struct akcipher_testvec {
3830 - unsigned char *key;
3831 - unsigned char *m;
3832 - unsigned char *c;
3833 + const unsigned char *key;
3834 + const unsigned char *m;
3835 + const unsigned char *c;
3836 unsigned int key_len;
3837 unsigned int m_size;
3838 unsigned int c_size;
3839 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3840 };
3841
3842 struct kpp_testvec {
3843 - unsigned char *secret;
3844 - unsigned char *b_public;
3845 - unsigned char *expected_a_public;
3846 - unsigned char *expected_ss;
3847 + const unsigned char *secret;
3848 + const unsigned char *b_public;
3849 + const unsigned char *expected_a_public;
3850 + const unsigned char *expected_ss;
3851 unsigned short secret_size;
3852 unsigned short b_public_size;
3853 unsigned short expected_a_public_size;
3854 unsigned short expected_ss_size;
3855 };
3856
3857 -static char zeroed_string[48];
3858 +static const char zeroed_string[48];
3859
3860 /*
3861 - * RSA test vectors. Borrowed from openSSL.
3862 + * TLS1.0 synthetic test vectors
3863 */
3864 -#ifdef CONFIG_CRYPTO_FIPS
3865 -#define RSA_TEST_VECTORS 2
3866 +static struct tls_testvec tls_enc_tv_template[] = {
3867 + {
3868 +#ifdef __LITTLE_ENDIAN
3869 + .key = "\x08\x00" /* rta length */
3870 + "\x01\x00" /* rta type */
3871 +#else
3872 + .key = "\x00\x08" /* rta length */
3873 + "\x00\x01" /* rta type */
3874 +#endif
3875 + "\x00\x00\x00\x10" /* enc key length */
3876 + "authenticationkey20benckeyis16_bytes",
3877 + .klen = 8 + 20 + 16,
3878 + .iv = "iv0123456789abcd",
3879 + .input = "Single block msg",
3880 + .ilen = 16,
3881 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3882 + "\x00\x03\x01\x00\x10",
3883 + .alen = 13,
3884 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3885 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3886 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3887 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3888 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3889 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3890 + .rlen = 16 + 20 + 12,
3891 + }, {
3892 +#ifdef __LITTLE_ENDIAN
3893 + .key = "\x08\x00" /* rta length */
3894 + "\x01\x00" /* rta type */
3895 #else
3896 -#define RSA_TEST_VECTORS 5
3897 + .key = "\x00\x08" /* rta length */
3898 + "\x00\x01" /* rta type */
3899 #endif
3900 -static struct akcipher_testvec rsa_tv_template[] = {
3901 + "\x00\x00\x00\x10" /* enc key length */
3902 + "authenticationkey20benckeyis16_bytes",
3903 + .klen = 8 + 20 + 16,
3904 + .iv = "iv0123456789abcd",
3905 + .input = "",
3906 + .ilen = 0,
3907 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3908 + "\x00\x03\x01\x00\x00",
3909 + .alen = 13,
3910 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3911 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3912 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3913 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3914 + .rlen = 20 + 12,
3915 + }, {
3916 +#ifdef __LITTLE_ENDIAN
3917 + .key = "\x08\x00" /* rta length */
3918 + "\x01\x00" /* rta type */
3919 +#else
3920 + .key = "\x00\x08" /* rta length */
3921 + "\x00\x01" /* rta type */
3922 +#endif
3923 + "\x00\x00\x00\x10" /* enc key length */
3924 + "authenticationkey20benckeyis16_bytes",
3925 + .klen = 8 + 20 + 16,
3926 + .iv = "iv0123456789abcd",
3927 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
3928 + " plaintext285 bytes plaintext285 bytes plaintext285"
3929 + " bytes plaintext285 bytes plaintext285 bytes"
3930 + " plaintext285 bytes plaintext285 bytes plaintext285"
3931 + " bytes plaintext285 bytes plaintext285 bytes"
3932 + " plaintext285 bytes plaintext285 bytes plaintext285"
3933 + " bytes plaintext285 bytes plaintext",
3934 + .ilen = 285,
3935 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3936 + "\x00\x03\x01\x01\x1d",
3937 + .alen = 13,
3938 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3939 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3940 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3941 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3942 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3943 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3944 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3945 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3946 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3947 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3948 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3949 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3950 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3951 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3952 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3953 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3954 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3955 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3956 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3957 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3958 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3959 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3960 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3961 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3962 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3963 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3964 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3965 + .rlen = 285 + 20 + 15,
3966 + }
3967 +};
3968 +
3969 +static struct tls_testvec tls_dec_tv_template[] = {
3970 + {
3971 +#ifdef __LITTLE_ENDIAN
3972 + .key = "\x08\x00" /* rta length */
3973 + "\x01\x00" /* rta type */
3974 +#else
3975 + .key = "\x00\x08" /* rta length */
3976 + "\x00\x01" /* rta type */
3977 +#endif
3978 + "\x00\x00\x00\x10" /* enc key length */
3979 + "authenticationkey20benckeyis16_bytes",
3980 + .klen = 8 + 20 + 16,
3981 + .iv = "iv0123456789abcd",
3982 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3983 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3984 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3985 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3986 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3987 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3988 + .ilen = 16 + 20 + 12,
3989 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3990 + "\x00\x03\x01\x00\x30",
3991 + .alen = 13,
3992 + .result = "Single block msg",
3993 + .rlen = 16,
3994 + }, {
3995 +#ifdef __LITTLE_ENDIAN
3996 + .key = "\x08\x00" /* rta length */
3997 + "\x01\x00" /* rta type */
3998 +#else
3999 + .key = "\x00\x08" /* rta length */
4000 + "\x00\x01" /* rta type */
4001 +#endif
4002 + "\x00\x00\x00\x10" /* enc key length */
4003 + "authenticationkey20benckeyis16_bytes",
4004 + .klen = 8 + 20 + 16,
4005 + .iv = "iv0123456789abcd",
4006 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
4007 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
4008 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
4009 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
4010 + .ilen = 20 + 12,
4011 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4012 + "\x00\x03\x01\x00\x20",
4013 + .alen = 13,
4014 + .result = "",
4015 + .rlen = 0,
4016 + }, {
4017 +#ifdef __LITTLE_ENDIAN
4018 + .key = "\x08\x00" /* rta length */
4019 + "\x01\x00" /* rta type */
4020 +#else
4021 + .key = "\x00\x08" /* rta length */
4022 + "\x00\x01" /* rta type */
4023 +#endif
4024 + "\x00\x00\x00\x10" /* enc key length */
4025 + "authenticationkey20benckeyis16_bytes",
4026 + .klen = 8 + 20 + 16,
4027 + .iv = "iv0123456789abcd",
4028 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4029 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4030 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4031 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4032 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4033 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4034 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4035 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4036 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4037 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4038 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4039 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4040 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4041 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4042 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4043 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4044 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4045 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4046 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4047 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4048 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4049 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4050 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4051 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4052 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4053 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4054 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4055 +
4056 + .ilen = 285 + 20 + 15,
4057 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4058 + "\x00\x03\x01\x01\x40",
4059 + .alen = 13,
4060 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4061 + " plaintext285 bytes plaintext285 bytes plaintext285"
4062 + " bytes plaintext285 bytes plaintext285 bytes"
4063 + " plaintext285 bytes plaintext285 bytes plaintext285"
4064 + " bytes plaintext285 bytes plaintext285 bytes"
4065 + " plaintext285 bytes plaintext285 bytes plaintext",
4066 + .rlen = 285,
4067 + }
4068 +};
4069 +
4070 +/*
4071 + * RSA test vectors. Borrowed from openSSL.
4072 + */
4073 +static const struct akcipher_testvec rsa_tv_template[] = {
4074 {
4075 #ifndef CONFIG_CRYPTO_FIPS
4076 .key =
4077 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_template[] = {
4078 .m_size = 8,
4079 .c_size = 256,
4080 .public_key_vec = true,
4081 +#ifndef CONFIG_CRYPTO_FIPS
4082 }, {
4083 .key =
4084 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4085 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_template[] = {
4086 .key_len = 2349,
4087 .m_size = 8,
4088 .c_size = 512,
4089 +#endif
4090 }
4091 };
4092
4093 -#define DH_TEST_VECTORS 2
4094 -
4095 -struct kpp_testvec dh_tv_template[] = {
4096 +static const struct kpp_testvec dh_tv_template[] = {
4097 {
4098 .secret =
4099 #ifdef __LITTLE_ENDIAN
4100 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4101 }
4102 };
4103
4104 -#ifdef CONFIG_CRYPTO_FIPS
4105 -#define ECDH_TEST_VECTORS 1
4106 -#else
4107 -#define ECDH_TEST_VECTORS 2
4108 -#endif
4109 -struct kpp_testvec ecdh_tv_template[] = {
4110 +static const struct kpp_testvec ecdh_tv_template[] = {
4111 {
4112 #ifndef CONFIG_CRYPTO_FIPS
4113 .secret =
4114 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] = {
4115 /*
4116 * MD4 test vectors from RFC1320
4117 */
4118 -#define MD4_TEST_VECTORS 7
4119 -
4120 -static struct hash_testvec md4_tv_template [] = {
4121 +static const struct hash_testvec md4_tv_template[] = {
4122 {
4123 .plaintext = "",
4124 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4125 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_template [] = {
4126 },
4127 };
4128
4129 -#define SHA3_224_TEST_VECTORS 3
4130 -static struct hash_testvec sha3_224_tv_template[] = {
4131 +static const struct hash_testvec sha3_224_tv_template[] = {
4132 {
4133 .plaintext = "",
4134 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4135 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_template[] = {
4136 },
4137 };
4138
4139 -#define SHA3_256_TEST_VECTORS 3
4140 -static struct hash_testvec sha3_256_tv_template[] = {
4141 +static const struct hash_testvec sha3_256_tv_template[] = {
4142 {
4143 .plaintext = "",
4144 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4145 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_template[] = {
4146 };
4147
4148
4149 -#define SHA3_384_TEST_VECTORS 3
4150 -static struct hash_testvec sha3_384_tv_template[] = {
4151 +static const struct hash_testvec sha3_384_tv_template[] = {
4152 {
4153 .plaintext = "",
4154 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4155 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_template[] = {
4156 };
4157
4158
4159 -#define SHA3_512_TEST_VECTORS 3
4160 -static struct hash_testvec sha3_512_tv_template[] = {
4161 +static const struct hash_testvec sha3_512_tv_template[] = {
4162 {
4163 .plaintext = "",
4164 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4165 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_template[] = {
4166 /*
4167 * MD5 test vectors from RFC1321
4168 */
4169 -#define MD5_TEST_VECTORS 7
4170 -
4171 -static struct hash_testvec md5_tv_template[] = {
4172 +static const struct hash_testvec md5_tv_template[] = {
4173 {
4174 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4175 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4176 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_template[] = {
4177 /*
4178 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4179 */
4180 -#define RMD128_TEST_VECTORS 10
4181 -
4182 -static struct hash_testvec rmd128_tv_template[] = {
4183 +static const struct hash_testvec rmd128_tv_template[] = {
4184 {
4185 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4186 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4187 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_template[] = {
4188 /*
4189 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4190 */
4191 -#define RMD160_TEST_VECTORS 10
4192 -
4193 -static struct hash_testvec rmd160_tv_template[] = {
4194 +static const struct hash_testvec rmd160_tv_template[] = {
4195 {
4196 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4197 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4198 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_template[] = {
4199 /*
4200 * RIPEMD-256 test vectors
4201 */
4202 -#define RMD256_TEST_VECTORS 8
4203 -
4204 -static struct hash_testvec rmd256_tv_template[] = {
4205 +static const struct hash_testvec rmd256_tv_template[] = {
4206 {
4207 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4208 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4209 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_template[] = {
4210 /*
4211 * RIPEMD-320 test vectors
4212 */
4213 -#define RMD320_TEST_VECTORS 8
4214 -
4215 -static struct hash_testvec rmd320_tv_template[] = {
4216 +static const struct hash_testvec rmd320_tv_template[] = {
4217 {
4218 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4219 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4220 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_template[] = {
4221 }
4222 };
4223
4224 -#define CRCT10DIF_TEST_VECTORS 3
4225 -static struct hash_testvec crct10dif_tv_template[] = {
4226 +static const struct hash_testvec crct10dif_tv_template[] = {
4227 {
4228 - .plaintext = "abc",
4229 - .psize = 3,
4230 -#ifdef __LITTLE_ENDIAN
4231 - .digest = "\x3b\x44",
4232 -#else
4233 - .digest = "\x44\x3b",
4234 -#endif
4235 - }, {
4236 - .plaintext = "1234567890123456789012345678901234567890"
4237 - "123456789012345678901234567890123456789",
4238 - .psize = 79,
4239 -#ifdef __LITTLE_ENDIAN
4240 - .digest = "\x70\x4b",
4241 -#else
4242 - .digest = "\x4b\x70",
4243 -#endif
4244 - }, {
4245 - .plaintext =
4246 - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4247 - .psize = 56,
4248 -#ifdef __LITTLE_ENDIAN
4249 - .digest = "\xe3\x9c",
4250 -#else
4251 - .digest = "\x9c\xe3",
4252 -#endif
4253 - .np = 2,
4254 - .tap = { 28, 28 }
4255 + .plaintext = "abc",
4256 + .psize = 3,
4257 + .digest = (u8 *)(u16 []){ 0x443b },
4258 + }, {
4259 + .plaintext = "1234567890123456789012345678901234567890"
4260 + "123456789012345678901234567890123456789",
4261 + .psize = 79,
4262 + .digest = (u8 *)(u16 []){ 0x4b70 },
4263 + .np = 2,
4264 + .tap = { 63, 16 },
4265 + }, {
4266 + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
4267 + "ddddddddddddd",
4268 + .psize = 56,
4269 + .digest = (u8 *)(u16 []){ 0x9ce3 },
4270 + .np = 8,
4271 + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
4272 + }, {
4273 + .plaintext = "1234567890123456789012345678901234567890"
4274 + "1234567890123456789012345678901234567890"
4275 + "1234567890123456789012345678901234567890"
4276 + "1234567890123456789012345678901234567890"
4277 + "1234567890123456789012345678901234567890"
4278 + "1234567890123456789012345678901234567890"
4279 + "1234567890123456789012345678901234567890"
4280 + "123456789012345678901234567890123456789",
4281 + .psize = 319,
4282 + .digest = (u8 *)(u16 []){ 0x44c6 },
4283 + }, {
4284 + .plaintext = "1234567890123456789012345678901234567890"
4285 + "1234567890123456789012345678901234567890"
4286 + "1234567890123456789012345678901234567890"
4287 + "1234567890123456789012345678901234567890"
4288 + "1234567890123456789012345678901234567890"
4289 + "1234567890123456789012345678901234567890"
4290 + "1234567890123456789012345678901234567890"
4291 + "123456789012345678901234567890123456789",
4292 + .psize = 319,
4293 + .digest = (u8 *)(u16 []){ 0x44c6 },
4294 + .np = 4,
4295 + .tap = { 1, 255, 57, 6 },
4296 }
4297 };
4298
4299 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_template[] = {
4300  * SHA1 test vectors from FIPS PUB 180-1
4301 * Long vector from CAVS 5.0
4302 */
4303 -#define SHA1_TEST_VECTORS 6
4304 -
4305 -static struct hash_testvec sha1_tv_template[] = {
4306 +static const struct hash_testvec sha1_tv_template[] = {
4307 {
4308 .plaintext = "",
4309 .psize = 0,
4310 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_template[] = {
4311 /*
4312  * SHA224 test vectors from FIPS PUB 180-2
4313 */
4314 -#define SHA224_TEST_VECTORS 5
4315 -
4316 -static struct hash_testvec sha224_tv_template[] = {
4317 +static const struct hash_testvec sha224_tv_template[] = {
4318 {
4319 .plaintext = "",
4320 .psize = 0,
4321 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_template[] = {
4322 /*
4323  * SHA256 test vectors from NIST
4324 */
4325 -#define SHA256_TEST_VECTORS 5
4326 -
4327 -static struct hash_testvec sha256_tv_template[] = {
4328 +static const struct hash_testvec sha256_tv_template[] = {
4329 {
4330 .plaintext = "",
4331 .psize = 0,
4332 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_template[] = {
4333 /*
4334  * SHA384 test vectors from NIST and kerneli
4335 */
4336 -#define SHA384_TEST_VECTORS 6
4337 -
4338 -static struct hash_testvec sha384_tv_template[] = {
4339 +static const struct hash_testvec sha384_tv_template[] = {
4340 {
4341 .plaintext = "",
4342 .psize = 0,
4343 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_template[] = {
4344 /*
4345  * SHA512 test vectors from NIST and kerneli
4346 */
4347 -#define SHA512_TEST_VECTORS 6
4348 -
4349 -static struct hash_testvec sha512_tv_template[] = {
4350 +static const struct hash_testvec sha512_tv_template[] = {
4351 {
4352 .plaintext = "",
4353 .psize = 0,
4354 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_template[] = {
4355 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4356 * submission
4357 */
4358 -#define WP512_TEST_VECTORS 8
4359 -
4360 -static struct hash_testvec wp512_tv_template[] = {
4361 +static const struct hash_testvec wp512_tv_template[] = {
4362 {
4363 .plaintext = "",
4364 .psize = 0,
4365 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_template[] = {
4366 },
4367 };
4368
4369 -#define WP384_TEST_VECTORS 8
4370 -
4371 -static struct hash_testvec wp384_tv_template[] = {
4372 +static const struct hash_testvec wp384_tv_template[] = {
4373 {
4374 .plaintext = "",
4375 .psize = 0,
4376 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_template[] = {
4377 },
4378 };
4379
4380 -#define WP256_TEST_VECTORS 8
4381 -
4382 -static struct hash_testvec wp256_tv_template[] = {
4383 +static const struct hash_testvec wp256_tv_template[] = {
4384 {
4385 .plaintext = "",
4386 .psize = 0,
4387 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_template[] = {
4388 /*
4389 * TIGER test vectors from Tiger website
4390 */
4391 -#define TGR192_TEST_VECTORS 6
4392 -
4393 -static struct hash_testvec tgr192_tv_template[] = {
4394 +static const struct hash_testvec tgr192_tv_template[] = {
4395 {
4396 .plaintext = "",
4397 .psize = 0,
4398 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_template[] = {
4399 },
4400 };
4401
4402 -#define TGR160_TEST_VECTORS 6
4403 -
4404 -static struct hash_testvec tgr160_tv_template[] = {
4405 +static const struct hash_testvec tgr160_tv_template[] = {
4406 {
4407 .plaintext = "",
4408 .psize = 0,
4409 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_template[] = {
4410 },
4411 };
4412
4413 -#define TGR128_TEST_VECTORS 6
4414 -
4415 -static struct hash_testvec tgr128_tv_template[] = {
4416 +static const struct hash_testvec tgr128_tv_template[] = {
4417 {
4418 .plaintext = "",
4419 .psize = 0,
4420 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_template[] = {
4421 },
4422 };
4423
4424 -#define GHASH_TEST_VECTORS 6
4425 -
4426 -static struct hash_testvec ghash_tv_template[] =
4427 +static const struct hash_testvec ghash_tv_template[] =
4428 {
4429 {
4430 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4431 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_template[] =
4432 * HMAC-MD5 test vectors from RFC2202
4433 * (These need to be fixed to not use strlen).
4434 */
4435 -#define HMAC_MD5_TEST_VECTORS 7
4436 -
4437 -static struct hash_testvec hmac_md5_tv_template[] =
4438 +static const struct hash_testvec hmac_md5_tv_template[] =
4439 {
4440 {
4441 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4442 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_template[] =
4443 /*
4444 * HMAC-RIPEMD128 test vectors from RFC2286
4445 */
4446 -#define HMAC_RMD128_TEST_VECTORS 7
4447 -
4448 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4449 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4450 {
4451 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4452 .ksize = 16,
4453 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_tv_template[] = {
4454 /*
4455 * HMAC-RIPEMD160 test vectors from RFC2286
4456 */
4457 -#define HMAC_RMD160_TEST_VECTORS 7
4458 -
4459 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4460 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4461 {
4462 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4463 .ksize = 20,
4464 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_tv_template[] = {
4465 /*
4466 * HMAC-SHA1 test vectors from RFC2202
4467 */
4468 -#define HMAC_SHA1_TEST_VECTORS 7
4469 -
4470 -static struct hash_testvec hmac_sha1_tv_template[] = {
4471 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4472 {
4473 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4474 .ksize = 20,
4475 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_template[] = {
4476 /*
4477 * SHA224 HMAC test vectors from RFC4231
4478 */
4479 -#define HMAC_SHA224_TEST_VECTORS 4
4480 -
4481 -static struct hash_testvec hmac_sha224_tv_template[] = {
4482 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4483 {
4484 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4485 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4486 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_tv_template[] = {
4487 * HMAC-SHA256 test vectors from
4488 * draft-ietf-ipsec-ciph-sha-256-01.txt
4489 */
4490 -#define HMAC_SHA256_TEST_VECTORS 10
4491 -
4492 -static struct hash_testvec hmac_sha256_tv_template[] = {
4493 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4494 {
4495 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
4496 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4497 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_tv_template[] = {
4498 },
4499 };
4500
4501 -#define CMAC_AES_TEST_VECTORS 6
4502 -
4503 -static struct hash_testvec aes_cmac128_tv_template[] = {
4504 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4505 { /* From NIST Special Publication 800-38B, AES-128 */
4506 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4507 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4508 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_tv_template[] = {
4509 }
4510 };
4511
4512 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4513 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4514 + {
4515 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4516 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4517 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4518 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4519 + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4520 + "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4521 + .psize = 16,
4522 + .ksize = 16,
4523 + }, {
4524 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4525 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4526 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4527 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4528 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4529 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4530 + "\x30",
4531 + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4532 + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4533 + .psize = 33,
4534 + .ksize = 16,
4535 + .np = 2,
4536 + .tap = { 7, 26 },
4537 + }, {
4538 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4539 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4540 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4541 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4542 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4543 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4544 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4545 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4546 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4547 + "\xad\x2b\x41\x7b\xe6\x6c\x37",
4548 + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4549 + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4550 + .psize = 63,
4551 + .ksize = 16,
4552 + }, {
4553 + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4554 + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4555 + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4556 + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4557 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4558 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4559 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4560 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4561 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4562 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4563 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4564 + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4565 + "\x1c",
4566 + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4567 + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4568 + .psize = 65,
4569 + .ksize = 32,
4570 + }
4571 +};
4572
4573 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4574 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4575 /*
4576 * From NIST Special Publication 800-38B, Three Key TDEA
4577 * Corrected test vectors from:
4578 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4579 }
4580 };
4581
4582 -#define XCBC_AES_TEST_VECTORS 6
4583 -
4584 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4585 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4586 {
4587 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4588 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4589 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
4590 }
4591 };
4592
4593 -#define VMAC_AES_TEST_VECTORS 11
4594 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4595 - '\x02', '\x03', '\x02', '\x02',
4596 - '\x02', '\x04', '\x01', '\x07',
4597 - '\x04', '\x01', '\x04', '\x03',};
4598 -static char vmac_string2[128] = {'a', 'b', 'c',};
4599 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4600 - 'a', 'b', 'c', 'a', 'b', 'c',
4601 - 'a', 'b', 'c', 'a', 'b', 'c',
4602 - 'a', 'b', 'c', 'a', 'b', 'c',
4603 - 'a', 'b', 'c', 'a', 'b', 'c',
4604 - 'a', 'b', 'c', 'a', 'b', 'c',
4605 - 'a', 'b', 'c', 'a', 'b', 'c',
4606 - 'a', 'b', 'c', 'a', 'b', 'c',
4607 - };
4608 -
4609 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4610 - 'i', 'j', 'l', 'm',
4611 - 'o', 'p', 'r', 's',
4612 - 't', 'u', 'w', 'x', 'z'};
4613 -
4614 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4615 - 'o', 'l', 'k', ']', '%',
4616 - '9', '2', '7', '!', 'A'};
4617 -
4618 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4619 - 'i', '!', '#', 'w', '0',
4620 - 'z', '/', '4', 'A', 'n'};
4621 -
4622 -static struct hash_testvec aes_vmac128_tv_template[] = {
4623 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4624 + '\x02', '\x03', '\x02', '\x02',
4625 + '\x02', '\x04', '\x01', '\x07',
4626 + '\x04', '\x01', '\x04', '\x03',};
4627 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4628 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4629 + 'a', 'b', 'c', 'a', 'b', 'c',
4630 + 'a', 'b', 'c', 'a', 'b', 'c',
4631 + 'a', 'b', 'c', 'a', 'b', 'c',
4632 + 'a', 'b', 'c', 'a', 'b', 'c',
4633 + 'a', 'b', 'c', 'a', 'b', 'c',
4634 + 'a', 'b', 'c', 'a', 'b', 'c',
4635 + 'a', 'b', 'c', 'a', 'b', 'c',
4636 + };
4637 +
4638 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4639 + 'i', 'j', 'l', 'm',
4640 + 'o', 'p', 'r', 's',
4641 + 't', 'u', 'w', 'x', 'z'};
4642 +
4643 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4644 + 'o', 'l', 'k', ']', '%',
4645 + '9', '2', '7', '!', 'A'};
4646 +
4647 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4648 + 'i', '!', '#', 'w', '0',
4649 + 'z', '/', '4', 'A', 'n'};
4650 +
4651 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4652 {
4653 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4654 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4655 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_tv_template[] = {
4656 * SHA384 HMAC test vectors from RFC4231
4657 */
4658
4659 -#define HMAC_SHA384_TEST_VECTORS 4
4660 -
4661 -static struct hash_testvec hmac_sha384_tv_template[] = {
4662 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4663 {
4664 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4665 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4666 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_tv_template[] = {
4667 * SHA512 HMAC test vectors from RFC4231
4668 */
4669
4670 -#define HMAC_SHA512_TEST_VECTORS 4
4671 -
4672 -static struct hash_testvec hmac_sha512_tv_template[] = {
4673 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4674 {
4675 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4676 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4677 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
4678 },
4679 };
4680
4681 -#define HMAC_SHA3_224_TEST_VECTORS 4
4682 -
4683 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4684 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4685 {
4686 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4687 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4688 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224_tv_template[] = {
4689 },
4690 };
4691
4692 -#define HMAC_SHA3_256_TEST_VECTORS 4
4693 -
4694 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4695 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4696 {
4697 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4698 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4699 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256_tv_template[] = {
4700 },
4701 };
4702
4703 -#define HMAC_SHA3_384_TEST_VECTORS 4
4704 -
4705 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4706 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4707 {
4708 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4709 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4710 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384_tv_template[] = {
4711 },
4712 };
4713
4714 -#define HMAC_SHA3_512_TEST_VECTORS 4
4715 -
4716 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4717 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4718 {
4719 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4720 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4721 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512_tv_template[] = {
4722 * Poly1305 test vectors from RFC7539 A.3.
4723 */
4724
4725 -#define POLY1305_TEST_VECTORS 11
4726 -
4727 -static struct hash_testvec poly1305_tv_template[] = {
4728 +static const struct hash_testvec poly1305_tv_template[] = {
4729 { /* Test Vector #1 */
4730 .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
4731 "\x00\x00\x00\x00\x00\x00\x00\x00"
4732 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_template[] = {
4733 /*
4734 * DES test vectors.
4735 */
4736 -#define DES_ENC_TEST_VECTORS 11
4737 -#define DES_DEC_TEST_VECTORS 5
4738 -#define DES_CBC_ENC_TEST_VECTORS 6
4739 -#define DES_CBC_DEC_TEST_VECTORS 5
4740 -#define DES_CTR_ENC_TEST_VECTORS 2
4741 -#define DES_CTR_DEC_TEST_VECTORS 2
4742 -#define DES3_EDE_ENC_TEST_VECTORS 4
4743 -#define DES3_EDE_DEC_TEST_VECTORS 4
4744 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
4745 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
4746 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
4747 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
4748 -
4749 -static struct cipher_testvec des_enc_tv_template[] = {
4750 +static const struct cipher_testvec des_enc_tv_template[] = {
4751 { /* From Applied Cryptography */
4752 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4753 .klen = 8,
4754 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_template[] = {
4755 },
4756 };
4757
4758 -static struct cipher_testvec des_dec_tv_template[] = {
4759 +static const struct cipher_testvec des_dec_tv_template[] = {
4760 { /* From Applied Cryptography */
4761 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4762 .klen = 8,
4763 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_template[] = {
4764 },
4765 };
4766
4767 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4768 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4769 { /* From OpenSSL */
4770 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4771 .klen = 8,
4772 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
4773 },
4774 };
4775
4776 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4777 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4778 { /* FIPS Pub 81 */
4779 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4780 .klen = 8,
4781 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
4782 },
4783 };
4784
4785 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4786 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4787 { /* Generated with Crypto++ */
4788 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4789 .klen = 8,
4790 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = {
4791 },
4792 };
4793
4794 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4795 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4796 { /* Generated with Crypto++ */
4797 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4798 .klen = 8,
4799 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = {
4800 },
4801 };
4802
4803 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4804 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4805 { /* These are from openssl */
4806 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4807 "\x55\x55\x55\x55\x55\x55\x55\x55"
4808 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
4809 },
4810 };
4811
4812 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4813 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4814 { /* These are from openssl */
4815 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4816 "\x55\x55\x55\x55\x55\x55\x55\x55"
4817 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
4818 },
4819 };
4820
4821 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4822 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4823 { /* Generated from openssl */
4824 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4825 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4826 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4827 },
4828 };
4829
4830 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4831 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4832 { /* Generated from openssl */
4833 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4834 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4835 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4836 },
4837 };
4838
4839 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4840 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4841 { /* Generated with Crypto++ */
4842 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4843 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4844 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4845 },
4846 };
4847
4848 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4849 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4850 { /* Generated with Crypto++ */
4851 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4852 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4853 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4854 /*
4855 * Blowfish test vectors.
4856 */
4857 -#define BF_ENC_TEST_VECTORS 7
4858 -#define BF_DEC_TEST_VECTORS 7
4859 -#define BF_CBC_ENC_TEST_VECTORS 2
4860 -#define BF_CBC_DEC_TEST_VECTORS 2
4861 -#define BF_CTR_ENC_TEST_VECTORS 2
4862 -#define BF_CTR_DEC_TEST_VECTORS 2
4863 -
4864 -static struct cipher_testvec bf_enc_tv_template[] = {
4865 +static const struct cipher_testvec bf_enc_tv_template[] = {
4866 { /* DES test vectors from OpenSSL */
4867 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4868 .klen = 8,
4869 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_template[] = {
4870 },
4871 };
4872
4873 -static struct cipher_testvec bf_dec_tv_template[] = {
4874 +static const struct cipher_testvec bf_dec_tv_template[] = {
4875 { /* DES test vectors from OpenSSL */
4876 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4877 .klen = 8,
4878 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_template[] = {
4879 },
4880 };
4881
4882 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4883 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4884 { /* From OpenSSL */
4885 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4886 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4887 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4888 },
4889 };
4890
4891 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4892 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4893 { /* From OpenSSL */
4894 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4895 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4896 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4897 },
4898 };
4899
4900 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4901 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4902 { /* Generated with Crypto++ */
4903 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4904 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4905 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4906 },
4907 };
4908
4909 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4910 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4911 { /* Generated with Crypto++ */
4912 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4913 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4914 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4915 /*
4916 * Twofish test vectors.
4917 */
4918 -#define TF_ENC_TEST_VECTORS 4
4919 -#define TF_DEC_TEST_VECTORS 4
4920 -#define TF_CBC_ENC_TEST_VECTORS 5
4921 -#define TF_CBC_DEC_TEST_VECTORS 5
4922 -#define TF_CTR_ENC_TEST_VECTORS 2
4923 -#define TF_CTR_DEC_TEST_VECTORS 2
4924 -#define TF_LRW_ENC_TEST_VECTORS 8
4925 -#define TF_LRW_DEC_TEST_VECTORS 8
4926 -#define TF_XTS_ENC_TEST_VECTORS 5
4927 -#define TF_XTS_DEC_TEST_VECTORS 5
4928 -
4929 -static struct cipher_testvec tf_enc_tv_template[] = {
4930 +static const struct cipher_testvec tf_enc_tv_template[] = {
4931 {
4932 .key = zeroed_string,
4933 .klen = 16,
4934 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_template[] = {
4935 },
4936 };
4937
4938 -static struct cipher_testvec tf_dec_tv_template[] = {
4939 +static const struct cipher_testvec tf_dec_tv_template[] = {
4940 {
4941 .key = zeroed_string,
4942 .klen = 16,
4943 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_template[] = {
4944 },
4945 };
4946
4947 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4948 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4949 { /* Generated with Nettle */
4950 .key = zeroed_string,
4951 .klen = 16,
4952 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4953 },
4954 };
4955
4956 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4957 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4958 { /* Reverse of the first four above */
4959 .key = zeroed_string,
4960 .klen = 16,
4961 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4962 },
4963 };
4964
4965 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4966 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4967 { /* Generated with Crypto++ */
4968 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4969 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4970 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4971 },
4972 };
4973
4974 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4975 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4976 { /* Generated with Crypto++ */
4977 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4978 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4979 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4980 },
4981 };
4982
4983 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4984 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4985 /* Generated from AES-LRW test vectors */
4986 {
4987 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4988 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4989 },
4990 };
4991
4992 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4993 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
4994 /* Generated from AES-LRW test vectors */
4995 /* same as enc vectors with input and result reversed */
4996 {
4997 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4998 },
4999 };
5000
5001 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
5002 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
5003 /* Generated from AES-XTS test vectors */
5004 {
5005 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5006 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = {
5007 },
5008 };
5009
5010 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
5011 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
5012 /* Generated from AES-XTS test vectors */
5013 /* same as enc vectors with input and result reversed */
5014 {
5015 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = {
5016 * Serpent test vectors. These are backwards because Serpent writes
5017 * octet sequences in right-to-left mode.
5018 */
5019 -#define SERPENT_ENC_TEST_VECTORS 5
5020 -#define SERPENT_DEC_TEST_VECTORS 5
5021 -
5022 -#define TNEPRES_ENC_TEST_VECTORS 4
5023 -#define TNEPRES_DEC_TEST_VECTORS 4
5024 -
5025 -#define SERPENT_CBC_ENC_TEST_VECTORS 1
5026 -#define SERPENT_CBC_DEC_TEST_VECTORS 1
5027 -
5028 -#define SERPENT_CTR_ENC_TEST_VECTORS 2
5029 -#define SERPENT_CTR_DEC_TEST_VECTORS 2
5030 -
5031 -#define SERPENT_LRW_ENC_TEST_VECTORS 8
5032 -#define SERPENT_LRW_DEC_TEST_VECTORS 8
5033 -
5034 -#define SERPENT_XTS_ENC_TEST_VECTORS 5
5035 -#define SERPENT_XTS_DEC_TEST_VECTORS 5
5036 -
5037 -static struct cipher_testvec serpent_enc_tv_template[] = {
5038 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5039 {
5040 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
5041 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5042 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
5043 },
5044 };
5045
5046 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5047 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5048 { /* KeySize=128, PT=0, I=1 */
5049 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5050 "\x00\x00\x00\x00\x00\x00\x00\x00",
5051 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc_tv_template[] = {
5052 };
5053
5054
5055 -static struct cipher_testvec serpent_dec_tv_template[] = {
5056 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5057 {
5058 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5059 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5060 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
5061 },
5062 };
5063
5064 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5065 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5066 {
5067 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5068 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5069 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec_tv_template[] = {
5070 },
5071 };
5072
5073 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5074 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5075 { /* Generated with Crypto++ */
5076 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5077 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5078 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5079 },
5080 };
5081
5082 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5083 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5084 { /* Generated with Crypto++ */
5085 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5086 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5087 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5088 },
5089 };
5090
5091 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5092 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5093 { /* Generated with Crypto++ */
5094 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5095 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5096 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5097 },
5098 };
5099
5100 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5101 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5102 { /* Generated with Crypto++ */
5103 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5104 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5105 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5106 },
5107 };
5108
5109 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5110 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5111 /* Generated from AES-LRW test vectors */
5112 {
5113 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5114 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5115 },
5116 };
5117
5118 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5119 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5120 /* Generated from AES-LRW test vectors */
5121 /* same as enc vectors with input and result reversed */
5122 {
5123 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5124 },
5125 };
5126
5127 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5128 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5129 /* Generated from AES-XTS test vectors */
5130 {
5131 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5132 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5133 },
5134 };
5135
5136 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5137 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5138 /* Generated from AES-XTS test vectors */
5139 /* same as enc vectors with input and result reversed */
5140 {
5141 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5142 };
5143
5144 /* Cast6 test vectors from RFC 2612 */
5145 -#define CAST6_ENC_TEST_VECTORS 4
5146 -#define CAST6_DEC_TEST_VECTORS 4
5147 -#define CAST6_CBC_ENC_TEST_VECTORS 1
5148 -#define CAST6_CBC_DEC_TEST_VECTORS 1
5149 -#define CAST6_CTR_ENC_TEST_VECTORS 2
5150 -#define CAST6_CTR_DEC_TEST_VECTORS 2
5151 -#define CAST6_LRW_ENC_TEST_VECTORS 1
5152 -#define CAST6_LRW_DEC_TEST_VECTORS 1
5153 -#define CAST6_XTS_ENC_TEST_VECTORS 1
5154 -#define CAST6_XTS_DEC_TEST_VECTORS 1
5155 -
5156 -static struct cipher_testvec cast6_enc_tv_template[] = {
5157 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5158 {
5159 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5160 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5161 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_tv_template[] = {
5162 },
5163 };
5164
5165 -static struct cipher_testvec cast6_dec_tv_template[] = {
5166 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5167 {
5168 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5169 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5170 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
5171 },
5172 };
5173
5174 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5175 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5176 { /* Generated from TF test vectors */
5177 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5178 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5179 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5180 },
5181 };
5182
5183 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5184 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5185 { /* Generated from TF test vectors */
5186 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5187 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5188 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5189 },
5190 };
5191
5192 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5193 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5194 { /* Generated from TF test vectors */
5195 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5196 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5197 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5198 },
5199 };
5200
5201 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5202 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5203 { /* Generated from TF test vectors */
5204 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5205 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5206 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5207 },
5208 };
5209
5210 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5211 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5212 { /* Generated from TF test vectors */
5213 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5214 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5215 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5216 },
5217 };
5218
5219 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5220 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5221 { /* Generated from TF test vectors */
5222 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5223 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5224 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5225 },
5226 };
5227
5228 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5229 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5230 { /* Generated from TF test vectors */
5231 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5232 "\x23\x53\x60\x28\x74\x71\x35\x26"
5233 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5234 },
5235 };
5236
5237 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5238 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5239 { /* Generated from TF test vectors */
5240 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5241 "\x23\x53\x60\x28\x74\x71\x35\x26"
5242 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5243 /*
5244 * AES test vectors.
5245 */
5246 -#define AES_ENC_TEST_VECTORS 4
5247 -#define AES_DEC_TEST_VECTORS 4
5248 -#define AES_CBC_ENC_TEST_VECTORS 5
5249 -#define AES_CBC_DEC_TEST_VECTORS 5
5250 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5251 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5252 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5253 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5254 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5255 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5256 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5257 -#define AES_LRW_ENC_TEST_VECTORS 8
5258 -#define AES_LRW_DEC_TEST_VECTORS 8
5259 -#define AES_XTS_ENC_TEST_VECTORS 5
5260 -#define AES_XTS_DEC_TEST_VECTORS 5
5261 -#define AES_CTR_ENC_TEST_VECTORS 5
5262 -#define AES_CTR_DEC_TEST_VECTORS 5
5263 -#define AES_OFB_ENC_TEST_VECTORS 1
5264 -#define AES_OFB_DEC_TEST_VECTORS 1
5265 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5266 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5267 -#define AES_GCM_ENC_TEST_VECTORS 9
5268 -#define AES_GCM_DEC_TEST_VECTORS 8
5269 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5270 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5271 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5272 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5273 -#define AES_CCM_ENC_TEST_VECTORS 8
5274 -#define AES_CCM_DEC_TEST_VECTORS 7
5275 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5276 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5277 -
5278 -static struct cipher_testvec aes_enc_tv_template[] = {
5279 +static const struct cipher_testvec aes_enc_tv_template[] = {
5280 { /* From FIPS-197 */
5281 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5282 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5283 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_template[] = {
5284 },
5285 };
5286
5287 -static struct cipher_testvec aes_dec_tv_template[] = {
5288 +static const struct cipher_testvec aes_dec_tv_template[] = {
5289 { /* From FIPS-197 */
5290 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5291 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5292 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_template[] = {
5293 },
5294 };
5295
5296 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5297 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5298 { /* From RFC 3602 */
5299 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5300 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5301 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5302 },
5303 };
5304
5305 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5306 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5307 { /* From RFC 3602 */
5308 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5309 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5310 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5311 },
5312 };
5313
5314 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5315 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5316 { /* Input data from RFC 2410 Case 1 */
5317 #ifdef __LITTLE_ENDIAN
5318 .key = "\x08\x00" /* rta length */
5319 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5320 },
5321 };
5322
5323 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5324 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5325 {
5326 #ifdef __LITTLE_ENDIAN
5327 .key = "\x08\x00" /* rta length */
5328 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5329 },
5330 };
5331
5332 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5333 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5334 { /* RFC 3602 Case 1 */
5335 #ifdef __LITTLE_ENDIAN
5336 .key = "\x08\x00" /* rta length */
5337 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5338 },
5339 };
5340
5341 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5342 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5343 { /* Input data from RFC 2410 Case 1 */
5344 #ifdef __LITTLE_ENDIAN
5345 .key = "\x08\x00" /* rta length */
5346 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5347 },
5348 };
5349
5350 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5351 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5352 {
5353 #ifdef __LITTLE_ENDIAN
5354 .key = "\x08\x00" /* rta length */
5355 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5356 },
5357 };
5358
5359 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5360 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5361 { /* RFC 3602 Case 1 */
5362 #ifdef __LITTLE_ENDIAN
5363 .key = "\x08\x00" /* rta length */
5364 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5365 },
5366 };
5367
5368 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5369 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5370 { /* RFC 3602 Case 1 */
5371 #ifdef __LITTLE_ENDIAN
5372 .key = "\x08\x00" /* rta length */
5373 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5374 },
5375 };
5376
5377 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5378 -
5379 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5380 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5381 { /*Generated with cryptopp*/
5382 #ifdef __LITTLE_ENDIAN
5383 .key = "\x08\x00" /* rta length */
5384 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5385 },
5386 };
5387
5388 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
5389 -
5390 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5391 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5392 { /*Generated with cryptopp*/
5393 #ifdef __LITTLE_ENDIAN
5394 .key = "\x08\x00" /* rta length */
5395 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5396 },
5397 };
5398
5399 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
5400 -
5401 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5402 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5403 { /*Generated with cryptopp*/
5404 #ifdef __LITTLE_ENDIAN
5405 .key = "\x08\x00" /* rta length */
5406 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5407 },
5408 };
5409
5410 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
5411 -
5412 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5413 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5414 { /*Generated with cryptopp*/
5415 #ifdef __LITTLE_ENDIAN
5416 .key = "\x08\x00" /* rta length */
5417 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5418 },
5419 };
5420
5421 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
5422 -
5423 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5424 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5425 { /*Generated with cryptopp*/
5426 #ifdef __LITTLE_ENDIAN
5427 .key = "\x08\x00" /* rta length */
5428 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5429 },
5430 };
5431
5432 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
5433 -
5434 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5435 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5436 { /*Generated with cryptopp*/
5437 #ifdef __LITTLE_ENDIAN
5438 .key = "\x08\x00" /* rta length */
5439 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5440 },
5441 };
5442
5443 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
5444 -
5445 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5446 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5447 { /*Generated with cryptopp*/
5448 #ifdef __LITTLE_ENDIAN
5449 .key = "\x08\x00" /* rta length */
5450 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5451 },
5452 };
5453
5454 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
5455 -
5456 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5457 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5458 { /*Generated with cryptopp*/
5459 #ifdef __LITTLE_ENDIAN
5460 .key = "\x08\x00" /* rta length */
5461 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5462 },
5463 };
5464
5465 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
5466 -
5467 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5468 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5469 { /*Generated with cryptopp*/
5470 #ifdef __LITTLE_ENDIAN
5471 .key = "\x08\x00" /* rta length */
5472 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5473 },
5474 };
5475
5476 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
5477 -
5478 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5479 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5480 { /*Generated with cryptopp*/
5481 #ifdef __LITTLE_ENDIAN
5482 .key = "\x08\x00" /* rta length */
5483 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5484 },
5485 };
5486
5487 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5488 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5489 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5490 { /* LRW-32-AES 1 */
5491 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5492 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5493 }
5494 };
5495
5496 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5497 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5498 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5499 /* same as enc vectors with input and result reversed */
5500 { /* LRW-32-AES 1 */
5501 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5502 }
5503 };
5504
5505 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5506 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5507 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5508 { /* XTS-AES 1 */
5509 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5510 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
5511 }
5512 };
5513
5514 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5515 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5516 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5517 { /* XTS-AES 1 */
5518 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5519 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
5520 };
5521
5522
5523 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5524 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5525 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5526 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5527 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5528 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5529 },
5530 };
5531
5532 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5533 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5534 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5535 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5536 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5537 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5538 },
5539 };
5540
5541 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5542 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5543 { /* From RFC 3686 */
5544 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5545 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5546 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5547 },
5548 };
5549
5550 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5551 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5552 { /* From RFC 3686 */
5553 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5554 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5555 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5556 },
5557 };
5558
5559 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5560 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5561 /* From NIST Special Publication 800-38A, Appendix F.5 */
5562 {
5563 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5564 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5565 }
5566 };
5567
5568 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5569 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5570 /* From NIST Special Publication 800-38A, Appendix F.5 */
5571 {
5572 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5573 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5574 }
5575 };
5576
5577 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5578 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5579 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5580 .key = zeroed_string,
5581 .klen = 16,
5582 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = {
5583 }
5584 };
5585
5586 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5587 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5588 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5589 .key = zeroed_string,
5590 .klen = 32,
5591 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
5592 }
5593 };
5594
5595 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5596 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5597 { /* Generated using Crypto++ */
5598 .key = zeroed_string,
5599 .klen = 20,
5600 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5601 }
5602 };
5603
5604 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5605 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5606 { /* Generated using Crypto++ */
5607 .key = zeroed_string,
5608 .klen = 20,
5609 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5610 }
5611 };
5612
5613 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5614 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5615 { /* From draft-mcgrew-gcm-test-01 */
5616 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5617 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5618 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5619 }
5620 };
5621
5622 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5623 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5624 { /* From draft-mcgrew-gcm-test-01 */
5625 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5626 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5627 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5628 },
5629 };
5630
5631 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5632 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5633 { /* From RFC 3610 */
5634 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5635 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5636 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
5637 }
5638 };
5639
5640 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5641 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5642 { /* From RFC 3610 */
5643 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5644 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5645 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = {
5646 * These vectors are copied/generated from the ones for rfc4106 with
5647 * the key truncated by one byte..
5648 */
5649 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5650 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5651 { /* Generated using Crypto++ */
5652 .key = zeroed_string,
5653 .klen = 19,
5654 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5655 }
5656 };
5657
5658 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5659 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5660 { /* Generated using Crypto++ */
5661 .key = zeroed_string,
5662 .klen = 19,
5663 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5664 /*
5665 * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5666 */
5667 -#define RFC7539_ENC_TEST_VECTORS 2
5668 -#define RFC7539_DEC_TEST_VECTORS 2
5669 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5670 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5671 {
5672 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5673 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5674 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_tv_template[] = {
5675 },
5676 };
5677
5678 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5679 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5680 {
5681 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5682 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5683 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_tv_template[] = {
5684 /*
5685 * draft-irtf-cfrg-chacha20-poly1305
5686 */
5687 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5688 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5689 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5690 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5691 {
5692 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5693 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5694 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5695 },
5696 };
5697
5698 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5699 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5700 {
5701 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5702 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5703 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5704 * semiblock of the ciphertext from the test vector. For decryption, iv is
5705 * the first semiblock of the ciphertext.
5706 */
5707 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5708 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5709 {
5710 .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5711 "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5712 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_tv_template[] = {
5713 },
5714 };
5715
5716 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5717 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5718 {
5719 .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5720 "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5721 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_tv_template[] = {
5722 * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5723 * Only AES-128 is supported at this time.
5724 */
5725 -#define ANSI_CPRNG_AES_TEST_VECTORS 6
5726 -
5727 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5728 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5729 {
5730 .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5731 "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5732 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5733 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5734 * w/o personalization string, w/ and w/o additional input string).
5735 */
5736 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5737 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5738 {
5739 .entropy = (unsigned char *)
5740 "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5741 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5742 },
5743 };
5744
5745 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5746 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5747 {
5748 .entropy = (unsigned char *)
5749 "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5750 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5751 },
5752 };
5753
5754 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5755 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5756 {
5757 .entropy = (unsigned char *)
5758 "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5759 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5760 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5761 * w/o personalization string, w/ and w/o additional input string).
5762 */
5763 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5764 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5765 {
5766 .entropy = (unsigned char *)
5767 "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5768 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5769 },
5770 };
5771
5772 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5773 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5774 {
5775 .entropy = (unsigned char *)
5776 "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5777 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5778 },
5779 };
5780
5781 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5782 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5783 {
5784 .entropy = (unsigned char *)
5785 "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5786 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5787 },
5788 };
5789
5790 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5791 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5792 {
5793 .entropy = (unsigned char *)
5794 "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5795 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5796 },
5797 };
5798
5799 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5800 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5801 {
5802 .entropy = (unsigned char *)
5803 "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5804 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5805 };
5806
5807 /* Cast5 test vectors from RFC 2144 */
5808 -#define CAST5_ENC_TEST_VECTORS 4
5809 -#define CAST5_DEC_TEST_VECTORS 4
5810 -#define CAST5_CBC_ENC_TEST_VECTORS 1
5811 -#define CAST5_CBC_DEC_TEST_VECTORS 1
5812 -#define CAST5_CTR_ENC_TEST_VECTORS 2
5813 -#define CAST5_CTR_DEC_TEST_VECTORS 2
5814 -
5815 -static struct cipher_testvec cast5_enc_tv_template[] = {
5816 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5817 {
5818 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5819 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5820 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_tv_template[] = {
5821 },
5822 };
5823
5824 -static struct cipher_testvec cast5_dec_tv_template[] = {
5825 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5826 {
5827 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5828 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5829 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_tv_template[] = {
5830 },
5831 };
5832
5833 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5834 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5835 { /* Generated from TF test vectors */
5836 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5837 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5838 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5839 },
5840 };
5841
5842 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5843 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5844 { /* Generated from TF test vectors */
5845 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5846 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5847 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5848 },
5849 };
5850
5851 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5852 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5853 { /* Generated from TF test vectors */
5854 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5855 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5856 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5857 },
5858 };
5859
5860 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5861 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5862 { /* Generated from TF test vectors */
5863 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5864 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5865 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5866 /*
5867 * ARC4 test vectors from OpenSSL
5868 */
5869 -#define ARC4_ENC_TEST_VECTORS 7
5870 -#define ARC4_DEC_TEST_VECTORS 7
5871 -
5872 -static struct cipher_testvec arc4_enc_tv_template[] = {
5873 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5874 {
5875 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5876 .klen = 8,
5877 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv_template[] = {
5878 },
5879 };
5880
5881 -static struct cipher_testvec arc4_dec_tv_template[] = {
5882 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5883 {
5884 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5885 .klen = 8,
5886 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv_template[] = {
5887 /*
5888 * TEA test vectors
5889 */
5890 -#define TEA_ENC_TEST_VECTORS 4
5891 -#define TEA_DEC_TEST_VECTORS 4
5892 -
5893 -static struct cipher_testvec tea_enc_tv_template[] = {
5894 +static const struct cipher_testvec tea_enc_tv_template[] = {
5895 {
5896 .key = zeroed_string,
5897 .klen = 16,
5898 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_template[] = {
5899 }
5900 };
5901
5902 -static struct cipher_testvec tea_dec_tv_template[] = {
5903 +static const struct cipher_testvec tea_dec_tv_template[] = {
5904 {
5905 .key = zeroed_string,
5906 .klen = 16,
5907 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_template[] = {
5908 /*
5909 * XTEA test vectors
5910 */
5911 -#define XTEA_ENC_TEST_VECTORS 4
5912 -#define XTEA_DEC_TEST_VECTORS 4
5913 -
5914 -static struct cipher_testvec xtea_enc_tv_template[] = {
5915 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5916 {
5917 .key = zeroed_string,
5918 .klen = 16,
5919 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv_template[] = {
5920 }
5921 };
5922
5923 -static struct cipher_testvec xtea_dec_tv_template[] = {
5924 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5925 {
5926 .key = zeroed_string,
5927 .klen = 16,
5928 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
5929 /*
5930 * KHAZAD test vectors.
5931 */
5932 -#define KHAZAD_ENC_TEST_VECTORS 5
5933 -#define KHAZAD_DEC_TEST_VECTORS 5
5934 -
5935 -static struct cipher_testvec khazad_enc_tv_template[] = {
5936 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5937 {
5938 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5939 "\x00\x00\x00\x00\x00\x00\x00\x00",
5940 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_tv_template[] = {
5941 },
5942 };
5943
5944 -static struct cipher_testvec khazad_dec_tv_template[] = {
5945 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5946 {
5947 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5948 "\x00\x00\x00\x00\x00\x00\x00\x00",
5949 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_tv_template[] = {
5950 * Anubis test vectors.
5951 */
5952
5953 -#define ANUBIS_ENC_TEST_VECTORS 5
5954 -#define ANUBIS_DEC_TEST_VECTORS 5
5955 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2
5956 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2
5957 -
5958 -static struct cipher_testvec anubis_enc_tv_template[] = {
5959 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5960 {
5961 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5962 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5963 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_tv_template[] = {
5964 },
5965 };
5966
5967 -static struct cipher_testvec anubis_dec_tv_template[] = {
5968 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5969 {
5970 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5971 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5972 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_tv_template[] = {
5973 },
5974 };
5975
5976 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5977 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5978 {
5979 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5980 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5981 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5982 },
5983 };
5984
5985 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5986 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5987 {
5988 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5989 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5990 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5991 /*
5992 * XETA test vectors
5993 */
5994 -#define XETA_ENC_TEST_VECTORS 4
5995 -#define XETA_DEC_TEST_VECTORS 4
5996 -
5997 -static struct cipher_testvec xeta_enc_tv_template[] = {
5998 +static const struct cipher_testvec xeta_enc_tv_template[] = {
5999 {
6000 .key = zeroed_string,
6001 .klen = 16,
6002 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv_template[] = {
6003 }
6004 };
6005
6006 -static struct cipher_testvec xeta_dec_tv_template[] = {
6007 +static const struct cipher_testvec xeta_dec_tv_template[] = {
6008 {
6009 .key = zeroed_string,
6010 .klen = 16,
6011 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv_template[] = {
6012 /*
6013 * FCrypt test vectors
6014 */
6015 -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6016 -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6017 -
6018 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6019 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6020 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6021 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6022 .klen = 8,
6023 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6024 }
6025 };
6026
6027 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6028 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6029 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6030 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6031 .klen = 8,
6032 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6033 /*
6034 * CAMELLIA test vectors.
6035 */
6036 -#define CAMELLIA_ENC_TEST_VECTORS 4
6037 -#define CAMELLIA_DEC_TEST_VECTORS 4
6038 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6039 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6040 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6041 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6042 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6043 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6044 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6045 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6046 -
6047 -static struct cipher_testvec camellia_enc_tv_template[] = {
6048 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6049 {
6050 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6051 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6052 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
6053 },
6054 };
6055
6056 -static struct cipher_testvec camellia_dec_tv_template[] = {
6057 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6058 {
6059 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6060 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6061 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
6062 },
6063 };
6064
6065 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6066 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6067 {
6068 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6069 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6070 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6071 },
6072 };
6073
6074 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6075 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6076 {
6077 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6078 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6079 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6080 },
6081 };
6082
6083 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6084 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6085 { /* Generated with Crypto++ */
6086 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6087 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6088 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6089 },
6090 };
6091
6092 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6093 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6094 { /* Generated with Crypto++ */
6095 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6096 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6097 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6098 },
6099 };
6100
6101 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6102 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6103 /* Generated from AES-LRW test vectors */
6104 {
6105 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6106 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6107 },
6108 };
6109
6110 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6111 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6112 /* Generated from AES-LRW test vectors */
6113 /* same as enc vectors with input and result reversed */
6114 {
6115 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6116 },
6117 };
6118
6119 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6120 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6121 /* Generated from AES-XTS test vectors */
6122 {
6123 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6124 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6125 },
6126 };
6127
6128 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6129 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6130 /* Generated from AES-XTS test vectors */
6131 /* same as enc vectors with input and result reversed */
6132 {
6133 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6134 /*
6135 * SEED test vectors
6136 */
6137 -#define SEED_ENC_TEST_VECTORS 4
6138 -#define SEED_DEC_TEST_VECTORS 4
6139 -
6140 -static struct cipher_testvec seed_enc_tv_template[] = {
6141 +static const struct cipher_testvec seed_enc_tv_template[] = {
6142 {
6143 .key = zeroed_string,
6144 .klen = 16,
6145 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv_template[] = {
6146 }
6147 };
6148
6149 -static struct cipher_testvec seed_dec_tv_template[] = {
6150 +static const struct cipher_testvec seed_dec_tv_template[] = {
6151 {
6152 .key = zeroed_string,
6153 .klen = 16,
6154 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv_template[] = {
6155 }
6156 };
6157
6158 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6159 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6160 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6161 /*
6162 * Testvectors from verified.test-vectors submitted to ECRYPT.
6163 * They are truncated to size 39, 64, 111, 129 to test a variety
6164 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6165 },
6166 };
6167
6168 -#define CHACHA20_ENC_TEST_VECTORS 4
6169 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6170 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6171 { /* RFC7539 A.2. Test Vector #1 */
6172 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6173 "\x00\x00\x00\x00\x00\x00\x00\x00"
6174 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_enc_tv_template[] = {
6175 /*
6176 * CTS (Cipher Text Stealing) mode tests
6177 */
6178 -#define CTS_MODE_ENC_TEST_VECTORS 6
6179 -#define CTS_MODE_DEC_TEST_VECTORS 6
6180 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6181 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6182 { /* from rfc3962 */
6183 .klen = 16,
6184 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6185 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_enc_tv_template[] = {
6186 }
6187 };
6188
6189 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6190 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6191 { /* from rfc3962 */
6192 .klen = 16,
6193 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6194 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6195 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6196 */
6197
6198 -#define DEFLATE_COMP_TEST_VECTORS 2
6199 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6200 -
6201 -static struct comp_testvec deflate_comp_tv_template[] = {
6202 +static const struct comp_testvec deflate_comp_tv_template[] = {
6203 {
6204 .inlen = 70,
6205 .outlen = 38,
6206 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_tv_template[] = {
6207 },
6208 };
6209
6210 -static struct comp_testvec deflate_decomp_tv_template[] = {
6211 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6212 {
6213 .inlen = 122,
6214 .outlen = 191,
6215 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
6216 /*
6217 * LZO test vectors (null-terminated strings).
6218 */
6219 -#define LZO_COMP_TEST_VECTORS 2
6220 -#define LZO_DECOMP_TEST_VECTORS 2
6221 -
6222 -static struct comp_testvec lzo_comp_tv_template[] = {
6223 +static const struct comp_testvec lzo_comp_tv_template[] = {
6224 {
6225 .inlen = 70,
6226 .outlen = 57,
6227 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_template[] = {
6228 },
6229 };
6230
6231 -static struct comp_testvec lzo_decomp_tv_template[] = {
6232 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6233 {
6234 .inlen = 133,
6235 .outlen = 159,
6236 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv_template[] = {
6237 */
6238 #define MICHAEL_MIC_TEST_VECTORS 6
6239
6240 -static struct hash_testvec michael_mic_tv_template[] = {
6241 +static const struct hash_testvec michael_mic_tv_template[] = {
6242 {
6243 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6244 .ksize = 8,
6245 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_tv_template[] = {
6246 /*
6247 * CRC32 test vectors
6248 */
6249 -#define CRC32_TEST_VECTORS 14
6250 -
6251 -static struct hash_testvec crc32_tv_template[] = {
6252 +static const struct hash_testvec crc32_tv_template[] = {
6253 {
6254 .key = "\x87\xa9\xcb\xed",
6255 .ksize = 4,
6256 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_template[] = {
6257 /*
6258 * CRC32C test vectors
6259 */
6260 -#define CRC32C_TEST_VECTORS 15
6261 -
6262 -static struct hash_testvec crc32c_tv_template[] = {
6263 +static const struct hash_testvec crc32c_tv_template[] = {
6264 {
6265 .psize = 0,
6266 .digest = "\x00\x00\x00\x00",
6267 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_template[] = {
6268 /*
6269 * Blakcifn CRC test vectors
6270 */
6271 -#define BFIN_CRC_TEST_VECTORS 6
6272 -
6273 -static struct hash_testvec bfin_crc_tv_template[] = {
6274 +static const struct hash_testvec bfin_crc_tv_template[] = {
6275 {
6276 .psize = 0,
6277 .digest = "\x00\x00\x00\x00",
6278 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_template[] = {
6279
6280 };
6281
6282 -#define LZ4_COMP_TEST_VECTORS 1
6283 -#define LZ4_DECOMP_TEST_VECTORS 1
6284 -
6285 static struct comp_testvec lz4_comp_tv_template[] = {
6286 {
6287 .inlen = 70,
6288 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv_template[] = {
6289 },
6290 };
6291
6292 -#define LZ4HC_COMP_TEST_VECTORS 1
6293 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6294 -
6295 static struct comp_testvec lz4hc_comp_tv_template[] = {
6296 {
6297 .inlen = 70,
6298 diff --git a/crypto/tls.c b/crypto/tls.c
6299 new file mode 100644
6300 index 00000000..377226f5
6301 --- /dev/null
6302 +++ b/crypto/tls.c
6303 @@ -0,0 +1,607 @@
6304 +/*
6305 + * Copyright 2013 Freescale Semiconductor, Inc.
6306 + * Copyright 2017 NXP Semiconductor, Inc.
6307 + *
6308 + * This program is free software; you can redistribute it and/or modify it
6309 + * under the terms of the GNU General Public License as published by the Free
6310 + * Software Foundation; either version 2 of the License, or (at your option)
6311 + * any later version.
6312 + *
6313 + */
6314 +
6315 +#include <crypto/internal/aead.h>
6316 +#include <crypto/internal/hash.h>
6317 +#include <crypto/internal/skcipher.h>
6318 +#include <crypto/authenc.h>
6319 +#include <crypto/null.h>
6320 +#include <crypto/scatterwalk.h>
6321 +#include <linux/err.h>
6322 +#include <linux/init.h>
6323 +#include <linux/module.h>
6324 +#include <linux/rtnetlink.h>
6325 +
6326 +struct tls_instance_ctx {
6327 + struct crypto_ahash_spawn auth;
6328 + struct crypto_skcipher_spawn enc;
6329 +};
6330 +
6331 +struct crypto_tls_ctx {
6332 + unsigned int reqoff;
6333 + struct crypto_ahash *auth;
6334 + struct crypto_skcipher *enc;
6335 + struct crypto_skcipher *null;
6336 +};
6337 +
6338 +struct tls_request_ctx {
6339 + /*
6340 + * cryptlen holds the payload length in the case of encryption or
6341 + * payload_len + icv_len + padding_len in case of decryption
6342 + */
6343 + unsigned int cryptlen;
6344 + /* working space for partial results */
6345 + struct scatterlist tmp[2];
6346 + struct scatterlist cipher[2];
6347 + struct scatterlist dst[2];
6348 + char tail[];
6349 +};
6350 +
6351 +struct async_op {
6352 + struct completion completion;
6353 + int err;
6354 +};
6355 +
6356 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6357 +{
6358 + struct async_op *areq = req->data;
6359 +
6360 + if (err == -EINPROGRESS)
6361 + return;
6362 +
6363 + areq->err = err;
6364 + complete(&areq->completion);
6365 +}
6366 +
6367 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6368 + unsigned int keylen)
6369 +{
6370 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6371 + struct crypto_ahash *auth = ctx->auth;
6372 + struct crypto_skcipher *enc = ctx->enc;
6373 + struct crypto_authenc_keys keys;
6374 + int err = -EINVAL;
6375 +
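+	/*
+	 * The key blob is expected in the authenc() format: an rtattr header
+	 * carrying the encryption key length, then the authentication key,
+	 * then the encryption key (the same "rta length"/"rta type" layout
+	 * seen in the testmgr.h vectors); crypto_authenc_extractkeys() splits
+	 * it into keys.authkey/keys.enckey below.
+	 */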
6376 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6377 + goto badkey;
6378 +
6379 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6380 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6381 + CRYPTO_TFM_REQ_MASK);
6382 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6383 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6384 + CRYPTO_TFM_RES_MASK);
6385 +
6386 + if (err)
6387 + goto out;
6388 +
6389 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6390 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6391 + CRYPTO_TFM_REQ_MASK);
6392 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6393 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6394 + CRYPTO_TFM_RES_MASK);
6395 +
6396 +out:
6397 + return err;
6398 +
6399 +badkey:
6400 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6401 + goto out;
6402 +}
6403 +
6404 +/**
6405 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6406 + * @hash: (output) buffer to save the digest into
6407 + * @src: (input) scatterlist with the assoc and payload data
6408 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
6409 + * @req: (input) aead request
6410 + **/
6411 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6412 + unsigned int srclen, struct aead_request *req)
6413 +{
6414 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6415 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6416 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6417 + struct async_op ahash_op;
6418 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6419 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6420 + int err = -EBADMSG;
6421 +
6422 + /* Bail out if the request assoc len is 0 */
6423 + if (!req->assoclen)
6424 + return err;
6425 +
6426 + init_completion(&ahash_op.completion);
6427 +
6428 + /* the hash transform to be executed comes from the original request */
6429 + ahash_request_set_tfm(ahreq, ctx->auth);
6430 + /* prepare the hash request with input data and result pointer */
6431 + ahash_request_set_crypt(ahreq, src, hash, srclen);
6432 + /* set the notifier for when the async hash function returns */
6433 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6434 + tls_async_op_done, &ahash_op);
6435 +
6436 + /* Calculate the digest on the given data. The result is put in hash */
6437 + err = crypto_ahash_digest(ahreq);
6438 + if (err == -EINPROGRESS) {
6439 + err = wait_for_completion_interruptible(&ahash_op.completion);
6440 + if (!err)
6441 + err = ahash_op.err;
6442 + }
6443 +
6444 + return err;
6445 +}
6446 +
6447 +/**
6448 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6449 + * @hash: (output) buffer to save the digest and padding into
6450 + * @phashlen: (output) the size of digest + padding
6451 + * @req: (input) aead request
6452 + **/
6453 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6454 + struct aead_request *req)
6455 +{
6456 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6457 + unsigned int hash_size = crypto_aead_authsize(tls);
6458 + unsigned int block_size = crypto_aead_blocksize(tls);
6459 + unsigned int srclen = req->cryptlen + hash_size;
6460 + unsigned int icvlen = req->cryptlen + req->assoclen;
6461 + unsigned int padlen;
6462 + int err;
6463 +
6464 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
6465 + if (err)
6466 + goto out;
6467 +
6468 + /* add padding after digest */
6469 + padlen = block_size - (srclen % block_size);
6470 + memset(hash + hash_size, padlen - 1, padlen);
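+	/*
+	 * Worked example, assuming a 16-byte cipher block and a 20-byte
+	 * digest: for req->cryptlen = 37, srclen = 57, so
+	 * padlen = 16 - (57 % 16) = 7 and seven bytes of value 6 are written,
+	 * i.e. six TLS padding bytes plus the trailing padding-length byte,
+	 * rounding the record up to a block multiple.
+	 */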
6471 +
6472 + *phashlen = hash_size + padlen;
6473 +out:
6474 + return err;
6475 +}
6476 +
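+/*
+ * Helper that copies the associated data from one scatterlist to another by
+ * "encrypting" it with the pre-allocated null skcipher ("ecb(cipher_null)"),
+ * which amounts to a plain scatterlist-to-scatterlist copy.
+ */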
6477 +static int crypto_tls_copy_data(struct aead_request *req,
6478 + struct scatterlist *src,
6479 + struct scatterlist *dst,
6480 + unsigned int len)
6481 +{
6482 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6483 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6484 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6485 +
6486 + skcipher_request_set_tfm(skreq, ctx->null);
6487 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6488 + NULL, NULL);
6489 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6490 +
6491 + return crypto_skcipher_encrypt(skreq);
6492 +}
6493 +
6494 +static int crypto_tls_encrypt(struct aead_request *req)
6495 +{
6496 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6497 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6498 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6499 + struct skcipher_request *skreq;
6500 + struct scatterlist *cipher = treq_ctx->cipher;
6501 + struct scatterlist *tmp = treq_ctx->tmp;
6502 + struct scatterlist *sg, *src, *dst;
6503 + unsigned int cryptlen, phashlen;
6504 + u8 *hash = treq_ctx->tail;
6505 + int err;
6506 +
6507 + /*
6508 + * The hash result is saved at the beginning of the tls request ctx
6509 + * and is aligned as required by the hash transform. Enough space was
6510 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
6511 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6512 + * the result is not overwritten by the second (cipher) request.
6513 + */
6514 + hash = (u8 *)ALIGN((unsigned long)hash +
6515 + crypto_ahash_alignmask(ctx->auth),
6516 + crypto_ahash_alignmask(ctx->auth) + 1);
6517 +
6518 + /*
6519 + * STEP 1: create ICV together with necessary padding
6520 + */
6521 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
6522 + if (err)
6523 + return err;
6524 +
6525 + /*
6526 + * STEP 2: Hash and padding are combined with the payload
6527 +	 * depending on the form in which it arrives. Scatter tables must have at least
6528 + * one page of data before chaining with another table and can't have
6529 + * an empty data page. The following code addresses these requirements.
6530 + *
6531 + * If the payload is empty, only the hash is encrypted, otherwise the
6532 + * payload scatterlist is merged with the hash. A special merging case
6533 + * is when the payload has only one page of data. In that case the
6534 + * payload page is moved to another scatterlist and prepared there for
6535 + * encryption.
6536 + */
6537 + if (req->cryptlen) {
6538 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6539 +
6540 + sg_init_table(cipher, 2);
6541 + sg_set_buf(cipher + 1, hash, phashlen);
6542 +
6543 + if (sg_is_last(src)) {
6544 + sg_set_page(cipher, sg_page(src), req->cryptlen,
6545 + src->offset);
6546 + src = cipher;
6547 + } else {
6548 + unsigned int rem_len = req->cryptlen;
6549 +
6550 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6551 + rem_len -= min(rem_len, sg->length);
6552 +
6553 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6554 + sg_chain(sg, 1, cipher);
6555 + }
6556 + } else {
6557 + sg_init_one(cipher, hash, phashlen);
6558 + src = cipher;
6559 + }
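+	/*
+	 * Either way the hash and padding are now reachable through the
+	 * "cipher" scatterlist: on their own for an empty payload, paired
+	 * with the single payload page, or chained in behind the entry where
+	 * a longer payload ends (trimmed to the remaining cryptlen).
+	 */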
6560 +
6561 + /**
6562 + * If src != dst copy the associated data from source to destination.
6563 +	 * In both cases fast-forward past the associated data in the dest.
6564 + */
6565 + if (req->src != req->dst) {
6566 + err = crypto_tls_copy_data(req, req->src, req->dst,
6567 + req->assoclen);
6568 + if (err)
6569 + return err;
6570 + }
6571 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6572 +
6573 + /*
6574 + * STEP 3: encrypt the frame and return the result
6575 + */
6576 + cryptlen = req->cryptlen + phashlen;
6577 +
6578 + /*
6579 + * The hash and the cipher are applied at different times and their
6580 + * requests can use the same memory space without interference
6581 + */
6582 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6583 + skcipher_request_set_tfm(skreq, ctx->enc);
6584 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6585 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6586 + req->base.complete, req->base.data);
6587 + /*
6588 + * Apply the cipher transform. The result will be in req->dst when the
6589 +	 * asynchronous call terminates
6590 + */
6591 + err = crypto_skcipher_encrypt(skreq);
6592 +
6593 + return err;
6594 +}
6595 +
6596 +static int crypto_tls_decrypt(struct aead_request *req)
6597 +{
6598 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6599 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6600 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6601 + unsigned int cryptlen = req->cryptlen;
6602 + unsigned int hash_size = crypto_aead_authsize(tls);
6603 + unsigned int block_size = crypto_aead_blocksize(tls);
6604 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6605 + struct scatterlist *tmp = treq_ctx->tmp;
6606 + struct scatterlist *src, *dst;
6607 +
6608 + u8 padding[255]; /* padding can be 0-255 bytes */
6609 + u8 pad_size;
6610 + u16 *len_field;
6611 + u8 *ihash, *hash = treq_ctx->tail;
6612 +
6613 + int paderr = 0;
6614 + int err = -EINVAL;
6615 + int i;
6616 + struct async_op ciph_op;
6617 +
6618 + /*
6619 + * Rule out bad packets. The input packet length must be at least one
6620 + * byte more than the hash_size
6621 + */
6622 + if (cryptlen <= hash_size || cryptlen % block_size)
6623 + goto out;
6624 +
6625 + /*
6626 + * Step 1 - Decrypt the source. Fast-forward past the associated data
6627 + * to the encrypted data. The result will be overwritten in place so
6628 + * that the decrypted data will be adjacent to the associated data. The
6629 +	 * last step (computing the hash) will have its input data already
6630 + * prepared and ready to be accessed at req->src.
6631 + */
6632 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6633 + dst = src;
6634 +
6635 + init_completion(&ciph_op.completion);
6636 + skcipher_request_set_tfm(skreq, ctx->enc);
6637 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6638 + tls_async_op_done, &ciph_op);
6639 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6640 + err = crypto_skcipher_decrypt(skreq);
6641 + if (err == -EINPROGRESS) {
6642 + err = wait_for_completion_interruptible(&ciph_op.completion);
6643 + if (!err)
6644 + err = ciph_op.err;
6645 + }
6646 + if (err)
6647 + goto out;
6648 +
6649 + /*
6650 + * Step 2 - Verify padding
6651 + * Retrieve the last byte of the payload; this is the padding size.
6652 + */
6653 + cryptlen -= 1;
6654 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6655 +
6656 + /* RFC recommendation for invalid padding size. */
6657 + if (cryptlen < pad_size + hash_size) {
6658 + pad_size = 0;
6659 + paderr = -EBADMSG;
6660 + }
6661 + cryptlen -= pad_size;
6662 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6663 +
6664 +	/* Padding content must be equal to pad_size. We verify it all */
6665 + for (i = 0; i < pad_size; i++)
6666 + if (padding[i] != pad_size)
6667 + paderr = -EBADMSG;
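+	/*
+	 * A padding failure is only recorded in paderr here; the MAC below is
+	 * still computed and compared so that, as the RFC recommends, invalid
+	 * padding and an invalid MAC remain indistinguishable to the peer,
+	 * limiting padding-oracle style leaks.
+	 */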
6668 +
6669 + /*
6670 + * Step 3 - Verify hash
6671 + * Align the digest result as required by the hash transform. Enough
6672 + * space was allocated in crypto_tls_init_tfm
6673 + */
6674 + hash = (u8 *)ALIGN((unsigned long)hash +
6675 + crypto_ahash_alignmask(ctx->auth),
6676 + crypto_ahash_alignmask(ctx->auth) + 1);
6677 + /*
6678 + * Two bytes at the end of the associated data make the length field.
6679 + * It must be updated with the length of the cleartext message before
6680 + * the hash is calculated.
6681 + */
6682 + len_field = sg_virt(req->src) + req->assoclen - 2;
6683 + cryptlen -= hash_size;
6684 + *len_field = htons(cryptlen);
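+	/*
+	 * For a standard TLS 1.0 record the associated data is the 13-byte
+	 * pseudo-header (8-byte sequence number, 1-byte type, 2-byte version,
+	 * 2-byte length), which is why the length sits in the last two bytes
+	 * of the assoc region (assumed record layout; only those final two
+	 * bytes matter to this code).
+	 */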
6685 +
6686 + /* This is the hash from the decrypted packet. Save it for later */
6687 + ihash = hash + hash_size;
6688 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6689 +
6690 + /* Now compute and compare our ICV with the one from the packet */
6691 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6692 + if (!err)
6693 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6694 +
6695 + if (req->src != req->dst) {
6696 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6697 + req->assoclen);
6698 + if (err)
6699 + goto out;
6700 + }
6701 +
6702 + /* return the first found error */
6703 + if (paderr)
6704 + err = paderr;
6705 +
6706 +out:
6707 + aead_request_complete(req, err);
6708 + return err;
6709 +}
6710 +
6711 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6712 +{
6713 + struct aead_instance *inst = aead_alg_instance(tfm);
6714 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6715 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6716 + struct crypto_ahash *auth;
6717 + struct crypto_skcipher *enc;
6718 + struct crypto_skcipher *null;
6719 + int err;
6720 +
6721 + auth = crypto_spawn_ahash(&ictx->auth);
6722 + if (IS_ERR(auth))
6723 + return PTR_ERR(auth);
6724 +
6725 + enc = crypto_spawn_skcipher(&ictx->enc);
6726 + err = PTR_ERR(enc);
6727 + if (IS_ERR(enc))
6728 + goto err_free_ahash;
6729 +
6730 + null = crypto_get_default_null_skcipher2();
6731 + err = PTR_ERR(null);
6732 + if (IS_ERR(null))
6733 + goto err_free_skcipher;
6734 +
6735 + ctx->auth = auth;
6736 + ctx->enc = enc;
6737 + ctx->null = null;
6738 +
6739 + /*
6740 + * Allow enough space for two digests. The two digests will be compared
6741 + * during the decryption phase. One will come from the decrypted packet
6742 + * and the other will be calculated. For encryption, one digest is
6743 + * padded (up to a cipher blocksize) and chained with the payload
6744 + */
6745 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6746 + crypto_ahash_alignmask(auth),
6747 + crypto_ahash_alignmask(auth) + 1) +
6748 + max(crypto_ahash_digestsize(auth),
6749 + crypto_skcipher_blocksize(enc));
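+	/*
+	 * Illustrative sizing, assuming hmac(sha1) with cbc(aes) and a zero
+	 * alignmask: digestsize 20 and blocksize 16 give
+	 * reqoff = 20 + max(20, 16) = 40 bytes, i.e. exactly the room for the
+	 * two 20-byte digests mentioned above.
+	 */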
6750 +
6751 + crypto_aead_set_reqsize(tfm,
6752 + sizeof(struct tls_request_ctx) +
6753 + ctx->reqoff +
6754 + max_t(unsigned int,
6755 + crypto_ahash_reqsize(auth) +
6756 + sizeof(struct ahash_request),
6757 + crypto_skcipher_reqsize(enc) +
6758 + sizeof(struct skcipher_request)));
6759 +
6760 + return 0;
6761 +
6762 +err_free_skcipher:
6763 + crypto_free_skcipher(enc);
6764 +err_free_ahash:
6765 + crypto_free_ahash(auth);
6766 + return err;
6767 +}
6768 +
6769 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6770 +{
6771 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6772 +
6773 + crypto_free_ahash(ctx->auth);
6774 + crypto_free_skcipher(ctx->enc);
6775 + crypto_put_default_null_skcipher2();
6776 +}
6777 +
6778 +static void crypto_tls_free(struct aead_instance *inst)
6779 +{
6780 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6781 +
6782 + crypto_drop_skcipher(&ctx->enc);
6783 + crypto_drop_ahash(&ctx->auth);
6784 + kfree(inst);
6785 +}
6786 +
6787 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6788 +{
6789 + struct crypto_attr_type *algt;
6790 + struct aead_instance *inst;
6791 + struct hash_alg_common *auth;
6792 + struct crypto_alg *auth_base;
6793 + struct skcipher_alg *enc;
6794 + struct tls_instance_ctx *ctx;
6795 + const char *enc_name;
6796 + int err;
6797 +
6798 + algt = crypto_get_attr_type(tb);
6799 + if (IS_ERR(algt))
6800 + return PTR_ERR(algt);
6801 +
6802 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6803 + return -EINVAL;
6804 +
6805 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6806 + CRYPTO_ALG_TYPE_AHASH_MASK |
6807 + crypto_requires_sync(algt->type, algt->mask));
6808 + if (IS_ERR(auth))
6809 + return PTR_ERR(auth);
6810 +
6811 + auth_base = &auth->base;
6812 +
6813 + enc_name = crypto_attr_alg_name(tb[2]);
6814 + err = PTR_ERR(enc_name);
6815 + if (IS_ERR(enc_name))
6816 + goto out_put_auth;
6817 +
6818 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6819 + err = -ENOMEM;
6820 + if (!inst)
6821 + goto out_put_auth;
6822 +
6823 + ctx = aead_instance_ctx(inst);
6824 +
6825 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
6826 + aead_crypto_instance(inst));
6827 + if (err)
6828 + goto err_free_inst;
6829 +
6830 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6831 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6832 + crypto_requires_sync(algt->type,
6833 + algt->mask));
6834 + if (err)
6835 + goto err_drop_auth;
6836 +
6837 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
6838 +
6839 + err = -ENAMETOOLONG;
6840 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6841 + "tls10(%s,%s)", auth_base->cra_name,
6842 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6843 + goto err_drop_enc;
6844 +
6845 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6846 + "tls10(%s,%s)", auth_base->cra_driver_name,
6847 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6848 + goto err_drop_enc;
6849 +
6850 + inst->alg.base.cra_flags = (auth_base->cra_flags |
6851 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6852 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6853 + auth_base->cra_priority;
6854 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6855 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6856 + enc->base.cra_alignmask;
6857 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6858 +
6859 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6860 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6861 + inst->alg.maxauthsize = auth->digestsize;
6862 +
6863 + inst->alg.init = crypto_tls_init_tfm;
6864 + inst->alg.exit = crypto_tls_exit_tfm;
6865 +
6866 + inst->alg.setkey = crypto_tls_setkey;
6867 + inst->alg.encrypt = crypto_tls_encrypt;
6868 + inst->alg.decrypt = crypto_tls_decrypt;
6869 +
6870 + inst->free = crypto_tls_free;
6871 +
6872 + err = aead_register_instance(tmpl, inst);
6873 + if (err)
6874 + goto err_drop_enc;
6875 +
6876 +out:
6877 + crypto_mod_put(auth_base);
6878 + return err;
6879 +
6880 +err_drop_enc:
6881 + crypto_drop_skcipher(&ctx->enc);
6882 +err_drop_auth:
6883 + crypto_drop_ahash(&ctx->auth);
6884 +err_free_inst:
6885 + kfree(inst);
6886 +out_put_auth:
6887 + goto out;
6888 +}
6889 +
6890 +static struct crypto_template crypto_tls_tmpl = {
6891 + .name = "tls10",
6892 + .create = crypto_tls_create,
6893 + .module = THIS_MODULE,
6894 +};
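+/*
+ * Usage sketch (illustrative, assuming an hmac(sha1)/cbc(aes) pairing):
+ * crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0) instantiates the
+ * template; any ahash/skcipher pair accepted by crypto_tls_create() works
+ * the same way.
+ */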
6895 +
6896 +static int __init crypto_tls_module_init(void)
6897 +{
6898 + return crypto_register_template(&crypto_tls_tmpl);
6899 +}
6900 +
6901 +static void __exit crypto_tls_module_exit(void)
6902 +{
6903 + crypto_unregister_template(&crypto_tls_tmpl);
6904 +}
6905 +
6906 +module_init(crypto_tls_module_init);
6907 +module_exit(crypto_tls_module_exit);
6908 +
6909 +MODULE_LICENSE("GPL");
6910 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
6911 diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
6912 index 64bf3024..3831a6f7 100644
6913 --- a/drivers/crypto/caam/Kconfig
6914 +++ b/drivers/crypto/caam/Kconfig
6915 @@ -1,6 +1,11 @@
6916 +config CRYPTO_DEV_FSL_CAAM_COMMON
6917 + tristate
6918 +
6919 config CRYPTO_DEV_FSL_CAAM
6920 - tristate "Freescale CAAM-Multicore driver backend"
6921 + tristate "Freescale CAAM-Multicore platform driver backend"
6922 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6923 + select CRYPTO_DEV_FSL_CAAM_COMMON
6924 + select SOC_BUS
6925 help
6926 Enables the driver module for Freescale's Cryptographic Accelerator
6927 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6928 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6929 To compile this driver as a module, choose M here: the module
6930 will be called caam.
6931
6932 +if CRYPTO_DEV_FSL_CAAM
6933 +
6934 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6935 + bool "Enable debug output in CAAM driver"
6936 + help
6937 + Selecting this will enable printing of various debug
6938 + information in the CAAM driver.
6939 +
6940 config CRYPTO_DEV_FSL_CAAM_JR
6941 tristate "Freescale CAAM Job Ring driver backend"
6942 - depends on CRYPTO_DEV_FSL_CAAM
6943 default y
6944 help
6945 Enables the driver module for Job Rings which are part of
6946 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6947 To compile this driver as a module, choose M here: the module
6948 will be called caam_jr.
6949
6950 +if CRYPTO_DEV_FSL_CAAM_JR
6951 +
6952 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6953 int "Job Ring size"
6954 - depends on CRYPTO_DEV_FSL_CAAM_JR
6955 range 2 9
6956 default "9"
6957 help
6958 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6959
6960 config CRYPTO_DEV_FSL_CAAM_INTC
6961 bool "Job Ring interrupt coalescing"
6962 - depends on CRYPTO_DEV_FSL_CAAM_JR
6963 help
6964 Enable the Job Ring's interrupt coalescing feature.
6965
6966 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
6967
6968 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6969 tristate "Register algorithm implementations with the Crypto API"
6970 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6971 default y
6972 select CRYPTO_AEAD
6973 select CRYPTO_AUTHENC
6974 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6975 To compile this as a module, choose M here: the module
6976 will be called caamalg.
6977
6978 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6979 + tristate "Queue Interface as Crypto API backend"
6980 + depends on FSL_SDK_DPA && NET
6981 + default y
6982 + select CRYPTO_AUTHENC
6983 + select CRYPTO_BLKCIPHER
6984 + help
6985 + Selecting this will use CAAM Queue Interface (QI) for sending
6986 + & receiving crypto jobs to/from CAAM. This gives better performance
6987 +	  than the job ring interface when the number of cores is greater than
6988 +	  the number of job rings assigned to the kernel. The number of portals
6989 +	  assigned to the kernel should also be greater than the number of
6990 + job rings.
6991 +
6992 + To compile this as a module, choose M here: the module
6993 + will be called caamalg_qi.
6994 +
6995 config CRYPTO_DEV_FSL_CAAM_AHASH_API
6996 tristate "Register hash algorithm implementations with Crypto API"
6997 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6998 default y
6999 select CRYPTO_HASH
7000 help
7001 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
7002
7003 config CRYPTO_DEV_FSL_CAAM_PKC_API
7004 tristate "Register public key cryptography implementations with Crypto API"
7005 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7006 default y
7007 select CRYPTO_RSA
7008 help
7009 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
7010
7011 config CRYPTO_DEV_FSL_CAAM_RNG_API
7012 tristate "Register caam device for hwrng API"
7013 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7014 default y
7015 select CRYPTO_RNG
7016 select HW_RANDOM
7017 @@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
7018 To compile this as a module, choose M here: the module
7019 will be called caamrng.
7020
7021 -config CRYPTO_DEV_FSL_CAAM_IMX
7022 - def_bool SOC_IMX6 || SOC_IMX7D
7023 - depends on CRYPTO_DEV_FSL_CAAM
7024 +endif # CRYPTO_DEV_FSL_CAAM_JR
7025
7026 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7027 - bool "Enable debug output in CAAM driver"
7028 - depends on CRYPTO_DEV_FSL_CAAM
7029 - help
7030 - Selecting this will enable printing of various debug
7031 - information in the CAAM driver.
7032 +endif # CRYPTO_DEV_FSL_CAAM
7033 +
7034 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7035 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7036 + depends on FSL_MC_DPIO
7037 + select CRYPTO_DEV_FSL_CAAM_COMMON
7038 + select CRYPTO_BLKCIPHER
7039 + select CRYPTO_AUTHENC
7040 + select CRYPTO_AEAD
7041 + ---help---
7042 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7043 + It handles DPSECI DPAA2 objects that sit on the Management Complex
7044 + (MC) fsl-mc bus.
7045 +
7046 + To compile this as a module, choose M here: the module
7047 + will be called dpaa2_caam.
7048 +
7049 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7050 + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7051 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7052 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7053 diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
7054 index 08bf5515..01f73a25 100644
7055 --- a/drivers/crypto/caam/Makefile
7056 +++ b/drivers/crypto/caam/Makefile
7057 @@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
7058 ccflags-y := -DDEBUG
7059 endif
7060
7061 +ccflags-y += -DVERSION=\"\"
7062 +
7063 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7064 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7065 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7066 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7067 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7068 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7069 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7070 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7071 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7072
7073 caam-objs := ctrl.o
7074 -caam_jr-objs := jr.o key_gen.o error.o
7075 +caam_jr-objs := jr.o key_gen.o
7076 caam_pkc-y := caampkc.o pkc_desc.o
7077 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7078 + ccflags-y += -DCONFIG_CAAM_QI
7079 + caam-objs += qi.o
7080 +endif
7081 +
7082 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7083 +
7084 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
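In the caamalg.c rework that follows, shared-descriptor construction moves into the cnstr_shdsc_*() helpers, and each set_sh_desc path first decides whether the key(s) can be inlined into the 64-word shared descriptor or must be referenced by DMA address. A condensed sketch of that decision is shown below; the names and parameters are placeholders standing in for the driver's CAAM_DESC_BYTES_MAX, *_DESC_JOB_IO_LEN, DESC_*_LEN constants and struct alginfo fields.

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch of the inline-vs-DMA key placement used when building a
 * CAAM shared descriptor. All identifiers here are illustrative, not
 * symbols from this patch.
 */
struct key_place {
	bool inline_key;	/* true: key bytes are embedded in the descriptor */
	const void *key_virt;	/* used when inline_key is true */
	uint64_t key_dma;	/* bus address, used when inline_key is false */
};

static struct key_place place_key(int desc_max_bytes, int job_io_bytes,
				  int desc_len_bytes, const void *key_virt,
				  uint64_t key_dma, int key_bytes)
{
	struct key_place p = { 0 };
	/* descriptor space left for the key once job I/O overhead is deducted */
	int rem_bytes = desc_max_bytes - job_io_bytes - key_bytes;

	if (rem_bytes >= desc_len_bytes) {
		/* key fits: embed it in the shared descriptor */
		p.inline_key = true;
		p.key_virt = key_virt;
	} else {
		/* not enough room: reference the key by DMA address */
		p.key_dma = key_dma;
	}
	return p;
}

Inlining trades shared-descriptor space for one fewer key fetch per job, which is presumably why the driver prefers it whenever the per-algorithm descriptor length still fits.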
7085 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
7086 index 0d743c63..abf2f52b 100644
7087 --- a/drivers/crypto/caam/caamalg.c
7088 +++ b/drivers/crypto/caam/caamalg.c
7089 @@ -2,6 +2,7 @@
7090 * caam - Freescale FSL CAAM support for crypto API
7091 *
7092 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7093 + * Copyright 2016 NXP
7094 *
7095 * Based on talitos crypto API driver.
7096 *
7097 @@ -53,6 +54,7 @@
7098 #include "error.h"
7099 #include "sg_sw_sec4.h"
7100 #include "key_gen.h"
7101 +#include "caamalg_desc.h"
7102
7103 /*
7104 * crypto alg
7105 @@ -62,8 +64,6 @@
7106 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
7107 CTR_RFC3686_NONCE_SIZE + \
7108 SHA512_DIGEST_SIZE * 2)
7109 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7110 -#define CAAM_MAX_IV_LENGTH 16
7111
7112 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7113 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7114 @@ -71,37 +71,6 @@
7115 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7116 CAAM_CMD_SZ * 5)
7117
7118 -/* length of descriptors text */
7119 -#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
7120 -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7121 -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7122 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7123 -
7124 -/* Note: Nonce is counted in enckeylen */
7125 -#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
7126 -
7127 -#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
7128 -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7129 -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7130 -
7131 -#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
7132 -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7133 -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7134 -
7135 -#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
7136 -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7137 -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7138 -
7139 -#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
7140 -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7141 -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7142 -
7143 -#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
7144 -#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
7145 - 20 * CAAM_CMD_SZ)
7146 -#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
7147 - 15 * CAAM_CMD_SZ)
7148 -
7149 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7150 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7151
7152 @@ -112,47 +81,11 @@
7153 #define debug(format, arg...)
7154 #endif
7155
7156 -#ifdef DEBUG
7157 -#include <linux/highmem.h>
7158 -
7159 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7160 - int prefix_type, int rowsize, int groupsize,
7161 - struct scatterlist *sg, size_t tlen, bool ascii,
7162 - bool may_sleep)
7163 -{
7164 - struct scatterlist *it;
7165 - void *it_page;
7166 - size_t len;
7167 - void *buf;
7168 -
7169 - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7170 - /*
7171 - * make sure the scatterlist's page
7172 - * has a valid virtual memory mapping
7173 - */
7174 - it_page = kmap_atomic(sg_page(it));
7175 - if (unlikely(!it_page)) {
7176 - printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7177 - return;
7178 - }
7179 -
7180 - buf = it_page + it->offset;
7181 - len = min_t(size_t, tlen, it->length);
7182 - print_hex_dump(level, prefix_str, prefix_type, rowsize,
7183 - groupsize, buf, len, ascii);
7184 - tlen -= len;
7185 -
7186 - kunmap_atomic(it_page);
7187 - }
7188 -}
7189 -#endif
7190 -
7191 static struct list_head alg_list;
7192
7193 struct caam_alg_entry {
7194 int class1_alg_type;
7195 int class2_alg_type;
7196 - int alg_op;
7197 bool rfc3686;
7198 bool geniv;
7199 };
7200 @@ -163,302 +96,67 @@ struct caam_aead_alg {
7201 bool registered;
7202 };
7203
7204 -/* Set DK bit in class 1 operation if shared */
7205 -static inline void append_dec_op1(u32 *desc, u32 type)
7206 -{
7207 - u32 *jump_cmd, *uncond_jump_cmd;
7208 -
7209 - /* DK bit is valid only for AES */
7210 - if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7211 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7212 - OP_ALG_DECRYPT);
7213 - return;
7214 - }
7215 -
7216 - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7217 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7218 - OP_ALG_DECRYPT);
7219 - uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7220 - set_jump_tgt_here(desc, jump_cmd);
7221 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7222 - OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7223 - set_jump_tgt_here(desc, uncond_jump_cmd);
7224 -}
7225 -
7226 -/*
7227 - * For aead functions, read payload and write payload,
7228 - * both of which are specified in req->src and req->dst
7229 - */
7230 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7231 -{
7232 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7233 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7234 - KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7235 -}
7236 -
7237 -/*
7238 - * For ablkcipher encrypt and decrypt, read from req->src and
7239 - * write to req->dst
7240 - */
7241 -static inline void ablkcipher_append_src_dst(u32 *desc)
7242 -{
7243 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7244 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7245 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7246 - KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7247 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7248 -}
7249 -
7250 /*
7251 * per-session context
7252 */
7253 struct caam_ctx {
7254 - struct device *jrdev;
7255 u32 sh_desc_enc[DESC_MAX_USED_LEN];
7256 u32 sh_desc_dec[DESC_MAX_USED_LEN];
7257 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7258 + u8 key[CAAM_MAX_KEY_SIZE];
7259 dma_addr_t sh_desc_enc_dma;
7260 dma_addr_t sh_desc_dec_dma;
7261 dma_addr_t sh_desc_givenc_dma;
7262 - u32 class1_alg_type;
7263 - u32 class2_alg_type;
7264 - u32 alg_op;
7265 - u8 key[CAAM_MAX_KEY_SIZE];
7266 dma_addr_t key_dma;
7267 - unsigned int enckeylen;
7268 - unsigned int split_key_len;
7269 - unsigned int split_key_pad_len;
7270 + struct device *jrdev;
7271 + struct alginfo adata;
7272 + struct alginfo cdata;
7273 unsigned int authsize;
7274 };
7275
7276 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7277 - int keys_fit_inline, bool is_rfc3686)
7278 -{
7279 - u32 *nonce;
7280 - unsigned int enckeylen = ctx->enckeylen;
7281 -
7282 - /*
7283 - * RFC3686 specific:
7284 - * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7285 - * | enckeylen = encryption key size + nonce size
7286 - */
7287 - if (is_rfc3686)
7288 - enckeylen -= CTR_RFC3686_NONCE_SIZE;
7289 -
7290 - if (keys_fit_inline) {
7291 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7292 - ctx->split_key_len, CLASS_2 |
7293 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7294 - append_key_as_imm(desc, (void *)ctx->key +
7295 - ctx->split_key_pad_len, enckeylen,
7296 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7297 - } else {
7298 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7299 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7300 - append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7301 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7302 - }
7303 -
7304 - /* Load Counter into CONTEXT1 reg */
7305 - if (is_rfc3686) {
7306 - nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7307 - enckeylen);
7308 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7309 - LDST_CLASS_IND_CCB |
7310 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7311 - append_move(desc,
7312 - MOVE_SRC_OUTFIFO |
7313 - MOVE_DEST_CLASS1CTX |
7314 - (16 << MOVE_OFFSET_SHIFT) |
7315 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7316 - }
7317 -}
7318 -
7319 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7320 - int keys_fit_inline, bool is_rfc3686)
7321 -{
7322 - u32 *key_jump_cmd;
7323 -
7324 - /* Note: Context registers are saved. */
7325 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7326 -
7327 - /* Skip if already shared */
7328 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7329 - JUMP_COND_SHRD);
7330 -
7331 - append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7332 -
7333 - set_jump_tgt_here(desc, key_jump_cmd);
7334 -}
7335 -
7336 static int aead_null_set_sh_desc(struct crypto_aead *aead)
7337 {
7338 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7339 struct device *jrdev = ctx->jrdev;
7340 - bool keys_fit_inline = false;
7341 - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7342 u32 *desc;
7343 + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7344 + ctx->adata.keylen_pad;
7345
7346 /*
7347 * Job Descriptor and Shared Descriptors
7348 * must all fit into the 64-word Descriptor h/w Buffer
7349 */
7350 - if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7351 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7352 - keys_fit_inline = true;
7353 + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7354 + ctx->adata.key_inline = true;
7355 + ctx->adata.key_virt = ctx->key;
7356 + } else {
7357 + ctx->adata.key_inline = false;
7358 + ctx->adata.key_dma = ctx->key_dma;
7359 + }
7360
7361 /* aead_encrypt shared descriptor */
7362 desc = ctx->sh_desc_enc;
7363 -
7364 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7365 -
7366 - /* Skip if already shared */
7367 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7368 - JUMP_COND_SHRD);
7369 - if (keys_fit_inline)
7370 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7371 - ctx->split_key_len, CLASS_2 |
7372 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7373 - else
7374 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7375 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7376 - set_jump_tgt_here(desc, key_jump_cmd);
7377 -
7378 - /* assoclen + cryptlen = seqinlen */
7379 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7380 -
7381 - /* Prepare to read and write cryptlen + assoclen bytes */
7382 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7383 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7384 -
7385 - /*
7386 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7387 - * thus need to do some magic, i.e. self-patch the descriptor
7388 - * buffer.
7389 - */
7390 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7391 - MOVE_DEST_MATH3 |
7392 - (0x6 << MOVE_LEN_SHIFT));
7393 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7394 - MOVE_DEST_DESCBUF |
7395 - MOVE_WAITCOMP |
7396 - (0x8 << MOVE_LEN_SHIFT));
7397 -
7398 - /* Class 2 operation */
7399 - append_operation(desc, ctx->class2_alg_type |
7400 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7401 -
7402 - /* Read and write cryptlen bytes */
7403 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7404 -
7405 - set_move_tgt_here(desc, read_move_cmd);
7406 - set_move_tgt_here(desc, write_move_cmd);
7407 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7408 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7409 - MOVE_AUX_LS);
7410 -
7411 - /* Write ICV */
7412 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7413 - LDST_SRCDST_BYTE_CONTEXT);
7414 -
7415 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7416 - desc_bytes(desc),
7417 - DMA_TO_DEVICE);
7418 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7419 - dev_err(jrdev, "unable to map shared descriptor\n");
7420 - return -ENOMEM;
7421 - }
7422 -#ifdef DEBUG
7423 - print_hex_dump(KERN_ERR,
7424 - "aead null enc shdesc@"__stringify(__LINE__)": ",
7425 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7426 - desc_bytes(desc), 1);
7427 -#endif
7428 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
7429 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7430 + desc_bytes(desc), DMA_TO_DEVICE);
7431
7432 /*
7433 * Job Descriptor and Shared Descriptors
7434 * must all fit into the 64-word Descriptor h/w Buffer
7435 */
7436 - keys_fit_inline = false;
7437 - if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7438 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7439 - keys_fit_inline = true;
7440 -
7441 - desc = ctx->sh_desc_dec;
7442 + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7443 + ctx->adata.key_inline = true;
7444 + ctx->adata.key_virt = ctx->key;
7445 + } else {
7446 + ctx->adata.key_inline = false;
7447 + ctx->adata.key_dma = ctx->key_dma;
7448 + }
7449
7450 /* aead_decrypt shared descriptor */
7451 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7452 -
7453 - /* Skip if already shared */
7454 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7455 - JUMP_COND_SHRD);
7456 - if (keys_fit_inline)
7457 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7458 - ctx->split_key_len, CLASS_2 |
7459 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7460 - else
7461 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7462 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7463 - set_jump_tgt_here(desc, key_jump_cmd);
7464 -
7465 - /* Class 2 operation */
7466 - append_operation(desc, ctx->class2_alg_type |
7467 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7468 -
7469 - /* assoclen + cryptlen = seqoutlen */
7470 - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7471 -
7472 - /* Prepare to read and write cryptlen + assoclen bytes */
7473 - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7474 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7475 -
7476 - /*
7477 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7478 - * thus need to do some magic, i.e. self-patch the descriptor
7479 - * buffer.
7480 - */
7481 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7482 - MOVE_DEST_MATH2 |
7483 - (0x6 << MOVE_LEN_SHIFT));
7484 - write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7485 - MOVE_DEST_DESCBUF |
7486 - MOVE_WAITCOMP |
7487 - (0x8 << MOVE_LEN_SHIFT));
7488 -
7489 - /* Read and write cryptlen bytes */
7490 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7491 -
7492 - /*
7493 - * Insert a NOP here, since we need at least 4 instructions between
7494 - * code patching the descriptor buffer and the location being patched.
7495 - */
7496 - jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7497 - set_jump_tgt_here(desc, jump_cmd);
7498 -
7499 - set_move_tgt_here(desc, read_move_cmd);
7500 - set_move_tgt_here(desc, write_move_cmd);
7501 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7502 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7503 - MOVE_AUX_LS);
7504 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7505 -
7506 - /* Load ICV */
7507 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7508 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7509 -
7510 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7511 - desc_bytes(desc),
7512 - DMA_TO_DEVICE);
7513 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7514 - dev_err(jrdev, "unable to map shared descriptor\n");
7515 - return -ENOMEM;
7516 - }
7517 -#ifdef DEBUG
7518 - print_hex_dump(KERN_ERR,
7519 - "aead null dec shdesc@"__stringify(__LINE__)": ",
7520 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7521 - desc_bytes(desc), 1);
7522 -#endif
7523 + desc = ctx->sh_desc_dec;
7524 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
7525 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7526 + desc_bytes(desc), DMA_TO_DEVICE);
7527
7528 return 0;
7529 }
7530 @@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7531 unsigned int ivsize = crypto_aead_ivsize(aead);
7532 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7533 struct device *jrdev = ctx->jrdev;
7534 - bool keys_fit_inline;
7535 - u32 geniv, moveiv;
7536 u32 ctx1_iv_off = 0;
7537 - u32 *desc;
7538 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7539 + u32 *desc, *nonce = NULL;
7540 + u32 inl_mask;
7541 + unsigned int data_len[2];
7542 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7543 OP_ALG_AAI_CTR_MOD128);
7544 const bool is_rfc3686 = alg->caam.rfc3686;
7545
7546 @@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7547 return 0;
7548
7549 /* NULL encryption / decryption */
7550 - if (!ctx->enckeylen)
7551 + if (!ctx->cdata.keylen)
7552 return aead_null_set_sh_desc(aead);
7553
7554 /*
7555 @@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7556 * RFC3686 specific:
7557 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7558 */
7559 - if (is_rfc3686)
7560 + if (is_rfc3686) {
7561 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7562 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7563 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7564 + }
7565 +
7566 + data_len[0] = ctx->adata.keylen_pad;
7567 + data_len[1] = ctx->cdata.keylen;
7568
7569 if (alg->caam.geniv)
7570 goto skip_enc;
7571 @@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7572 * Job Descriptor and Shared Descriptors
7573 * must all fit into the 64-word Descriptor h/w Buffer
7574 */
7575 - keys_fit_inline = false;
7576 - if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7577 - ctx->split_key_pad_len + ctx->enckeylen +
7578 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7579 - CAAM_DESC_BYTES_MAX)
7580 - keys_fit_inline = true;
7581 -
7582 - /* aead_encrypt shared descriptor */
7583 - desc = ctx->sh_desc_enc;
7584 -
7585 - /* Note: Context registers are saved. */
7586 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7587 -
7588 - /* Class 2 operation */
7589 - append_operation(desc, ctx->class2_alg_type |
7590 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7591 + if (desc_inline_query(DESC_AEAD_ENC_LEN +
7592 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7593 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7594 + ARRAY_SIZE(data_len)) < 0)
7595 + return -EINVAL;
7596
7597 - /* Read and write assoclen bytes */
7598 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7599 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7600 + if (inl_mask & 1)
7601 + ctx->adata.key_virt = ctx->key;
7602 + else
7603 + ctx->adata.key_dma = ctx->key_dma;
7604
7605 - /* Skip assoc data */
7606 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7607 + if (inl_mask & 2)
7608 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7609 + else
7610 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7611
7612 - /* read assoc before reading payload */
7613 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7614 - FIFOLDST_VLF);
7615 + ctx->adata.key_inline = !!(inl_mask & 1);
7616 + ctx->cdata.key_inline = !!(inl_mask & 2);
7617
7618 - /* Load Counter into CONTEXT1 reg */
7619 - if (is_rfc3686)
7620 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7621 - LDST_SRCDST_BYTE_CONTEXT |
7622 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7623 - LDST_OFFSET_SHIFT));
7624 -
7625 - /* Class 1 operation */
7626 - append_operation(desc, ctx->class1_alg_type |
7627 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7628 -
7629 - /* Read and write cryptlen bytes */
7630 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7631 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7632 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7633 -
7634 - /* Write ICV */
7635 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7636 - LDST_SRCDST_BYTE_CONTEXT);
7637 -
7638 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7639 - desc_bytes(desc),
7640 - DMA_TO_DEVICE);
7641 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7642 - dev_err(jrdev, "unable to map shared descriptor\n");
7643 - return -ENOMEM;
7644 - }
7645 -#ifdef DEBUG
7646 - print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7647 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7648 - desc_bytes(desc), 1);
7649 -#endif
7650 + /* aead_encrypt shared descriptor */
7651 + desc = ctx->sh_desc_enc;
7652 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7653 + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7654 + false);
7655 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7656 + desc_bytes(desc), DMA_TO_DEVICE);
7657
7658 skip_enc:
7659 /*
7660 * Job Descriptor and Shared Descriptors
7661 * must all fit into the 64-word Descriptor h/w Buffer
7662 */
7663 - keys_fit_inline = false;
7664 - if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7665 - ctx->split_key_pad_len + ctx->enckeylen +
7666 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7667 - CAAM_DESC_BYTES_MAX)
7668 - keys_fit_inline = true;
7669 -
7670 - /* aead_decrypt shared descriptor */
7671 - desc = ctx->sh_desc_dec;
7672 -
7673 - /* Note: Context registers are saved. */
7674 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7675 -
7676 - /* Class 2 operation */
7677 - append_operation(desc, ctx->class2_alg_type |
7678 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7679 + if (desc_inline_query(DESC_AEAD_DEC_LEN +
7680 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7681 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7682 + ARRAY_SIZE(data_len)) < 0)
7683 + return -EINVAL;
7684
7685 - /* Read and write assoclen bytes */
7686 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7687 - if (alg->caam.geniv)
7688 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7689 + if (inl_mask & 1)
7690 + ctx->adata.key_virt = ctx->key;
7691 else
7692 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7693 -
7694 - /* Skip assoc data */
7695 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7696 + ctx->adata.key_dma = ctx->key_dma;
7697
7698 - /* read assoc before reading payload */
7699 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7700 - KEY_VLF);
7701 -
7702 - if (alg->caam.geniv) {
7703 - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7704 - LDST_SRCDST_BYTE_CONTEXT |
7705 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
7706 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7707 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7708 - }
7709 -
7710 - /* Load Counter into CONTEXT1 reg */
7711 - if (is_rfc3686)
7712 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7713 - LDST_SRCDST_BYTE_CONTEXT |
7714 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7715 - LDST_OFFSET_SHIFT));
7716 -
7717 - /* Choose operation */
7718 - if (ctr_mode)
7719 - append_operation(desc, ctx->class1_alg_type |
7720 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7721 + if (inl_mask & 2)
7722 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7723 else
7724 - append_dec_op1(desc, ctx->class1_alg_type);
7725 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7726
7727 - /* Read and write cryptlen bytes */
7728 - append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7729 - append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7730 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7731 + ctx->adata.key_inline = !!(inl_mask & 1);
7732 + ctx->cdata.key_inline = !!(inl_mask & 2);
7733
7734 - /* Load ICV */
7735 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7736 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7737 -
7738 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7739 - desc_bytes(desc),
7740 - DMA_TO_DEVICE);
7741 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7742 - dev_err(jrdev, "unable to map shared descriptor\n");
7743 - return -ENOMEM;
7744 - }
7745 -#ifdef DEBUG
7746 - print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7747 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7748 - desc_bytes(desc), 1);
7749 -#endif
7750 + /* aead_decrypt shared descriptor */
7751 + desc = ctx->sh_desc_dec;
7752 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7753 + ctx->authsize, alg->caam.geniv, is_rfc3686,
7754 + nonce, ctx1_iv_off, false);
7755 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7756 + desc_bytes(desc), DMA_TO_DEVICE);
7757
7758 if (!alg->caam.geniv)
7759 goto skip_givenc;
7760 @@ -655,107 +277,32 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7761 * Job Descriptor and Shared Descriptors
7762 * must all fit into the 64-word Descriptor h/w Buffer
7763 */
7764 - keys_fit_inline = false;
7765 - if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7766 - ctx->split_key_pad_len + ctx->enckeylen +
7767 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7768 - CAAM_DESC_BYTES_MAX)
7769 - keys_fit_inline = true;
7770 + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7771 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7772 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7773 + ARRAY_SIZE(data_len)) < 0)
7774 + return -EINVAL;
7775
7776 - /* aead_givencrypt shared descriptor */
7777 - desc = ctx->sh_desc_enc;
7778 + if (inl_mask & 1)
7779 + ctx->adata.key_virt = ctx->key;
7780 + else
7781 + ctx->adata.key_dma = ctx->key_dma;
7782
7783 - /* Note: Context registers are saved. */
7784 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7785 + if (inl_mask & 2)
7786 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7787 + else
7788 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7789
7790 - if (is_rfc3686)
7791 - goto copy_iv;
7792 -
7793 - /* Generate IV */
7794 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7795 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7796 - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7797 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7798 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7799 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7800 - append_move(desc, MOVE_WAITCOMP |
7801 - MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7802 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7803 - (ivsize << MOVE_LEN_SHIFT));
7804 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7805 -
7806 -copy_iv:
7807 - /* Copy IV to class 1 context */
7808 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7809 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7810 - (ivsize << MOVE_LEN_SHIFT));
7811 -
7812 - /* Return to encryption */
7813 - append_operation(desc, ctx->class2_alg_type |
7814 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7815 -
7816 - /* Read and write assoclen bytes */
7817 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7818 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7819 -
7820 - /* ivsize + cryptlen = seqoutlen - authsize */
7821 - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7822 -
7823 - /* Skip assoc data */
7824 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7825 -
7826 - /* read assoc before reading payload */
7827 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7828 - KEY_VLF);
7829 -
7830 - /* Copy iv from outfifo to class 2 fifo */
7831 - moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7832 - NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7833 - append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7834 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7835 - append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7836 - LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7837 -
7838 - /* Load Counter into CONTEXT1 reg */
7839 - if (is_rfc3686)
7840 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7841 - LDST_SRCDST_BYTE_CONTEXT |
7842 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7843 - LDST_OFFSET_SHIFT));
7844 -
7845 - /* Class 1 operation */
7846 - append_operation(desc, ctx->class1_alg_type |
7847 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7848 -
7849 - /* Will write ivsize + cryptlen */
7850 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7851 -
7852 - /* Not need to reload iv */
7853 - append_seq_fifo_load(desc, ivsize,
7854 - FIFOLD_CLASS_SKIP);
7855 -
7856 - /* Will read cryptlen */
7857 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7858 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7859 - FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7860 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7861 -
7862 - /* Write ICV */
7863 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7864 - LDST_SRCDST_BYTE_CONTEXT);
7865 -
7866 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7867 - desc_bytes(desc),
7868 - DMA_TO_DEVICE);
7869 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7870 - dev_err(jrdev, "unable to map shared descriptor\n");
7871 - return -ENOMEM;
7872 - }
7873 -#ifdef DEBUG
7874 - print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7875 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7876 - desc_bytes(desc), 1);
7877 -#endif
7878 + ctx->adata.key_inline = !!(inl_mask & 1);
7879 + ctx->cdata.key_inline = !!(inl_mask & 2);
7880 +
7881 + /* aead_givencrypt shared descriptor */
7882 + desc = ctx->sh_desc_enc;
7883 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7884 + ctx->authsize, is_rfc3686, nonce,
7885 + ctx1_iv_off, false);
7886 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7887 + desc_bytes(desc), DMA_TO_DEVICE);
7888
7889 skip_givenc:
7890 return 0;
7891 @@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
7892 {
7893 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7894 struct device *jrdev = ctx->jrdev;
7895 - bool keys_fit_inline = false;
7896 - u32 *key_jump_cmd, *zero_payload_jump_cmd,
7897 - *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7898 + unsigned int ivsize = crypto_aead_ivsize(aead);
7899 u32 *desc;
7900 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7901 + ctx->cdata.keylen;
7902
7903 - if (!ctx->enckeylen || !ctx->authsize)
7904 + if (!ctx->cdata.keylen || !ctx->authsize)
7905 return 0;
7906
7907 /*
7908 @@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
7909 * Job Descriptor and Shared Descriptor
7910 * must fit into the 64-word Descriptor h/w Buffer
7911 */
7912 - if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7913 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7914 - keys_fit_inline = true;
7915 + if (rem_bytes >= DESC_GCM_ENC_LEN) {
7916 + ctx->cdata.key_inline = true;
7917 + ctx->cdata.key_virt = ctx->key;
7918 + } else {
7919 + ctx->cdata.key_inline = false;
7920 + ctx->cdata.key_dma = ctx->key_dma;
7921 + }
7922
7923 desc = ctx->sh_desc_enc;
7924 -
7925 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7926 -
7927 - /* skip key loading if they are loaded due to sharing */
7928 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7929 - JUMP_COND_SHRD | JUMP_COND_SELF);
7930 - if (keys_fit_inline)
7931 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7932 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7933 - else
7934 - append_key(desc, ctx->key_dma, ctx->enckeylen,
7935 - CLASS_1 | KEY_DEST_CLASS_REG);
7936 - set_jump_tgt_here(desc, key_jump_cmd);
7937 -
7938 - /* class 1 operation */
7939 - append_operation(desc, ctx->class1_alg_type |
7940 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7941 -
7942 - /* if assoclen + cryptlen is ZERO, skip to ICV write */
7943 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7944 - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7945 - JUMP_COND_MATH_Z);
7946 -
7947 - /* if assoclen is ZERO, skip reading the assoc data */
7948 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7949 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7950 - JUMP_COND_MATH_Z);
7951 -
7952 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7953 -
7954 - /* skip assoc data */
7955 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7956 -
7957 - /* cryptlen = seqinlen - assoclen */
7958 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7959 -
7960 - /* if cryptlen is ZERO jump to zero-payload commands */
7961 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7962 - JUMP_COND_MATH_Z);
7963 -
7964 - /* read assoc data */
7965 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7966 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7967 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7968 -
7969 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7970 -
7971 - /* write encrypted data */
7972 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7973 -
7974 - /* read payload data */
7975 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7976 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7977 -
7978 - /* jump the zero-payload commands */
7979 - append_jump(desc, JUMP_TEST_ALL | 2);
7980 -
7981 - /* zero-payload commands */
7982 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
7983 -
7984 - /* read assoc data */
7985 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7986 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7987 -
7988 - /* There is no input data */
7989 - set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7990 -
7991 - /* write ICV */
7992 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
7993 - LDST_SRCDST_BYTE_CONTEXT);
7994 -
7995 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7996 - desc_bytes(desc),
7997 - DMA_TO_DEVICE);
7998 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7999 - dev_err(jrdev, "unable to map shared descriptor\n");
8000 - return -ENOMEM;
8001 - }
8002 -#ifdef DEBUG
8003 - print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
8004 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8005 - desc_bytes(desc), 1);
8006 -#endif
8007 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8008 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8009 + desc_bytes(desc), DMA_TO_DEVICE);
8010
8011 /*
8012 * Job Descriptor and Shared Descriptors
8013 * must all fit into the 64-word Descriptor h/w Buffer
8014 */
8015 - keys_fit_inline = false;
8016 - if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8017 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8018 - keys_fit_inline = true;
8019 + if (rem_bytes >= DESC_GCM_DEC_LEN) {
8020 + ctx->cdata.key_inline = true;
8021 + ctx->cdata.key_virt = ctx->key;
8022 + } else {
8023 + ctx->cdata.key_inline = false;
8024 + ctx->cdata.key_dma = ctx->key_dma;
8025 + }
8026
8027 desc = ctx->sh_desc_dec;
8028 -
8029 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8030 -
8031 - /* skip key loading if they are loaded due to sharing */
8032 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8033 - JUMP_TEST_ALL | JUMP_COND_SHRD |
8034 - JUMP_COND_SELF);
8035 - if (keys_fit_inline)
8036 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8037 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8038 - else
8039 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8040 - CLASS_1 | KEY_DEST_CLASS_REG);
8041 - set_jump_tgt_here(desc, key_jump_cmd);
8042 -
8043 - /* class 1 operation */
8044 - append_operation(desc, ctx->class1_alg_type |
8045 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8046 -
8047 - /* if assoclen is ZERO, skip reading the assoc data */
8048 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8049 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8050 - JUMP_COND_MATH_Z);
8051 -
8052 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8053 -
8054 - /* skip assoc data */
8055 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8056 -
8057 - /* read assoc data */
8058 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8059 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8060 -
8061 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8062 -
8063 - /* cryptlen = seqoutlen - assoclen */
8064 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8065 -
8066 - /* jump to zero-payload command if cryptlen is zero */
8067 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8068 - JUMP_COND_MATH_Z);
8069 -
8070 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8071 -
8072 - /* store encrypted data */
8073 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8074 -
8075 - /* read payload data */
8076 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8077 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8078 -
8079 - /* zero-payload command */
8080 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
8081 -
8082 - /* read ICV */
8083 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8084 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8085 -
8086 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8087 - desc_bytes(desc),
8088 - DMA_TO_DEVICE);
8089 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8090 - dev_err(jrdev, "unable to map shared descriptor\n");
8091 - return -ENOMEM;
8092 - }
8093 -#ifdef DEBUG
8094 - print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8095 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8096 - desc_bytes(desc), 1);
8097 -#endif
8098 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8099 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8100 + desc_bytes(desc), DMA_TO_DEVICE);
8101
8102 return 0;
8103 }
8104 @@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
8105 {
8106 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8107 struct device *jrdev = ctx->jrdev;
8108 - bool keys_fit_inline = false;
8109 - u32 *key_jump_cmd;
8110 + unsigned int ivsize = crypto_aead_ivsize(aead);
8111 u32 *desc;
8112 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8113 + ctx->cdata.keylen;
8114
8115 - if (!ctx->enckeylen || !ctx->authsize)
8116 + if (!ctx->cdata.keylen || !ctx->authsize)
8117 return 0;
8118
8119 /*
8120 @@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
8121 * Job Descriptor and Shared Descriptor
8122 * must fit into the 64-word Descriptor h/w Buffer
8123 */
8124 - if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8125 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8126 - keys_fit_inline = true;
8127 + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8128 + ctx->cdata.key_inline = true;
8129 + ctx->cdata.key_virt = ctx->key;
8130 + } else {
8131 + ctx->cdata.key_inline = false;
8132 + ctx->cdata.key_dma = ctx->key_dma;
8133 + }
8134
8135 desc = ctx->sh_desc_enc;
8136 -
8137 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8138 -
8139 - /* Skip key loading if it is loaded due to sharing */
8140 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8141 - JUMP_COND_SHRD);
8142 - if (keys_fit_inline)
8143 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8144 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8145 - else
8146 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8147 - CLASS_1 | KEY_DEST_CLASS_REG);
8148 - set_jump_tgt_here(desc, key_jump_cmd);
8149 -
8150 - /* Class 1 operation */
8151 - append_operation(desc, ctx->class1_alg_type |
8152 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8153 -
8154 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8155 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8156 -
8157 - /* Read assoc data */
8158 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8159 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8160 -
8161 - /* Skip IV */
8162 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8163 -
8164 - /* Will read cryptlen bytes */
8165 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8166 -
8167 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8168 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8169 -
8170 - /* Skip assoc data */
8171 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8172 -
8173 - /* cryptlen = seqoutlen - assoclen */
8174 - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8175 -
8176 - /* Write encrypted data */
8177 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8178 -
8179 - /* Read payload data */
8180 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8181 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8182 -
8183 - /* Write ICV */
8184 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8185 - LDST_SRCDST_BYTE_CONTEXT);
8186 -
8187 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8188 - desc_bytes(desc),
8189 - DMA_TO_DEVICE);
8190 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8191 - dev_err(jrdev, "unable to map shared descriptor\n");
8192 - return -ENOMEM;
8193 - }
8194 -#ifdef DEBUG
8195 - print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8196 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8197 - desc_bytes(desc), 1);
8198 -#endif
8199 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8200 + false);
8201 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8202 + desc_bytes(desc), DMA_TO_DEVICE);
8203
8204 /*
8205 * Job Descriptor and Shared Descriptors
8206 * must all fit into the 64-word Descriptor h/w Buffer
8207 */
8208 - keys_fit_inline = false;
8209 - if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8210 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8211 - keys_fit_inline = true;
8212 + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8213 + ctx->cdata.key_inline = true;
8214 + ctx->cdata.key_virt = ctx->key;
8215 + } else {
8216 + ctx->cdata.key_inline = false;
8217 + ctx->cdata.key_dma = ctx->key_dma;
8218 + }
8219
8220 desc = ctx->sh_desc_dec;
8221 -
8222 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8223 -
8224 - /* Skip key loading if it is loaded due to sharing */
8225 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8226 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8227 - if (keys_fit_inline)
8228 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8229 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8230 - else
8231 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8232 - CLASS_1 | KEY_DEST_CLASS_REG);
8233 - set_jump_tgt_here(desc, key_jump_cmd);
8234 -
8235 - /* Class 1 operation */
8236 - append_operation(desc, ctx->class1_alg_type |
8237 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8238 -
8239 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8240 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8241 -
8242 - /* Read assoc data */
8243 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8244 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8245 -
8246 - /* Skip IV */
8247 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8248 -
8249 - /* Will read cryptlen bytes */
8250 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8251 -
8252 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8253 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8254 -
8255 - /* Skip assoc data */
8256 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8257 -
8258 - /* Will write cryptlen bytes */
8259 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8260 -
8261 - /* Store payload data */
8262 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8263 -
8264 - /* Read encrypted data */
8265 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8266 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8267 -
8268 - /* Read ICV */
8269 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8270 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8271 -
8272 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8273 - desc_bytes(desc),
8274 - DMA_TO_DEVICE);
8275 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8276 - dev_err(jrdev, "unable to map shared descriptor\n");
8277 - return -ENOMEM;
8278 - }
8279 -#ifdef DEBUG
8280 - print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8281 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8282 - desc_bytes(desc), 1);
8283 -#endif
8284 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8285 + false);
8286 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8287 + desc_bytes(desc), DMA_TO_DEVICE);
8288
8289 return 0;
8290 }
8291 @@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
8292 {
8293 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8294 struct device *jrdev = ctx->jrdev;
8295 - bool keys_fit_inline = false;
8296 - u32 *key_jump_cmd;
8297 - u32 *read_move_cmd, *write_move_cmd;
8298 + unsigned int ivsize = crypto_aead_ivsize(aead);
8299 u32 *desc;
8300 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8301 + ctx->cdata.keylen;
8302
8303 - if (!ctx->enckeylen || !ctx->authsize)
8304 + if (!ctx->cdata.keylen || !ctx->authsize)
8305 return 0;
8306
8307 /*
8308 @@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
8309 * Job Descriptor and Shared Descriptor
8310 * must fit into the 64-word Descriptor h/w Buffer
8311 */
8312 - if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8313 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8314 - keys_fit_inline = true;
8315 -
8316 - desc = ctx->sh_desc_enc;
8317 -
8318 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8319 -
8320 - /* Skip key loading if it is loaded due to sharing */
8321 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8322 - JUMP_COND_SHRD);
8323 - if (keys_fit_inline)
8324 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8325 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8326 - else
8327 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8328 - CLASS_1 | KEY_DEST_CLASS_REG);
8329 - set_jump_tgt_here(desc, key_jump_cmd);
8330 -
8331 - /* Class 1 operation */
8332 - append_operation(desc, ctx->class1_alg_type |
8333 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8334 -
8335 - /* assoclen + cryptlen = seqinlen */
8336 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8337 -
8338 - /*
8339 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8340 - * thus need to do some magic, i.e. self-patch the descriptor
8341 - * buffer.
8342 - */
8343 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8344 - (0x6 << MOVE_LEN_SHIFT));
8345 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8346 - (0x8 << MOVE_LEN_SHIFT));
8347 -
8348 - /* Will read assoclen + cryptlen bytes */
8349 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8350 -
8351 - /* Will write assoclen + cryptlen bytes */
8352 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8353 -
8354 - /* Read and write assoclen + cryptlen bytes */
8355 - aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8356 -
8357 - set_move_tgt_here(desc, read_move_cmd);
8358 - set_move_tgt_here(desc, write_move_cmd);
8359 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8360 - /* Move payload data to OFIFO */
8361 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8362 -
8363 - /* Write ICV */
8364 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8365 - LDST_SRCDST_BYTE_CONTEXT);
8366 -
8367 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8368 - desc_bytes(desc),
8369 - DMA_TO_DEVICE);
8370 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8371 - dev_err(jrdev, "unable to map shared descriptor\n");
8372 - return -ENOMEM;
8373 + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8374 + ctx->cdata.key_inline = true;
8375 + ctx->cdata.key_virt = ctx->key;
8376 + } else {
8377 + ctx->cdata.key_inline = false;
8378 + ctx->cdata.key_dma = ctx->key_dma;
8379 }
8380 -#ifdef DEBUG
8381 - print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8382 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8383 - desc_bytes(desc), 1);
8384 -#endif
8385 +
8386 + desc = ctx->sh_desc_enc;
8387 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8388 + false);
8389 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8390 + desc_bytes(desc), DMA_TO_DEVICE);
8391
8392 /*
8393 * Job Descriptor and Shared Descriptors
8394 * must all fit into the 64-word Descriptor h/w Buffer
8395 */
8396 - keys_fit_inline = false;
8397 - if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8398 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8399 - keys_fit_inline = true;
8400 + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8401 + ctx->cdata.key_inline = true;
8402 + ctx->cdata.key_virt = ctx->key;
8403 + } else {
8404 + ctx->cdata.key_inline = false;
8405 + ctx->cdata.key_dma = ctx->key_dma;
8406 + }
8407
8408 desc = ctx->sh_desc_dec;
8409 -
8410 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8411 -
8412 - /* Skip key loading if it is loaded due to sharing */
8413 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8414 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8415 - if (keys_fit_inline)
8416 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8417 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8418 - else
8419 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8420 - CLASS_1 | KEY_DEST_CLASS_REG);
8421 - set_jump_tgt_here(desc, key_jump_cmd);
8422 -
8423 - /* Class 1 operation */
8424 - append_operation(desc, ctx->class1_alg_type |
8425 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8426 -
8427 - /* assoclen + cryptlen = seqoutlen */
8428 - append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8429 -
8430 - /*
8431 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8432 - * thus need to do some magic, i.e. self-patch the descriptor
8433 - * buffer.
8434 - */
8435 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8436 - (0x6 << MOVE_LEN_SHIFT));
8437 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8438 - (0x8 << MOVE_LEN_SHIFT));
8439 -
8440 - /* Will read assoclen + cryptlen bytes */
8441 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8442 -
8443 - /* Will write assoclen + cryptlen bytes */
8444 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8445 -
8446 - /* Store payload data */
8447 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8448 -
8449 - /* In-snoop assoclen + cryptlen data */
8450 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8451 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8452 -
8453 - set_move_tgt_here(desc, read_move_cmd);
8454 - set_move_tgt_here(desc, write_move_cmd);
8455 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8456 - /* Move payload data to OFIFO */
8457 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8458 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8459 -
8460 - /* Read ICV */
8461 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8462 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8463 -
8464 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8465 - desc_bytes(desc),
8466 - DMA_TO_DEVICE);
8467 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8468 - dev_err(jrdev, "unable to map shared descriptor\n");
8469 - return -ENOMEM;
8470 - }
8471 -#ifdef DEBUG
8472 - print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8473 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8474 - desc_bytes(desc), 1);
8475 -#endif
8476 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8477 + false);
8478 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8479 + desc_bytes(desc), DMA_TO_DEVICE);
8480
8481 return 0;
8482 }
8483 @@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
8484 return 0;
8485 }
8486
8487 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8488 - u32 authkeylen)
8489 -{
8490 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8491 - ctx->split_key_pad_len, key_in, authkeylen,
8492 - ctx->alg_op);
8493 -}
8494 -
8495 static int aead_setkey(struct crypto_aead *aead,
8496 const u8 *key, unsigned int keylen)
8497 {
8498 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8499 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8500 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8501 struct device *jrdev = ctx->jrdev;
8502 struct crypto_authenc_keys keys;
8503 @@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aead *aead,
8504 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8505 goto badkey;
8506
8507 - /* Pick class 2 key length from algorithm submask */
8508 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8509 - OP_ALG_ALGSEL_SHIFT] * 2;
8510 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8511 -
8512 - if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8513 - goto badkey;
8514 -
8515 #ifdef DEBUG
8516 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8517 keys.authkeylen + keys.enckeylen, keys.enckeylen,
8518 keys.authkeylen);
8519 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8520 - ctx->split_key_len, ctx->split_key_pad_len);
8521 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8522 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8523 #endif
8524
8525 - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8526 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8527 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
8528 + keys.enckeylen);
8529 if (ret) {
8530 goto badkey;
8531 }
8532
8533 /* postpend encryption key to auth split key */
8534 - memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8535 -
8536 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8537 - keys.enckeylen, DMA_TO_DEVICE);
8538 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8539 - dev_err(jrdev, "unable to map key i/o memory\n");
8540 - return -ENOMEM;
8541 - }
8542 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8543 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8544 + keys.enckeylen, DMA_TO_DEVICE);
8545 #ifdef DEBUG
8546 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8547 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8548 - ctx->split_key_pad_len + keys.enckeylen, 1);
8549 + ctx->adata.keylen_pad + keys.enckeylen, 1);
8550 #endif
8551 -
8552 - ctx->enckeylen = keys.enckeylen;
8553 -
8554 - ret = aead_set_sh_desc(aead);
8555 - if (ret) {
8556 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8557 - keys.enckeylen, DMA_TO_DEVICE);
8558 - }
8559 -
8560 - return ret;
8561 + ctx->cdata.keylen = keys.enckeylen;
8562 + return aead_set_sh_desc(aead);
8563 badkey:
8564 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8565 return -EINVAL;
8566 @@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead *aead,
8567 {
8568 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8569 struct device *jrdev = ctx->jrdev;
8570 - int ret = 0;
8571
8572 #ifdef DEBUG
8573 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8574 @@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead *aead,
8575 #endif
8576
8577 memcpy(ctx->key, key, keylen);
8578 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8579 - DMA_TO_DEVICE);
8580 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8581 - dev_err(jrdev, "unable to map key i/o memory\n");
8582 - return -ENOMEM;
8583 - }
8584 - ctx->enckeylen = keylen;
8585 -
8586 - ret = gcm_set_sh_desc(aead);
8587 - if (ret) {
8588 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8589 - DMA_TO_DEVICE);
8590 - }
8591 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8592 + ctx->cdata.keylen = keylen;
8593
8594 - return ret;
8595 + return gcm_set_sh_desc(aead);
8596 }
8597
8598 static int rfc4106_setkey(struct crypto_aead *aead,
8599 @@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_aead *aead,
8600 {
8601 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8602 struct device *jrdev = ctx->jrdev;
8603 - int ret = 0;
8604
8605 if (keylen < 4)
8606 return -EINVAL;
8607 @@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_aead *aead,
8608 * The last four bytes of the key material are used as the salt value
8609 * in the nonce. Update the AES key length.
8610 */
8611 - ctx->enckeylen = keylen - 4;
8612 -
8613 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8614 - DMA_TO_DEVICE);
8615 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8616 - dev_err(jrdev, "unable to map key i/o memory\n");
8617 - return -ENOMEM;
8618 - }
8619 -
8620 - ret = rfc4106_set_sh_desc(aead);
8621 - if (ret) {
8622 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8623 - DMA_TO_DEVICE);
8624 - }
8625 -
8626 - return ret;
8627 + ctx->cdata.keylen = keylen - 4;
8628 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8629 + DMA_TO_DEVICE);
8630 + return rfc4106_set_sh_desc(aead);
8631 }
8632
8633 static int rfc4543_setkey(struct crypto_aead *aead,
8634 @@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_aead *aead,
8635 {
8636 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8637 struct device *jrdev = ctx->jrdev;
8638 - int ret = 0;
8639
8640 if (keylen < 4)
8641 return -EINVAL;
8642 @@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_aead *aead,
8643 * The last four bytes of the key material are used as the salt value
8644 * in the nonce. Update the AES key length.
8645 */
8646 - ctx->enckeylen = keylen - 4;
8647 -
8648 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8649 - DMA_TO_DEVICE);
8650 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8651 - dev_err(jrdev, "unable to map key i/o memory\n");
8652 - return -ENOMEM;
8653 - }
8654 -
8655 - ret = rfc4543_set_sh_desc(aead);
8656 - if (ret) {
8657 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8658 - DMA_TO_DEVICE);
8659 - }
8660 -
8661 - return ret;
8662 + ctx->cdata.keylen = keylen - 4;
8663 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8664 + DMA_TO_DEVICE);
8665 + return rfc4543_set_sh_desc(aead);
8666 }
8667
8668 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8669 const u8 *key, unsigned int keylen)
8670 {
8671 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8672 - struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8673 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8674 const char *alg_name = crypto_tfm_alg_name(tfm);
8675 struct device *jrdev = ctx->jrdev;
8676 - int ret = 0;
8677 - u32 *key_jump_cmd;
8678 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8679 u32 *desc;
8680 - u8 *nonce;
8681 - u32 geniv;
8682 u32 ctx1_iv_off = 0;
8683 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8684 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8685 OP_ALG_AAI_CTR_MOD128);
8686 const bool is_rfc3686 = (ctr_mode &&
8687 (strstr(alg_name, "rfc3686") != NULL));
8688
8689 + memcpy(ctx->key, key, keylen);
8690 #ifdef DEBUG
8691 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8692 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8693 @@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8694 keylen -= CTR_RFC3686_NONCE_SIZE;
8695 }
8696
8697 - memcpy(ctx->key, key, keylen);
8698 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8699 - DMA_TO_DEVICE);
8700 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8701 - dev_err(jrdev, "unable to map key i/o memory\n");
8702 - return -ENOMEM;
8703 - }
8704 - ctx->enckeylen = keylen;
8705 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8706 + ctx->cdata.keylen = keylen;
8707 + ctx->cdata.key_virt = ctx->key;
8708 + ctx->cdata.key_inline = true;
8709
8710 /* ablkcipher_encrypt shared descriptor */
8711 desc = ctx->sh_desc_enc;
8712 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8713 - /* Skip if already shared */
8714 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8715 - JUMP_COND_SHRD);
8716 -
8717 - /* Load class1 key only */
8718 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8719 - ctx->enckeylen, CLASS_1 |
8720 - KEY_DEST_CLASS_REG);
8721 -
8722 - /* Load nonce into CONTEXT1 reg */
8723 - if (is_rfc3686) {
8724 - nonce = (u8 *)key + keylen;
8725 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8726 - LDST_CLASS_IND_CCB |
8727 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8728 - append_move(desc, MOVE_WAITCOMP |
8729 - MOVE_SRC_OUTFIFO |
8730 - MOVE_DEST_CLASS1CTX |
8731 - (16 << MOVE_OFFSET_SHIFT) |
8732 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8733 - }
8734 -
8735 - set_jump_tgt_here(desc, key_jump_cmd);
8736 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8737 + ctx1_iv_off);
8738 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8739 + desc_bytes(desc), DMA_TO_DEVICE);
8740
8741 - /* Load iv */
8742 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8743 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8744 -
8745 - /* Load counter into CONTEXT1 reg */
8746 - if (is_rfc3686)
8747 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8748 - LDST_SRCDST_BYTE_CONTEXT |
8749 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8750 - LDST_OFFSET_SHIFT));
8751 -
8752 - /* Load operation */
8753 - append_operation(desc, ctx->class1_alg_type |
8754 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8755 -
8756 - /* Perform operation */
8757 - ablkcipher_append_src_dst(desc);
8758 -
8759 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8760 - desc_bytes(desc),
8761 - DMA_TO_DEVICE);
8762 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8763 - dev_err(jrdev, "unable to map shared descriptor\n");
8764 - return -ENOMEM;
8765 - }
8766 -#ifdef DEBUG
8767 - print_hex_dump(KERN_ERR,
8768 - "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8769 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8770 - desc_bytes(desc), 1);
8771 -#endif
8772 /* ablkcipher_decrypt shared descriptor */
8773 desc = ctx->sh_desc_dec;
8774 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8775 + ctx1_iv_off);
8776 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8777 + desc_bytes(desc), DMA_TO_DEVICE);
8778
8779 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8780 - /* Skip if already shared */
8781 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8782 - JUMP_COND_SHRD);
8783 -
8784 - /* Load class1 key only */
8785 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8786 - ctx->enckeylen, CLASS_1 |
8787 - KEY_DEST_CLASS_REG);
8788 -
8789 - /* Load nonce into CONTEXT1 reg */
8790 - if (is_rfc3686) {
8791 - nonce = (u8 *)key + keylen;
8792 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8793 - LDST_CLASS_IND_CCB |
8794 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8795 - append_move(desc, MOVE_WAITCOMP |
8796 - MOVE_SRC_OUTFIFO |
8797 - MOVE_DEST_CLASS1CTX |
8798 - (16 << MOVE_OFFSET_SHIFT) |
8799 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8800 - }
8801 -
8802 - set_jump_tgt_here(desc, key_jump_cmd);
8803 -
8804 - /* load IV */
8805 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8806 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8807 -
8808 - /* Load counter into CONTEXT1 reg */
8809 - if (is_rfc3686)
8810 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8811 - LDST_SRCDST_BYTE_CONTEXT |
8812 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8813 - LDST_OFFSET_SHIFT));
8814 -
8815 - /* Choose operation */
8816 - if (ctr_mode)
8817 - append_operation(desc, ctx->class1_alg_type |
8818 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8819 - else
8820 - append_dec_op1(desc, ctx->class1_alg_type);
8821 -
8822 - /* Perform operation */
8823 - ablkcipher_append_src_dst(desc);
8824 -
8825 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8826 - desc_bytes(desc),
8827 - DMA_TO_DEVICE);
8828 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8829 - dev_err(jrdev, "unable to map shared descriptor\n");
8830 - return -ENOMEM;
8831 - }
8832 -
8833 -#ifdef DEBUG
8834 - print_hex_dump(KERN_ERR,
8835 - "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8836 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8837 - desc_bytes(desc), 1);
8838 -#endif
8839 /* ablkcipher_givencrypt shared descriptor */
8840 desc = ctx->sh_desc_givenc;
8841 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8842 + ctx1_iv_off);
8843 + dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8844 + desc_bytes(desc), DMA_TO_DEVICE);
8845
8846 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8847 - /* Skip if already shared */
8848 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8849 - JUMP_COND_SHRD);
8850 -
8851 - /* Load class1 key only */
8852 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8853 - ctx->enckeylen, CLASS_1 |
8854 - KEY_DEST_CLASS_REG);
8855 -
8856 - /* Load Nonce into CONTEXT1 reg */
8857 - if (is_rfc3686) {
8858 - nonce = (u8 *)key + keylen;
8859 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8860 - LDST_CLASS_IND_CCB |
8861 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8862 - append_move(desc, MOVE_WAITCOMP |
8863 - MOVE_SRC_OUTFIFO |
8864 - MOVE_DEST_CLASS1CTX |
8865 - (16 << MOVE_OFFSET_SHIFT) |
8866 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8867 - }
8868 - set_jump_tgt_here(desc, key_jump_cmd);
8869 -
8870 - /* Generate IV */
8871 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8872 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8873 - NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8874 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8875 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8876 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8877 - append_move(desc, MOVE_WAITCOMP |
8878 - MOVE_SRC_INFIFO |
8879 - MOVE_DEST_CLASS1CTX |
8880 - (crt->ivsize << MOVE_LEN_SHIFT) |
8881 - (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8882 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8883 -
8884 - /* Copy generated IV to memory */
8885 - append_seq_store(desc, crt->ivsize,
8886 - LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8887 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
8888 -
8889 - /* Load Counter into CONTEXT1 reg */
8890 - if (is_rfc3686)
8891 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8892 - LDST_SRCDST_BYTE_CONTEXT |
8893 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8894 - LDST_OFFSET_SHIFT));
8895 -
8896 - if (ctx1_iv_off)
8897 - append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8898 - (1 << JUMP_OFFSET_SHIFT));
8899 -
8900 - /* Load operation */
8901 - append_operation(desc, ctx->class1_alg_type |
8902 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8903 -
8904 - /* Perform operation */
8905 - ablkcipher_append_src_dst(desc);
8906 -
8907 - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8908 - desc_bytes(desc),
8909 - DMA_TO_DEVICE);
8910 - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8911 - dev_err(jrdev, "unable to map shared descriptor\n");
8912 - return -ENOMEM;
8913 - }
8914 -#ifdef DEBUG
8915 - print_hex_dump(KERN_ERR,
8916 - "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8917 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8918 - desc_bytes(desc), 1);
8919 -#endif
8920 -
8921 - return ret;
8922 + return 0;
8923 }
8924
8925 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8926 @@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8927 {
8928 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8929 struct device *jrdev = ctx->jrdev;
8930 - u32 *key_jump_cmd, *desc;
8931 - __be64 sector_size = cpu_to_be64(512);
8932 + u32 *desc;
8933
8934 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
8935 crypto_ablkcipher_set_flags(ablkcipher,
8936 @@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8937 }
8938
8939 memcpy(ctx->key, key, keylen);
8940 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8941 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8942 - dev_err(jrdev, "unable to map key i/o memory\n");
8943 - return -ENOMEM;
8944 - }
8945 - ctx->enckeylen = keylen;
8946 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8947 + ctx->cdata.keylen = keylen;
8948 + ctx->cdata.key_virt = ctx->key;
8949 + ctx->cdata.key_inline = true;
8950
8951 /* xts_ablkcipher_encrypt shared descriptor */
8952 desc = ctx->sh_desc_enc;
8953 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8954 - /* Skip if already shared */
8955 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8956 - JUMP_COND_SHRD);
8957 -
8958 - /* Load class1 keys only */
8959 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8960 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8961 -
8962 - /* Load sector size with index 40 bytes (0x28) */
8963 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8964 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8965 - append_data(desc, (void *)&sector_size, 8);
8966 -
8967 - set_jump_tgt_here(desc, key_jump_cmd);
8968 -
8969 - /*
8970 - * create sequence for loading the sector index
8971 - * Upper 8B of IV - will be used as sector index
8972 - * Lower 8B of IV - will be discarded
8973 - */
8974 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8975 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8976 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8977 -
8978 - /* Load operation */
8979 - append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
8980 - OP_ALG_ENCRYPT);
8981 -
8982 - /* Perform operation */
8983 - ablkcipher_append_src_dst(desc);
8984 -
8985 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
8986 - DMA_TO_DEVICE);
8987 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8988 - dev_err(jrdev, "unable to map shared descriptor\n");
8989 - return -ENOMEM;
8990 - }
8991 -#ifdef DEBUG
8992 - print_hex_dump(KERN_ERR,
8993 - "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
8994 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8995 -#endif
8996 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
8997 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8998 + desc_bytes(desc), DMA_TO_DEVICE);
8999
9000 /* xts_ablkcipher_decrypt shared descriptor */
9001 desc = ctx->sh_desc_dec;
9002 -
9003 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
9004 - /* Skip if already shared */
9005 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
9006 - JUMP_COND_SHRD);
9007 -
9008 - /* Load class1 key only */
9009 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
9010 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
9011 -
9012 - /* Load sector size with index 40 bytes (0x28) */
9013 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9014 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9015 - append_data(desc, (void *)&sector_size, 8);
9016 -
9017 - set_jump_tgt_here(desc, key_jump_cmd);
9018 -
9019 - /*
9020 - * create sequence for loading the sector index
9021 - * Upper 8B of IV - will be used as sector index
9022 - * Lower 8B of IV - will be discarded
9023 - */
9024 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9025 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9026 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9027 -
9028 - /* Load operation */
9029 - append_dec_op1(desc, ctx->class1_alg_type);
9030 -
9031 - /* Perform operation */
9032 - ablkcipher_append_src_dst(desc);
9033 -
9034 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9035 - DMA_TO_DEVICE);
9036 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9037 - dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9038 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9039 - dev_err(jrdev, "unable to map shared descriptor\n");
9040 - return -ENOMEM;
9041 - }
9042 -#ifdef DEBUG
9043 - print_hex_dump(KERN_ERR,
9044 - "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9045 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9046 -#endif
9047 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9048 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9049 + desc_bytes(desc), DMA_TO_DEVICE);
9050
9051 return 0;
9052 }
9053
9054 /*
9055 * aead_edesc - s/w-extended aead descriptor
9056 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9057 - * @src_nents: number of segments in input scatterlist
9058 - * @dst_nents: number of segments in output scatterlist
9059 - * @iv_dma: dma address of iv for checking continuity and link table
9060 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9061 + * @src_nents: number of segments in input s/w scatterlist
9062 + * @dst_nents: number of segments in output s/w scatterlist
9063 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9064 * @sec4_sg_dma: bus physical mapped address of h/w link table
9065 + * @sec4_sg: pointer to h/w link table
9066 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9067 */
9068 struct aead_edesc {
9069 - int assoc_nents;
9070 int src_nents;
9071 int dst_nents;
9072 - dma_addr_t iv_dma;
9073 int sec4_sg_bytes;
9074 dma_addr_t sec4_sg_dma;
9075 struct sec4_sg_entry *sec4_sg;
9076 @@ -1899,12 +739,12 @@ struct aead_edesc {
9077
9078 /*
9079 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9080 - * @src_nents: number of segments in input scatterlist
9081 - * @dst_nents: number of segments in output scatterlist
9082 + * @src_nents: number of segments in input s/w scatterlist
9083 + * @dst_nents: number of segments in output s/w scatterlist
9084 * @iv_dma: dma address of iv for checking continuity and link table
9085 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9086 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9087 * @sec4_sg_dma: bus physical mapped address of h/w link table
9088 + * @sec4_sg: pointer to h/w link table
9089 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9090 */
9091 struct ablkcipher_edesc {
9092 @@ -1924,10 +764,11 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
9093 int sec4_sg_bytes)
9094 {
9095 if (dst != src) {
9096 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9097 - dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9098 + if (src_nents)
9099 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9100 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9101 } else {
9102 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9103 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9104 }
9105
9106 if (iv_dma)
9107 @@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
9108 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9109 #endif
9110
9111 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9112 - offsetof(struct ablkcipher_edesc, hw_desc));
9113 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9114
9115 if (err)
9116 caam_jr_strstatus(jrdev, err);
9117 @@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
9118 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9119 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9120 edesc->src_nents > 1 ? 100 : ivsize, 1);
9121 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9122 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9123 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9124 #endif
9125 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9126 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9127 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9128
9129 ablkcipher_unmap(jrdev, edesc, req);
9130
9131 @@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
9132 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9133 #endif
9134
9135 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9136 - offsetof(struct ablkcipher_edesc, hw_desc));
9137 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9138 if (err)
9139 caam_jr_strstatus(jrdev, err);
9140
9141 @@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
9142 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9143 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9144 ivsize, 1);
9145 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9146 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9147 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9148 #endif
9149 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9150 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9151 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9152
9153 ablkcipher_unmap(jrdev, edesc, req);
9154
9155 @@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_request *req,
9156 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9157
9158 if (all_contig) {
9159 - src_dma = sg_dma_address(req->src);
9160 + src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9161 in_options = 0;
9162 } else {
9163 src_dma = edesc->sec4_sg_dma;
9164 @@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_request *req,
9165 out_options = in_options;
9166
9167 if (unlikely(req->src != req->dst)) {
9168 - if (!edesc->dst_nents) {
9169 + if (edesc->dst_nents == 1) {
9170 dst_dma = sg_dma_address(req->dst);
9171 } else {
9172 dst_dma = edesc->sec4_sg_dma +
9173 @@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_request *req,
9174 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9175 /* Append Salt */
9176 if (!generic_gcm)
9177 - append_data(desc, ctx->key + ctx->enckeylen, 4);
9178 + append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9179 /* Append IV */
9180 append_data(desc, req->iv, ivsize);
9181 /* End of blank commands */
9182 @@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead_request *req,
9183 struct caam_aead_alg, aead);
9184 unsigned int ivsize = crypto_aead_ivsize(aead);
9185 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9186 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9187 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9188 OP_ALG_AAI_CTR_MOD128);
9189 const bool is_rfc3686 = alg->caam.rfc3686;
9190 u32 *desc = edesc->hw_desc;
9191 @@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
9192 int len, sec4_sg_index = 0;
9193
9194 #ifdef DEBUG
9195 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9196 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9197 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9198 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9199 ivsize, 1);
9200 - printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9201 - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
9202 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9203 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9204 + pr_err("asked=%d, nbytes%d\n",
9205 + (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9206 #endif
9207 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
9208 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9209 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9210
9211 len = desc_len(sh_desc);
9212 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9213 @@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
9214 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9215
9216 if (likely(req->src == req->dst)) {
9217 - if (!edesc->src_nents && iv_contig) {
9218 + if (edesc->src_nents == 1 && iv_contig) {
9219 dst_dma = sg_dma_address(req->src);
9220 } else {
9221 dst_dma = edesc->sec4_sg_dma +
9222 @@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
9223 out_options = LDST_SGF;
9224 }
9225 } else {
9226 - if (!edesc->dst_nents) {
9227 + if (edesc->dst_nents == 1) {
9228 dst_dma = sg_dma_address(req->dst);
9229 } else {
9230 dst_dma = edesc->sec4_sg_dma +
9231 @@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
9232 int len, sec4_sg_index = 0;
9233
9234 #ifdef DEBUG
9235 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9236 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9237 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9238 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9239 ivsize, 1);
9240 - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9241 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9242 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9243 #endif
9244 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9245 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9246 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9247
9248 len = desc_len(sh_desc);
9249 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9250
9251 - if (!edesc->src_nents) {
9252 + if (edesc->src_nents == 1) {
9253 src_dma = sg_dma_address(req->src);
9254 in_options = 0;
9255 } else {
9256 @@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
9257 struct crypto_aead *aead = crypto_aead_reqtfm(req);
9258 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9259 struct device *jrdev = ctx->jrdev;
9260 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9261 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9262 - int src_nents, dst_nents = 0;
9263 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9264 + GFP_KERNEL : GFP_ATOMIC;
9265 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9266 struct aead_edesc *edesc;
9267 - int sgc;
9268 - bool all_contig = true;
9269 - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9270 + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9271 unsigned int authsize = ctx->authsize;
9272
9273 if (unlikely(req->dst != req->src)) {
9274 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9275 - dst_nents = sg_count(req->dst,
9276 - req->assoclen + req->cryptlen +
9277 - (encrypt ? authsize : (-authsize)));
9278 - } else {
9279 - src_nents = sg_count(req->src,
9280 - req->assoclen + req->cryptlen +
9281 - (encrypt ? authsize : 0));
9282 - }
9283 -
9284 - /* Check if data are contiguous. */
9285 - all_contig = !src_nents;
9286 - if (!all_contig) {
9287 - src_nents = src_nents ? : 1;
9288 - sec4_sg_len = src_nents;
9289 - }
9290 -
9291 - sec4_sg_len += dst_nents;
9292 -
9293 - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9294 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9295 + req->cryptlen);
9296 + if (unlikely(src_nents < 0)) {
9297 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9298 + req->assoclen + req->cryptlen);
9299 + return ERR_PTR(src_nents);
9300 + }
9301
9302 - /* allocate space for base edesc and hw desc commands, link tables */
9303 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9304 - GFP_DMA | flags);
9305 - if (!edesc) {
9306 - dev_err(jrdev, "could not allocate extended descriptor\n");
9307 - return ERR_PTR(-ENOMEM);
9308 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9309 + req->cryptlen +
9310 + (encrypt ? authsize :
9311 + (-authsize)));
9312 + if (unlikely(dst_nents < 0)) {
9313 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9314 + req->assoclen + req->cryptlen +
9315 + (encrypt ? authsize : (-authsize)));
9316 + return ERR_PTR(dst_nents);
9317 + }
9318 + } else {
9319 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9320 + req->cryptlen +
9321 + (encrypt ? authsize : 0));
9322 + if (unlikely(src_nents < 0)) {
9323 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9324 + req->assoclen + req->cryptlen +
9325 + (encrypt ? authsize : 0));
9326 + return ERR_PTR(src_nents);
9327 + }
9328 }
9329
9330 if (likely(req->src == req->dst)) {
9331 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9332 - DMA_BIDIRECTIONAL);
9333 - if (unlikely(!sgc)) {
9334 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9335 + DMA_BIDIRECTIONAL);
9336 + if (unlikely(!mapped_src_nents)) {
9337 dev_err(jrdev, "unable to map source\n");
9338 - kfree(edesc);
9339 return ERR_PTR(-ENOMEM);
9340 }
9341 } else {
9342 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9343 - DMA_TO_DEVICE);
9344 - if (unlikely(!sgc)) {
9345 - dev_err(jrdev, "unable to map source\n");
9346 - kfree(edesc);
9347 - return ERR_PTR(-ENOMEM);
9348 + /* Cover also the case of null (zero length) input data */
9349 + if (src_nents) {
9350 + mapped_src_nents = dma_map_sg(jrdev, req->src,
9351 + src_nents, DMA_TO_DEVICE);
9352 + if (unlikely(!mapped_src_nents)) {
9353 + dev_err(jrdev, "unable to map source\n");
9354 + return ERR_PTR(-ENOMEM);
9355 + }
9356 + } else {
9357 + mapped_src_nents = 0;
9358 }
9359
9360 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9361 - DMA_FROM_DEVICE);
9362 - if (unlikely(!sgc)) {
9363 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9364 + DMA_FROM_DEVICE);
9365 + if (unlikely(!mapped_dst_nents)) {
9366 dev_err(jrdev, "unable to map destination\n");
9367 - dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9368 - DMA_TO_DEVICE);
9369 - kfree(edesc);
9370 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9371 return ERR_PTR(-ENOMEM);
9372 }
9373 }
9374
9375 + sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9376 + sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9377 + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9378 +
9379 + /* allocate space for base edesc and hw desc commands, link tables */
9380 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9381 + GFP_DMA | flags);
9382 + if (!edesc) {
9383 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9384 + 0, 0, 0);
9385 + return ERR_PTR(-ENOMEM);
9386 + }
9387 +
9388 edesc->src_nents = src_nents;
9389 edesc->dst_nents = dst_nents;
9390 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9391 desc_bytes;
9392 - *all_contig_ptr = all_contig;
9393 + *all_contig_ptr = !(mapped_src_nents > 1);
9394
9395 sec4_sg_index = 0;
9396 - if (!all_contig) {
9397 - sg_to_sec4_sg_last(req->src, src_nents,
9398 - edesc->sec4_sg + sec4_sg_index, 0);
9399 - sec4_sg_index += src_nents;
9400 + if (mapped_src_nents > 1) {
9401 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9402 + edesc->sec4_sg + sec4_sg_index, 0);
9403 + sec4_sg_index += mapped_src_nents;
9404 }
9405 - if (dst_nents) {
9406 - sg_to_sec4_sg_last(req->dst, dst_nents,
9407 + if (mapped_dst_nents > 1) {
9408 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9409 edesc->sec4_sg + sec4_sg_index, 0);
9410 }
9411
9412 @@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_request *req)
9413 u32 *desc;
9414 int ret = 0;
9415
9416 -#ifdef DEBUG
9417 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9418 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9419 - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9420 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9421 - req->assoclen + req->cryptlen, 1, may_sleep);
9422 -#endif
9423 + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9424 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9425 + req->assoclen + req->cryptlen, 1);
9426
9427 /* allocate extended descriptor */
9428 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9429 @@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
9430 struct device *jrdev = ctx->jrdev;
9431 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9432 GFP_KERNEL : GFP_ATOMIC;
9433 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9434 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9435 struct ablkcipher_edesc *edesc;
9436 dma_addr_t iv_dma = 0;
9437 - bool iv_contig = false;
9438 - int sgc;
9439 + bool in_contig;
9440 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9441 - int sec4_sg_index;
9442 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9443
9444 - src_nents = sg_count(req->src, req->nbytes);
9445 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9446 + if (unlikely(src_nents < 0)) {
9447 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9448 + req->nbytes);
9449 + return ERR_PTR(src_nents);
9450 + }
9451
9452 - if (req->dst != req->src)
9453 - dst_nents = sg_count(req->dst, req->nbytes);
9454 + if (req->dst != req->src) {
9455 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9456 + if (unlikely(dst_nents < 0)) {
9457 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9458 + req->nbytes);
9459 + return ERR_PTR(dst_nents);
9460 + }
9461 + }
9462
9463 if (likely(req->src == req->dst)) {
9464 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9465 - DMA_BIDIRECTIONAL);
9466 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9467 + DMA_BIDIRECTIONAL);
9468 + if (unlikely(!mapped_src_nents)) {
9469 + dev_err(jrdev, "unable to map source\n");
9470 + return ERR_PTR(-ENOMEM);
9471 + }
9472 } else {
9473 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9474 - DMA_TO_DEVICE);
9475 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9476 - DMA_FROM_DEVICE);
9477 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9478 + DMA_TO_DEVICE);
9479 + if (unlikely(!mapped_src_nents)) {
9480 + dev_err(jrdev, "unable to map source\n");
9481 + return ERR_PTR(-ENOMEM);
9482 + }
9483 +
9484 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9485 + DMA_FROM_DEVICE);
9486 + if (unlikely(!mapped_dst_nents)) {
9487 + dev_err(jrdev, "unable to map destination\n");
9488 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9489 + return ERR_PTR(-ENOMEM);
9490 + }
9491 }
9492
9493 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9494 if (dma_mapping_error(jrdev, iv_dma)) {
9495 dev_err(jrdev, "unable to map IV\n");
9496 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9497 + 0, 0, 0);
9498 return ERR_PTR(-ENOMEM);
9499 }
9500
9501 - /*
9502 - * Check if iv can be contiguous with source and destination.
9503 - * If so, include it. If not, create scatterlist.
9504 - */
9505 - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9506 - iv_contig = true;
9507 - else
9508 - src_nents = src_nents ? : 1;
9509 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9510 - sizeof(struct sec4_sg_entry);
9511 + if (mapped_src_nents == 1 &&
9512 + iv_dma + ivsize == sg_dma_address(req->src)) {
9513 + in_contig = true;
9514 + sec4_sg_ents = 0;
9515 + } else {
9516 + in_contig = false;
9517 + sec4_sg_ents = 1 + mapped_src_nents;
9518 + }
9519 + dst_sg_idx = sec4_sg_ents;
9520 + sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9521 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9522
9523 /* allocate space for base edesc and hw desc commands, link tables */
9524 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9525 GFP_DMA | flags);
9526 if (!edesc) {
9527 dev_err(jrdev, "could not allocate extended descriptor\n");
9528 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9529 + iv_dma, ivsize, 0, 0);
9530 return ERR_PTR(-ENOMEM);
9531 }
9532
9533 @@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
9534 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9535 desc_bytes;
9536
9537 - sec4_sg_index = 0;
9538 - if (!iv_contig) {
9539 + if (!in_contig) {
9540 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9541 - sg_to_sec4_sg_last(req->src, src_nents,
9542 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9543 edesc->sec4_sg + 1, 0);
9544 - sec4_sg_index += 1 + src_nents;
9545 }
9546
9547 - if (dst_nents) {
9548 - sg_to_sec4_sg_last(req->dst, dst_nents,
9549 - edesc->sec4_sg + sec4_sg_index, 0);
9550 + if (mapped_dst_nents > 1) {
9551 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9552 + edesc->sec4_sg + dst_sg_idx, 0);
9553 }
9554
9555 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9556 sec4_sg_bytes, DMA_TO_DEVICE);
9557 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9558 dev_err(jrdev, "unable to map S/G table\n");
9559 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9560 + iv_dma, ivsize, 0, 0);
9561 + kfree(edesc);
9562 return ERR_PTR(-ENOMEM);
9563 }
9564
9565 @@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
9566 sec4_sg_bytes, 1);
9567 #endif
9568
9569 - *iv_contig_out = iv_contig;
9570 + *iv_contig_out = in_contig;
9571 return edesc;
9572 }
9573
9574 @@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9575 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9576 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9577 struct device *jrdev = ctx->jrdev;
9578 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9579 - CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9580 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9581 GFP_KERNEL : GFP_ATOMIC;
9582 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9583 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9584 struct ablkcipher_edesc *edesc;
9585 dma_addr_t iv_dma = 0;
9586 - bool iv_contig = false;
9587 - int sgc;
9588 + bool out_contig;
9589 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9590 - int sec4_sg_index;
9591 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9592
9593 - src_nents = sg_count(req->src, req->nbytes);
9594 -
9595 - if (unlikely(req->dst != req->src))
9596 - dst_nents = sg_count(req->dst, req->nbytes);
9597 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9598 + if (unlikely(src_nents < 0)) {
9599 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9600 + req->nbytes);
9601 + return ERR_PTR(src_nents);
9602 + }
9603
9604 if (likely(req->src == req->dst)) {
9605 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9606 - DMA_BIDIRECTIONAL);
9607 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9608 + DMA_BIDIRECTIONAL);
9609 + if (unlikely(!mapped_src_nents)) {
9610 + dev_err(jrdev, "unable to map source\n");
9611 + return ERR_PTR(-ENOMEM);
9612 + }
9613 +
9614 + dst_nents = src_nents;
9615 + mapped_dst_nents = src_nents;
9616 } else {
9617 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9618 - DMA_TO_DEVICE);
9619 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9620 - DMA_FROM_DEVICE);
9621 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9622 + DMA_TO_DEVICE);
9623 + if (unlikely(!mapped_src_nents)) {
9624 + dev_err(jrdev, "unable to map source\n");
9625 + return ERR_PTR(-ENOMEM);
9626 + }
9627 +
9628 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9629 + if (unlikely(dst_nents < 0)) {
9630 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9631 + req->nbytes);
9632 + return ERR_PTR(dst_nents);
9633 + }
9634 +
9635 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9636 + DMA_FROM_DEVICE);
9637 + if (unlikely(!mapped_dst_nents)) {
9638 + dev_err(jrdev, "unable to map destination\n");
9639 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9640 + return ERR_PTR(-ENOMEM);
9641 + }
9642 }
9643
9644 /*
9645 @@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9646 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9647 if (dma_mapping_error(jrdev, iv_dma)) {
9648 dev_err(jrdev, "unable to map IV\n");
9649 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9650 + 0, 0, 0);
9651 return ERR_PTR(-ENOMEM);
9652 }
9653
9654 - if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9655 - iv_contig = true;
9656 - else
9657 - dst_nents = dst_nents ? : 1;
9658 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9659 - sizeof(struct sec4_sg_entry);
9660 + sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9661 + dst_sg_idx = sec4_sg_ents;
9662 + if (mapped_dst_nents == 1 &&
9663 + iv_dma + ivsize == sg_dma_address(req->dst)) {
9664 + out_contig = true;
9665 + } else {
9666 + out_contig = false;
9667 + sec4_sg_ents += 1 + mapped_dst_nents;
9668 + }
9669
9670 /* allocate space for base edesc and hw desc commands, link tables */
9671 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9672 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9673 GFP_DMA | flags);
9674 if (!edesc) {
9675 dev_err(jrdev, "could not allocate extended descriptor\n");
9676 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9677 + iv_dma, ivsize, 0, 0);
9678 return ERR_PTR(-ENOMEM);
9679 }
9680
9681 @@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9682 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9683 desc_bytes;
9684
9685 - sec4_sg_index = 0;
9686 - if (src_nents) {
9687 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9688 - sec4_sg_index += src_nents;
9689 - }
9690 + if (mapped_src_nents > 1)
9691 + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9692 + 0);
9693
9694 - if (!iv_contig) {
9695 - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9696 + if (!out_contig) {
9697 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9698 iv_dma, ivsize, 0);
9699 - sec4_sg_index += 1;
9700 - sg_to_sec4_sg_last(req->dst, dst_nents,
9701 - edesc->sec4_sg + sec4_sg_index, 0);
9702 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9703 + edesc->sec4_sg + dst_sg_idx + 1, 0);
9704 }
9705
9706 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9707 sec4_sg_bytes, DMA_TO_DEVICE);
9708 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9709 dev_err(jrdev, "unable to map S/G table\n");
9710 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9711 + iv_dma, ivsize, 0, 0);
9712 + kfree(edesc);
9713 return ERR_PTR(-ENOMEM);
9714 }
9715 edesc->iv_dma = iv_dma;
9716 @@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9717 sec4_sg_bytes, 1);
9718 #endif
9719
9720 - *iv_contig_out = iv_contig;
9721 + *iv_contig_out = out_contig;
9722 return edesc;
9723 }
9724
9725 @@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
9726 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9727 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9728 struct device *jrdev = ctx->jrdev;
9729 - bool iv_contig;
9730 + bool iv_contig = false;
9731 u32 *desc;
9732 int ret = 0;
9733
9734 @@ -2933,7 +1840,6 @@ struct caam_alg_template {
9735 } template_u;
9736 u32 class1_alg_type;
9737 u32 class2_alg_type;
9738 - u32 alg_op;
9739 };
9740
9741 static struct caam_alg_template driver_algs[] = {
9742 @@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads[] = {
9743 .caam = {
9744 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9745 OP_ALG_AAI_HMAC_PRECOMP,
9746 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9747 },
9748 },
9749 {
9750 @@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads[] = {
9751 .caam = {
9752 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9753 OP_ALG_AAI_HMAC_PRECOMP,
9754 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9755 },
9756 },
9757 {
9758 @@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads[] = {
9759 .caam = {
9760 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9761 OP_ALG_AAI_HMAC_PRECOMP,
9762 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9763 },
9764 },
9765 {
9766 @@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads[] = {
9767 .caam = {
9768 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9769 OP_ALG_AAI_HMAC_PRECOMP,
9770 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9771 },
9772 },
9773 {
9774 @@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads[] = {
9775 .caam = {
9776 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9777 OP_ALG_AAI_HMAC_PRECOMP,
9778 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9779 },
9780 },
9781 {
9782 @@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads[] = {
9783 .caam = {
9784 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9785 OP_ALG_AAI_HMAC_PRECOMP,
9786 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9787 },
9788 },
9789 {
9790 @@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads[] = {
9791 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9792 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9793 OP_ALG_AAI_HMAC_PRECOMP,
9794 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9795 },
9796 },
9797 {
9798 @@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads[] = {
9799 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9800 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9801 OP_ALG_AAI_HMAC_PRECOMP,
9802 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9803 .geniv = true,
9804 },
9805 },
9806 @@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads[] = {
9807 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9808 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9809 OP_ALG_AAI_HMAC_PRECOMP,
9810 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9811 },
9812 },
9813 {
9814 @@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads[] = {
9815 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9816 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9817 OP_ALG_AAI_HMAC_PRECOMP,
9818 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9819 .geniv = true,
9820 },
9821 },
9822 @@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads[] = {
9823 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9824 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9825 OP_ALG_AAI_HMAC_PRECOMP,
9826 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9827 },
9828 },
9829 {
9830 @@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads[] = {
9831 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9832 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9833 OP_ALG_AAI_HMAC_PRECOMP,
9834 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9835 .geniv = true,
9836 },
9837 },
9838 @@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads[] = {
9839 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9840 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9841 OP_ALG_AAI_HMAC_PRECOMP,
9842 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9843 },
9844 },
9845 {
9846 @@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads[] = {
9847 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9848 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9849 OP_ALG_AAI_HMAC_PRECOMP,
9850 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9851 .geniv = true,
9852 },
9853 },
9854 @@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads[] = {
9855 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9856 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9857 OP_ALG_AAI_HMAC_PRECOMP,
9858 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9859 },
9860 },
9861 {
9862 @@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads[] = {
9863 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9864 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9865 OP_ALG_AAI_HMAC_PRECOMP,
9866 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9867 .geniv = true,
9868 },
9869 },
9870 @@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads[] = {
9871 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9872 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9873 OP_ALG_AAI_HMAC_PRECOMP,
9874 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9875 },
9876 },
9877 {
9878 @@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads[] = {
9879 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9880 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9881 OP_ALG_AAI_HMAC_PRECOMP,
9882 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9883 .geniv = true,
9884 },
9885 },
9886 @@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads[] = {
9887 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9888 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9889 OP_ALG_AAI_HMAC_PRECOMP,
9890 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9891 }
9892 },
9893 {
9894 @@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads[] = {
9895 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9896 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9897 OP_ALG_AAI_HMAC_PRECOMP,
9898 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9899 .geniv = true,
9900 }
9901 },
9902 @@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads[] = {
9903 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9904 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9905 OP_ALG_AAI_HMAC_PRECOMP,
9906 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9907 },
9908 },
9909 {
9910 @@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads[] = {
9911 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9912 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9913 OP_ALG_AAI_HMAC_PRECOMP,
9914 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9915 .geniv = true,
9916 },
9917 },
9918 @@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads[] = {
9919 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9920 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9921 OP_ALG_AAI_HMAC_PRECOMP,
9922 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9923 },
9924 },
9925 {
9926 @@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads[] = {
9927 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9928 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9929 OP_ALG_AAI_HMAC_PRECOMP,
9930 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9931 .geniv = true,
9932 },
9933 },
9934 @@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads[] = {
9935 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9936 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9937 OP_ALG_AAI_HMAC_PRECOMP,
9938 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9939 },
9940 },
9941 {
9942 @@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads[] = {
9943 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9944 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9945 OP_ALG_AAI_HMAC_PRECOMP,
9946 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9947 .geniv = true,
9948 },
9949 },
9950 @@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads[] = {
9951 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9952 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9953 OP_ALG_AAI_HMAC_PRECOMP,
9954 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9955 },
9956 },
9957 {
9958 @@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads[] = {
9959 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9960 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9961 OP_ALG_AAI_HMAC_PRECOMP,
9962 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9963 .geniv = true,
9964 },
9965 },
9966 @@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads[] = {
9967 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9968 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9969 OP_ALG_AAI_HMAC_PRECOMP,
9970 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9971 },
9972 },
9973 {
9974 @@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads[] = {
9975 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9976 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9977 OP_ALG_AAI_HMAC_PRECOMP,
9978 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9979 .geniv = true,
9980 },
9981 },
9982 @@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads[] = {
9983 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9984 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9985 OP_ALG_AAI_HMAC_PRECOMP,
9986 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9987 },
9988 },
9989 {
9990 @@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads[] = {
9991 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9992 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9993 OP_ALG_AAI_HMAC_PRECOMP,
9994 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9995 .geniv = true,
9996 },
9997 },
9998 @@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads[] = {
9999 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10000 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10001 OP_ALG_AAI_HMAC_PRECOMP,
10002 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10003 },
10004 },
10005 {
10006 @@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads[] = {
10007 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10008 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10009 OP_ALG_AAI_HMAC_PRECOMP,
10010 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10011 .geniv = true,
10012 },
10013 },
10014 @@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads[] = {
10015 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10016 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10017 OP_ALG_AAI_HMAC_PRECOMP,
10018 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10019 },
10020 },
10021 {
10022 @@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads[] = {
10023 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10024 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10025 OP_ALG_AAI_HMAC_PRECOMP,
10026 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10027 .geniv = true,
10028 },
10029 },
10030 @@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads[] = {
10031 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10032 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10033 OP_ALG_AAI_HMAC_PRECOMP,
10034 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10035 },
10036 },
10037 {
10038 @@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads[] = {
10039 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10040 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10041 OP_ALG_AAI_HMAC_PRECOMP,
10042 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10043 .geniv = true,
10044 },
10045 },
10046 @@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads[] = {
10047 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10048 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10049 OP_ALG_AAI_HMAC_PRECOMP,
10050 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10051 },
10052 },
10053 {
10054 @@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads[] = {
10055 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10056 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10057 OP_ALG_AAI_HMAC_PRECOMP,
10058 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10059 .geniv = true,
10060 },
10061 },
10062 @@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads[] = {
10063 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10064 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10065 OP_ALG_AAI_HMAC_PRECOMP,
10066 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10067 },
10068 },
10069 {
10070 @@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads[] = {
10071 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10072 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10073 OP_ALG_AAI_HMAC_PRECOMP,
10074 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10075 .geniv = true,
10076 },
10077 },
10078 @@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads[] = {
10079 OP_ALG_AAI_CTR_MOD128,
10080 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10081 OP_ALG_AAI_HMAC_PRECOMP,
10082 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10083 .rfc3686 = true,
10084 },
10085 },
10086 @@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads[] = {
10087 OP_ALG_AAI_CTR_MOD128,
10088 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10089 OP_ALG_AAI_HMAC_PRECOMP,
10090 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10091 .rfc3686 = true,
10092 .geniv = true,
10093 },
10094 @@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads[] = {
10095 OP_ALG_AAI_CTR_MOD128,
10096 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10097 OP_ALG_AAI_HMAC_PRECOMP,
10098 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10099 .rfc3686 = true,
10100 },
10101 },
10102 @@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads[] = {
10103 OP_ALG_AAI_CTR_MOD128,
10104 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10105 OP_ALG_AAI_HMAC_PRECOMP,
10106 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10107 .rfc3686 = true,
10108 .geniv = true,
10109 },
10110 @@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads[] = {
10111 OP_ALG_AAI_CTR_MOD128,
10112 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10113 OP_ALG_AAI_HMAC_PRECOMP,
10114 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10115 .rfc3686 = true,
10116 },
10117 },
10118 @@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads[] = {
10119 OP_ALG_AAI_CTR_MOD128,
10120 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10121 OP_ALG_AAI_HMAC_PRECOMP,
10122 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10123 .rfc3686 = true,
10124 .geniv = true,
10125 },
10126 @@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads[] = {
10127 OP_ALG_AAI_CTR_MOD128,
10128 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10129 OP_ALG_AAI_HMAC_PRECOMP,
10130 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10131 .rfc3686 = true,
10132 },
10133 },
10134 @@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads[] = {
10135 OP_ALG_AAI_CTR_MOD128,
10136 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10137 OP_ALG_AAI_HMAC_PRECOMP,
10138 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10139 .rfc3686 = true,
10140 .geniv = true,
10141 },
10142 @@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads[] = {
10143 OP_ALG_AAI_CTR_MOD128,
10144 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10145 OP_ALG_AAI_HMAC_PRECOMP,
10146 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10147 .rfc3686 = true,
10148 },
10149 },
10150 @@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads[] = {
10151 OP_ALG_AAI_CTR_MOD128,
10152 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10153 OP_ALG_AAI_HMAC_PRECOMP,
10154 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10155 .rfc3686 = true,
10156 .geniv = true,
10157 },
10158 @@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads[] = {
10159 OP_ALG_AAI_CTR_MOD128,
10160 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10161 OP_ALG_AAI_HMAC_PRECOMP,
10162 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10163 .rfc3686 = true,
10164 },
10165 },
10166 @@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads[] = {
10167 OP_ALG_AAI_CTR_MOD128,
10168 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10169 OP_ALG_AAI_HMAC_PRECOMP,
10170 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10171 .rfc3686 = true,
10172 .geniv = true,
10173 },
10174 @@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
10175
10176 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10177 {
10178 + dma_addr_t dma_addr;
10179 +
10180 ctx->jrdev = caam_jr_alloc();
10181 if (IS_ERR(ctx->jrdev)) {
10182 pr_err("Job Ring Device allocation for transform failed\n");
10183 return PTR_ERR(ctx->jrdev);
10184 }
10185
10186 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10187 + offsetof(struct caam_ctx,
10188 + sh_desc_enc_dma),
10189 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10190 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10191 + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10192 + caam_jr_free(ctx->jrdev);
10193 + return -ENOMEM;
10194 + }
10195 +
10196 + ctx->sh_desc_enc_dma = dma_addr;
10197 + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10198 + sh_desc_dec);
10199 + ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10200 + sh_desc_givenc);
10201 + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10202 +
10203 /* copy descriptor header template value */
10204 - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10205 - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10206 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10207 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10208 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10209
10210 return 0;
10211 }
10212 @@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
10213
10214 static void caam_exit_common(struct caam_ctx *ctx)
10215 {
10216 - if (ctx->sh_desc_enc_dma &&
10217 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10218 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10219 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10220 - if (ctx->sh_desc_dec_dma &&
10221 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10222 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10223 - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10224 - if (ctx->sh_desc_givenc_dma &&
10225 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10226 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10227 - desc_bytes(ctx->sh_desc_givenc),
10228 - DMA_TO_DEVICE);
10229 - if (ctx->key_dma &&
10230 - !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10231 - dma_unmap_single(ctx->jrdev, ctx->key_dma,
10232 - ctx->enckeylen + ctx->split_key_pad_len,
10233 - DMA_TO_DEVICE);
10234 -
10235 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10236 + offsetof(struct caam_ctx, sh_desc_enc_dma),
10237 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10238 caam_jr_free(ctx->jrdev);
10239 }
10240
10241 @@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
10242
10243 t_alg->caam.class1_alg_type = template->class1_alg_type;
10244 t_alg->caam.class2_alg_type = template->class2_alg_type;
10245 - t_alg->caam.alg_op = template->alg_op;
10246
10247 return t_alg;
10248 }
10249 diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
10250 new file mode 100644
10251 index 00000000..d162120a
10252 --- /dev/null
10253 +++ b/drivers/crypto/caam/caamalg_desc.c
10254 @@ -0,0 +1,1913 @@
10255 +/*
10256 + * Shared descriptors for aead, ablkcipher algorithms
10257 + *
10258 + * Copyright 2016 NXP
10259 + */
10260 +
10261 +#include "compat.h"
10262 +#include "desc_constr.h"
10263 +#include "caamalg_desc.h"
10264 +
10265 +/*
10266 + * For aead functions, read payload and write payload,
10267 + * both of which are specified in req->src and req->dst
10268 + */
10269 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10270 +{
10271 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10272 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10273 + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10274 +}
10275 +
10276 +/* Set DK bit in class 1 operation if shared */
10277 +static inline void append_dec_op1(u32 *desc, u32 type)
10278 +{
10279 + u32 *jump_cmd, *uncond_jump_cmd;
10280 +
10281 + /* DK bit is valid only for AES */
10282 + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10283 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10284 + OP_ALG_DECRYPT);
10285 + return;
10286 + }
10287 +
10288 + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10289 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10290 + OP_ALG_DECRYPT);
10291 + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10292 + set_jump_tgt_here(desc, jump_cmd);
10293 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10294 + OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10295 + set_jump_tgt_here(desc, uncond_jump_cmd);
10296 +}
10297 +
10298 +/**
10299 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10300 + * (non-protocol) with no (null) encryption.
10301 + * @desc: pointer to buffer used for descriptor construction
10302 + * @adata: pointer to authentication transform definitions. Note that since a
10303 + * split key is to be used, the size of the split key itself is
10304 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10305 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10306 + * @icvsize: integrity check value (ICV) size (truncated or full)
10307 + *
10308 + * Note: Requires an MDHA split key.
10309 + */
10310 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10311 + unsigned int icvsize)
10312 +{
10313 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10314 +
10315 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10316 +
10317 + /* Skip if already shared */
10318 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10319 + JUMP_COND_SHRD);
10320 + if (adata->key_inline)
10321 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10322 + adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10323 + KEY_ENC);
10324 + else
10325 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10326 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10327 + set_jump_tgt_here(desc, key_jump_cmd);
10328 +
10329 + /* assoclen + cryptlen = seqinlen */
10330 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10331 +
10332 + /* Prepare to read and write cryptlen + assoclen bytes */
10333 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10334 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10335 +
10336 + /*
10337 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10338 + * thus need to do some magic, i.e. self-patch the descriptor
10339 + * buffer.
10340 + */
10341 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10342 + MOVE_DEST_MATH3 |
10343 + (0x6 << MOVE_LEN_SHIFT));
10344 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10345 + MOVE_DEST_DESCBUF |
10346 + MOVE_WAITCOMP |
10347 + (0x8 << MOVE_LEN_SHIFT));
10348 +
10349 + /* Class 2 operation */
10350 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10351 + OP_ALG_ENCRYPT);
10352 +
10353 + /* Read and write cryptlen bytes */
10354 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10355 +
10356 + set_move_tgt_here(desc, read_move_cmd);
10357 + set_move_tgt_here(desc, write_move_cmd);
10358 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10359 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10360 + MOVE_AUX_LS);
10361 +
10362 + /* Write ICV */
10363 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10364 + LDST_SRCDST_BYTE_CONTEXT);
10365 +
10366 +#ifdef DEBUG
10367 + print_hex_dump(KERN_ERR,
10368 + "aead null enc shdesc@" __stringify(__LINE__)": ",
10369 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10370 +#endif
10371 +}
10372 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
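/*
 * Editorial sketch, not part of the patch: an illustrative (assumed) caller
 * of the constructor above.  The alginfo field names match their use in
 * this file; the SHA-1 choice, the 20-byte ICV and the decision to inline
 * the split key are assumptions of this example only.
 *
 *	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA1 |
 *			     OP_ALG_AAI_HMAC_PRECOMP;
 *	ctx->adata.key_virt = ctx->key;		// MDHA split key
 *	ctx->adata.key_inline = true;
 *	cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, &ctx->adata, 20);
 */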
10373 +
10374 +/**
10375 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10376 + * (non-protocol) with no (null) decryption.
10377 + * @desc: pointer to buffer used for descriptor construction
10378 + * @adata: pointer to authentication transform definitions. Note that since a
10379 + * split key is to be used, the size of the split key itself is
10380 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10381 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10382 + * @icvsize: integrity check value (ICV) size (truncated or full)
10383 + *
10384 + * Note: Requires an MDHA split key.
10385 + */
10386 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10387 + unsigned int icvsize)
10388 +{
10389 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10390 +
10391 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10392 +
10393 + /* Skip if already shared */
10394 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10395 + JUMP_COND_SHRD);
10396 + if (adata->key_inline)
10397 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10398 + adata->keylen, CLASS_2 |
10399 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10400 + else
10401 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10402 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10403 + set_jump_tgt_here(desc, key_jump_cmd);
10404 +
10405 + /* Class 2 operation */
10406 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10407 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10408 +
10409 + /* assoclen + cryptlen = seqoutlen */
10410 + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10411 +
10412 + /* Prepare to read and write cryptlen + assoclen bytes */
10413 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10414 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10415 +
10416 + /*
10417 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10418 + * thus need to do some magic, i.e. self-patch the descriptor
10419 + * buffer.
10420 + */
10421 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10422 + MOVE_DEST_MATH2 |
10423 + (0x6 << MOVE_LEN_SHIFT));
10424 + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10425 + MOVE_DEST_DESCBUF |
10426 + MOVE_WAITCOMP |
10427 + (0x8 << MOVE_LEN_SHIFT));
10428 +
10429 + /* Read and write cryptlen bytes */
10430 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10431 +
10432 + /*
10433 + * Insert a NOP here, since we need at least 4 instructions between
10434 + * code patching the descriptor buffer and the location being patched.
10435 + */
10436 + jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10437 + set_jump_tgt_here(desc, jump_cmd);
10438 +
10439 + set_move_tgt_here(desc, read_move_cmd);
10440 + set_move_tgt_here(desc, write_move_cmd);
10441 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10442 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10443 + MOVE_AUX_LS);
10444 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10445 +
10446 + /* Load ICV */
10447 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10448 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10449 +
10450 +#ifdef DEBUG
10451 + print_hex_dump(KERN_ERR,
10452 + "aead null dec shdesc@" __stringify(__LINE__)": ",
10453 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10454 +#endif
10455 +}
10456 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10457 +
10458 +static void init_sh_desc_key_aead(u32 * const desc,
10459 + struct alginfo * const cdata,
10460 + struct alginfo * const adata,
10461 + const bool is_rfc3686, u32 *nonce)
10462 +{
10463 + u32 *key_jump_cmd;
10464 + unsigned int enckeylen = cdata->keylen;
10465 +
10466 + /* Note: Context registers are saved. */
10467 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10468 +
10469 + /* Skip if already shared */
10470 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10471 + JUMP_COND_SHRD);
10472 +
10473 + /*
10474 + * RFC3686 specific:
10475 + * | key = {AUTH_KEY, ENC_KEY, NONCE}
10476 + * | enckeylen = encryption key size + nonce size
10477 + */
10478 + if (is_rfc3686)
10479 + enckeylen -= CTR_RFC3686_NONCE_SIZE;
10480 +
10481 + if (adata->key_inline)
10482 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10483 + adata->keylen, CLASS_2 |
10484 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10485 + else
10486 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10487 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10488 +
10489 + if (cdata->key_inline)
10490 + append_key_as_imm(desc, cdata->key_virt, enckeylen,
10491 + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10492 + else
10493 + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10494 + KEY_DEST_CLASS_REG);
10495 +
10496 + /* Load Counter into CONTEXT1 reg */
10497 + if (is_rfc3686) {
10498 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10499 + LDST_CLASS_IND_CCB |
10500 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10501 + append_move(desc,
10502 + MOVE_SRC_OUTFIFO |
10503 + MOVE_DEST_CLASS1CTX |
10504 + (16 << MOVE_OFFSET_SHIFT) |
10505 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10506 + }
10507 +
10508 + set_jump_tgt_here(desc, key_jump_cmd);
10509 +}
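/*
 * Editorial sketch, not part of the patch: with RFC3686 the nonce travels
 * at the end of the combined key blob (key = {AUTH_KEY, ENC_KEY, NONCE},
 * as noted above), so a caller would typically derive the nonce pointer
 * along the following lines before invoking the descriptor constructors.
 * The exact call site is an assumption of this example; only the key
 * layout is taken from the comment above.
 *
 *	u32 *nonce = NULL;
 *
 *	if (is_rfc3686)
 *		nonce = (u32 *)((u8 *)cdata->key_virt + cdata->keylen -
 *				CTR_RFC3686_NONCE_SIZE);
 */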
10510 +
10511 +/**
10512 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10513 + * (non-protocol).
10514 + * @desc: pointer to buffer used for descriptor construction
10515 + * @cdata: pointer to block cipher transform definitions
10516 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10517 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10518 + * @adata: pointer to authentication transform definitions. Note that since a
10519 + * split key is to be used, the size of the split key itself is
10520 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10521 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10522 + * @ivsize: initialization vector size
10523 + * @icvsize: integrity check value (ICV) size (truncated or full)
10524 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10525 + * @nonce: pointer to rfc3686 nonce
10526 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10527 + * @is_qi: true when called from caam/qi
10528 + *
10529 + * Note: Requires an MDHA split key.
10530 + */
10531 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10532 + struct alginfo *adata, unsigned int ivsize,
10533 + unsigned int icvsize, const bool is_rfc3686,
10534 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
10535 +{
10536 + /* Note: Context registers are saved. */
10537 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10538 +
10539 + /* Class 2 operation */
10540 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10541 + OP_ALG_ENCRYPT);
10542 +
10543 + if (is_qi) {
10544 + u32 *wait_load_cmd;
10545 +
10546 + /* REG3 = assoclen */
10547 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10548 + LDST_SRCDST_WORD_DECO_MATH3 |
10549 + (4 << LDST_OFFSET_SHIFT));
10550 +
10551 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10552 + JUMP_COND_CALM | JUMP_COND_NCP |
10553 + JUMP_COND_NOP | JUMP_COND_NIP |
10554 + JUMP_COND_NIFP);
10555 + set_jump_tgt_here(desc, wait_load_cmd);
10556 +
10557 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10558 + LDST_SRCDST_BYTE_CONTEXT |
10559 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10560 + }
10561 +
10562 + /* Read and write assoclen bytes */
10563 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10564 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10565 +
10566 + /* Skip assoc data */
10567 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10568 +
10569 + /* read assoc before reading payload */
10570 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10571 + FIFOLDST_VLF);
10572 +
10573 + /* Load Counter into CONTEXT1 reg */
10574 + if (is_rfc3686)
10575 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10576 + LDST_SRCDST_BYTE_CONTEXT |
10577 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10578 + LDST_OFFSET_SHIFT));
10579 +
10580 + /* Class 1 operation */
10581 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10582 + OP_ALG_ENCRYPT);
10583 +
10584 + /* Read and write cryptlen bytes */
10585 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10586 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10587 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10588 +
10589 + /* Write ICV */
10590 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10591 + LDST_SRCDST_BYTE_CONTEXT);
10592 +
10593 +#ifdef DEBUG
10594 + print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10595 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10596 +#endif
10597 +}
10598 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
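/*
 * Editorial sketch, not part of the patch: an illustrative (assumed)
 * invocation for cbc(aes) + hmac, without RFC3686 wrapping and outside
 * caam/qi.  ctx->cdata/ctx->adata are assumed to already carry the key
 * material and the algtype values set up as in caam_init_common(); the
 * 16-byte IV and ICV sizes are example values only.
 *
 *	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 *			       16, 16,		// ivsize, icvsize (example)
 *			       false, NULL,	// is_rfc3686, nonce
 *			       0, false);	// ctx1_iv_off, is_qi
 */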
10599 +
10600 +/**
10601 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10602 + * (non-protocol).
10603 + * @desc: pointer to buffer used for descriptor construction
10604 + * @cdata: pointer to block cipher transform definitions
10605 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10606 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10607 + * @adata: pointer to authentication transform definitions. Note that since a
10608 + * split key is to be used, the size of the split key itself is
10609 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10610 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10611 + * @ivsize: initialization vector size
10612 + * @icvsize: integrity check value (ICV) size (truncated or full)
10613 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10614 + * @nonce: pointer to rfc3686 nonce
10615 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10616 + * @is_qi: true when called from caam/qi
10617 + *
10618 + * Note: Requires an MDHA split key.
10619 + */
10620 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10621 + struct alginfo *adata, unsigned int ivsize,
10622 + unsigned int icvsize, const bool geniv,
10623 + const bool is_rfc3686, u32 *nonce,
10624 + const u32 ctx1_iv_off, const bool is_qi)
10625 +{
10626 + /* Note: Context registers are saved. */
10627 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10628 +
10629 + /* Class 2 operation */
10630 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10631 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10632 +
10633 + if (is_qi) {
10634 + u32 *wait_load_cmd;
10635 +
10636 + /* REG3 = assoclen */
10637 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10638 + LDST_SRCDST_WORD_DECO_MATH3 |
10639 + (4 << LDST_OFFSET_SHIFT));
10640 +
10641 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10642 + JUMP_COND_CALM | JUMP_COND_NCP |
10643 + JUMP_COND_NOP | JUMP_COND_NIP |
10644 + JUMP_COND_NIFP);
10645 + set_jump_tgt_here(desc, wait_load_cmd);
10646 +
10647 + if (!geniv)
10648 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10649 + LDST_SRCDST_BYTE_CONTEXT |
10650 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10651 + }
10652 +
10653 + /* Read and write assoclen bytes */
10654 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10655 + if (geniv)
10656 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
10657 + else
10658 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10659 +
10660 + /* Skip assoc data */
10661 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10662 +
10663 + /* read assoc before reading payload */
10664 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10665 + KEY_VLF);
10666 +
10667 + if (geniv) {
10668 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10669 + LDST_SRCDST_BYTE_CONTEXT |
10670 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10671 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10672 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10673 + }
10674 +
10675 + /* Load Counter into CONTEXT1 reg */
10676 + if (is_rfc3686)
10677 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10678 + LDST_SRCDST_BYTE_CONTEXT |
10679 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10680 + LDST_OFFSET_SHIFT));
10681 +
10682 + /* Choose operation */
10683 + if (ctx1_iv_off)
10684 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10685 + OP_ALG_DECRYPT);
10686 + else
10687 + append_dec_op1(desc, cdata->algtype);
10688 +
10689 + /* Read and write cryptlen bytes */
10690 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10691 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10692 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10693 +
10694 + /* Load ICV */
10695 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10696 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10697 +
10698 +#ifdef DEBUG
10699 + print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10700 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10701 +#endif
10702 +}
10703 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10704 +
10705 +/**
10706 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10707 + * (non-protocol) with HW-generated initialization
10708 + * vector.
10709 + * @desc: pointer to buffer used for descriptor construction
10710 + * @cdata: pointer to block cipher transform definitions
10711 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10712 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10713 + * @adata: pointer to authentication transform definitions. Note that since a
10714 + * split key is to be used, the size of the split key itself is
10715 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10716 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10717 + * @ivsize: initialization vector size
10718 + * @icvsize: integrity check value (ICV) size (truncated or full)
10719 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10720 + * @nonce: pointer to rfc3686 nonce
10721 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10722 + * @is_qi: true when called from caam/qi
10723 + *
10724 + * Note: Requires an MDHA split key.
10725 + */
10726 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10727 + struct alginfo *adata, unsigned int ivsize,
10728 + unsigned int icvsize, const bool is_rfc3686,
10729 + u32 *nonce, const u32 ctx1_iv_off,
10730 + const bool is_qi)
10731 +{
10732 + u32 geniv, moveiv;
10733 +
10734 + /* Note: Context registers are saved. */
10735 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10736 +
10737 + if (is_qi) {
10738 + u32 *wait_load_cmd;
10739 +
10740 + /* REG3 = assoclen */
10741 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10742 + LDST_SRCDST_WORD_DECO_MATH3 |
10743 + (4 << LDST_OFFSET_SHIFT));
10744 +
10745 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10746 + JUMP_COND_CALM | JUMP_COND_NCP |
10747 + JUMP_COND_NOP | JUMP_COND_NIP |
10748 + JUMP_COND_NIFP);
10749 + set_jump_tgt_here(desc, wait_load_cmd);
10750 + }
10751 +
10752 + if (is_rfc3686) {
10753 + if (is_qi)
10754 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10755 + LDST_SRCDST_BYTE_CONTEXT |
10756 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10757 +
10758 + goto copy_iv;
10759 + }
10760 +
10761 + /* Generate IV */
10762 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10763 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10764 + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10765 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10766 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10767 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10768 + append_move(desc, MOVE_WAITCOMP |
10769 + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10770 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10771 + (ivsize << MOVE_LEN_SHIFT));
10772 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10773 +
10774 +copy_iv:
10775 + /* Copy IV to class 1 context */
10776 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10777 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10778 + (ivsize << MOVE_LEN_SHIFT));
10779 +
10780 + /* Return to encryption */
10781 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10782 + OP_ALG_ENCRYPT);
10783 +
10784 + /* Read and write assoclen bytes */
10785 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10786 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10787 +
10788 + /* Skip assoc data */
10789 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10790 +
10791 + /* read assoc before reading payload */
10792 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10793 + KEY_VLF);
10794 +
10795 + /* Copy iv from outfifo to class 2 fifo */
10796 + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10797 + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10798 + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10799 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10800 + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10801 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10802 +
10803 + /* Load Counter into CONTEXT1 reg */
10804 + if (is_rfc3686)
10805 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10806 + LDST_SRCDST_BYTE_CONTEXT |
10807 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10808 + LDST_OFFSET_SHIFT));
10809 +
10810 + /* Class 1 operation */
10811 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10812 + OP_ALG_ENCRYPT);
10813 +
10814 + /* Will write ivsize + cryptlen */
10815 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10816 +
10817 + /* No need to reload iv */

10818 + append_seq_fifo_load(desc, ivsize,
10819 + FIFOLD_CLASS_SKIP);
10820 +
10821 + /* Will read cryptlen */
10822 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10823 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10824 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10825 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10826 +
10827 + /* Write ICV */
10828 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10829 + LDST_SRCDST_BYTE_CONTEXT);
10830 +
10831 +#ifdef DEBUG
10832 + print_hex_dump(KERN_ERR,
10833 + "aead givenc shdesc@" __stringify(__LINE__)": ",
10834 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10835 +#endif
10836 +}
10837 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10838 +
10839 +/**
10840 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10841 + * @desc: pointer to buffer used for descriptor construction
10842 + * @cdata: pointer to block cipher transform definitions
10843 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10844 + * with OP_ALG_AAI_CBC
10845 + * @adata: pointer to authentication transform definitions. Note that since a
10846 + * split key is to be used, the size of the split key itself is
10847 + * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
10848 + * OP_ALG_AAI_HMAC_PRECOMP.
10849 + * @assoclen: associated data length
10850 + * @ivsize: initialization vector size
10851 + * @authsize: authentication data size
10852 + * @blocksize: block cipher size
10853 + */
10854 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10855 + struct alginfo *adata, unsigned int assoclen,
10856 + unsigned int ivsize, unsigned int authsize,
10857 + unsigned int blocksize)
10858 +{
10859 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
10860 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10861 +
10862 + /*
10863 + * Compute the index (in bytes) for the LOAD with destination of
10864 + * Class 1 Data Size Register and for the LOAD that generates padding
10865 + */
10866 + if (adata->key_inline) {
10867 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10868 + cdata->keylen - 4 * CAAM_CMD_SZ;
10869 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10870 + cdata->keylen - 2 * CAAM_CMD_SZ;
10871 + } else {
10872 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10873 + 4 * CAAM_CMD_SZ;
10874 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10875 + 2 * CAAM_CMD_SZ;
10876 + }
10877 +
10878 + stidx = 1 << HDR_START_IDX_SHIFT;
10879 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10880 +
10881 + /* skip key loading if they are loaded due to sharing */
10882 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10883 + JUMP_COND_SHRD);
10884 +
10885 + if (adata->key_inline)
10886 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10887 + adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10888 + KEY_ENC);
10889 + else
10890 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10891 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10892 +
10893 + if (cdata->key_inline)
10894 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
10895 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
10896 + else
10897 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10898 + KEY_DEST_CLASS_REG);
10899 +
10900 + set_jump_tgt_here(desc, key_jump_cmd);
10901 +
10902 + /* class 2 operation */
10903 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10904 + OP_ALG_ENCRYPT);
10905 + /* class 1 operation */
10906 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10907 + OP_ALG_ENCRYPT);
10908 +
10909 + /* payloadlen = input data length - (assoclen + ivlen) */
10910 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
10911 +
10912 + /* math1 = payloadlen + icvlen */
10913 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
10914 +
10915 + /* padlen = block_size - math1 % block_size */
10916 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
10917 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
10918 +
10919 + /* cryptlen = payloadlen + icvlen + padlen */
10920 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
10921 +
10922 + /*
10923 + * update immediate data with the padding length value
10924 + * for the LOAD in the class 1 data size register.
10925 + */
10926 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10927 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
10928 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10929 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
10930 +
10931 + /* overwrite PL field for the padding iNFO FIFO entry */
10932 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10933 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
10934 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10935 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
10936 +
10937 + /* store encrypted payload, icv and padding */
10938 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
10939 +
10940 + /* if payload length is zero, jump to zero-payload commands */
10941 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
10942 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
10943 + JUMP_COND_MATH_Z);
10944 +
10945 + /* load iv in context1 */
10946 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10947 + LDST_CLASS_1_CCB | ivsize);
10948 +
10949 + /* read assoc for authentication */
10950 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10951 + FIFOLD_TYPE_MSG);
10952 + /* insnoop payload */
10953 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
10954 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
10955 +
10956 + /* jump the zero-payload commands */
10957 + append_jump(desc, JUMP_TEST_ALL | 3);
10958 +
10959 + /* zero-payload commands */
10960 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
10961 +
10962 + /* load iv in context1 */
10963 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10964 + LDST_CLASS_1_CCB | ivsize);
10965 +
10966 + /* assoc data is the only data for authentication */
10967 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10968 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
10969 +
10970 + /* send icv to encryption */
10971 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
10972 + authsize);
10973 +
10974 + /* update class 1 data size register with padding length */
10975 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
10976 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10977 +
10978 + /* generate padding and send it to encryption */
10979 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
10980 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
10981 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
10982 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10983 +
10984 +#ifdef DEBUG
10985 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
10986 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
10987 + desc_bytes(desc), 1);
10988 +#endif
10989 +}
10990 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
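/*
 * Editorial note, not part of the patch: a worked example of the padding
 * math the descriptor performs above, assuming cbc(aes) (blocksize 16),
 * hmac(sha1) (authsize 20) and a 32-byte payload:
 *
 *	math1    = payloadlen + icvlen     = 32 + 20 = 52
 *	math3    = math1 & (blocksize - 1) = 52 & 15 =  4
 *	padlen   = blocksize - math3       = 16 -  4 = 12
 *	cryptlen = math1 + padlen          = 52 + 12 = 64
 *
 * When math1 is already block-aligned this yields a full block of padding,
 * matching TLS 1.0 CBC padding, which always adds at least one byte.
 */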
10991 +
10992 +/**
10993 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
10994 + * @desc: pointer to buffer used for descriptor construction
10995 + * @cdata: pointer to block cipher transform definitions
10996 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10997 + * with OP_ALG_AAI_CBC
10998 + * @adata: pointer to authentication transform definitions. Note that since a
10999 + * split key is to be used, the size of the split key itself is
11000 + * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
11001 + * OP_ALG_AAI_HMAC_PRECOMP.
11002 + * @assoclen: associated data length
11003 + * @ivsize: initialization vector size
11004 + * @authsize: authentication data size
11005 + * @blocksize: block cipher size
11006 + */
11007 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
11008 + struct alginfo *adata, unsigned int assoclen,
11009 + unsigned int ivsize, unsigned int authsize,
11010 + unsigned int blocksize)
11011 +{
11012 + u32 stidx, jumpback;
11013 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
11014 + /*
11015 + * Pointer Size bool determines the size of address pointers.
11016 + * false - Pointers fit in one 32-bit word.
11017 + * true - Pointers fit in two 32-bit words.
11018 + */
11019 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
11020 +
11021 + stidx = 1 << HDR_START_IDX_SHIFT;
11022 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11023 +
11024 + /* skip key loading if they are loaded due to sharing */
11025 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11026 + JUMP_COND_SHRD);
11027 +
11028 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
11029 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11030 +
11031 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11032 + KEY_DEST_CLASS_REG);
11033 +
11034 + set_jump_tgt_here(desc, key_jump_cmd);
11035 +
11036 + /* class 2 operation */
11037 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11038 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11039 + /* class 1 operation */
11040 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11041 + OP_ALG_DECRYPT);
11042 +
11043 + /* VSIL = input data length - 2 * block_size */
11044 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11045 + blocksize);
11046 +
11047 + /*
11048 + * payloadlen + icvlen + padlen = input data length - (assoclen +
11049 + * ivsize)
11050 + */
11051 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11052 +
11053 + /* skip data to the last but one cipher block */
11054 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11055 +
11056 + /* load iv for the last cipher block */
11057 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11058 + LDST_CLASS_1_CCB | ivsize);
11059 +
11060 + /* read last cipher block */
11061 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11062 + FIFOLD_TYPE_LAST1 | blocksize);
11063 +
11064 + /* move decrypted block into math0 and math1 */
11065 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11066 + blocksize);
11067 +
11068 + /* reset AES CHA */
11069 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11070 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11071 +
11072 + /* rewind input sequence */
11073 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11074 +
11075 + /* key1 is in decryption form */
11076 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11077 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11078 +
11079 + /* load iv in context1 */
11080 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11081 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11082 +
11083 + /* read sequence number */
11084 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11085 + /* load Type, Version and Len fields in math0 */
11086 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11087 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11088 +
11089 + /* compute (padlen - 1) */
11090 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11091 +
11092 + /* math2 = icvlen + (padlen - 1) + 1 */
11093 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11094 +
11095 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11096 +
11097 + /* VSOL = payloadlen + icvlen + padlen */
11098 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11099 +
11100 +#ifdef __LITTLE_ENDIAN
11101 + append_moveb(desc, MOVE_WAITCOMP |
11102 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11103 +#endif
11104 + /* update Len field */
11105 + append_math_sub(desc, REG0, REG0, REG2, 8);
11106 +
11107 + /* store decrypted payload, icv and padding */
11108 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11109 +
11110 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
11111 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11112 +
11113 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11114 + JUMP_COND_MATH_Z);
11115 +
11116 + /* send Type, Version and Len(pre ICV) fields to authentication */
11117 + append_move(desc, MOVE_WAITCOMP |
11118 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11119 + (3 << MOVE_OFFSET_SHIFT) | 5);
11120 +
11121 + /* outsnooping payload */
11122 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11123 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11124 + FIFOLDST_VLF);
11125 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11126 +
11127 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11128 + /* send Type, Version and Len(pre ICV) fields to authentication */
11129 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11130 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11131 + (3 << MOVE_OFFSET_SHIFT) | 5);
11132 +
11133 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
11134 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11135 +
11136 + /* load icvlen and padlen */
11137 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11138 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11139 +
11140 + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
11141 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11142 +
11143 + /*
11144 + * Start a new input sequence using the SEQ OUT PTR command options,
11145 + * pointer and length used when the current output sequence was defined.
11146 + */
11147 + if (ps) {
11148 + /*
11149 + * Move the lower 32 bits of Shared Descriptor address, the
11150 + * SEQ OUT PTR command, Output Pointer (2 words) and
11151 + * Output Length into math registers.
11152 + */
11153 +#ifdef __LITTLE_ENDIAN
11154 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11155 + MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11156 + 20);
11157 +#else
11158 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11159 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11160 + 20);
11161 +#endif
11162 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11163 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
11164 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11165 + /* Append a JUMP command after the copied fields */
11166 + jumpback = CMD_JUMP | (char)-9;
11167 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11168 + LDST_SRCDST_WORD_DECO_MATH2 |
11169 + (4 << LDST_OFFSET_SHIFT));
11170 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11171 + /* Move the updated fields back to the Job Descriptor */
11172 +#ifdef __LITTLE_ENDIAN
11173 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11174 + MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11175 + 24);
11176 +#else
11177 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11178 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11179 + 24);
11180 +#endif
11181 + /*
11182 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11183 + * and then jump back to the next command from the
11184 + * Shared Descriptor.
11185 + */
11186 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11187 + } else {
11188 + /*
11189 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11190 + * Output Length into math registers.
11191 + */
11192 +#ifdef __LITTLE_ENDIAN
11193 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11194 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11195 + 12);
11196 +#else
11197 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11198 + MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11199 + 12);
11200 +#endif
11201 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11202 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
11203 + ~(((u64)(CMD_SEQ_IN_PTR ^
11204 + CMD_SEQ_OUT_PTR)) << 32));
11205 + /* Append a JUMP command after the copied fields */
11206 + jumpback = CMD_JUMP | (char)-7;
11207 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11208 + LDST_SRCDST_WORD_DECO_MATH1 |
11209 + (4 << LDST_OFFSET_SHIFT));
11210 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11211 + /* Move the updated fields back to the Job Descriptor */
11212 +#ifdef __LITTLE_ENDIAN
11213 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11214 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11215 + 16);
11216 +#else
11217 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11218 + MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11219 + 16);
11220 +#endif
11221 + /*
11222 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11223 + * and then jump back to the next command from the
11224 + * Shared Descriptor.
11225 + */
11226 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11227 + }
11228 +
11229 + /* skip payload */
11230 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11231 + /* check icv */
11232 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11233 + FIFOLD_TYPE_LAST2 | authsize);
11234 +
11235 +#ifdef DEBUG
11236 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11237 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11238 + desc_bytes(desc), 1);
11239 +#endif
11240 +}
11241 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
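/*
 * Editorial note, not part of the patch: the ps/!ps block above restarts
 * the input sequence by self-patching.  The SEQ OUT PTR command word that
 * defined the output sequence is copied from the descriptor buffer into
 * MATH registers, rewritten in place into a SEQ IN PTR command, copied
 * back, and re-executed via the final JUMP.  The rewrite is just the bit
 * masking sketched below; it relies on the IN PTR opcode being the OUT PTR
 * opcode with the differing bit(s) cleared, which is exactly what the
 * ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR) mask in the code expresses:
 *
 *	cmd  = CMD_SEQ_OUT_PTR | flag_and_len_bits;	// copied from descbuf
 *	cmd &= ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR);	// clear differing bits
 *	// cmd now reads back as CMD_SEQ_IN_PTR | flag_and_len_bits
 */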
11242 +
11243 +/**
11244 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11245 + * @desc: pointer to buffer used for descriptor construction
11246 + * @cdata: pointer to block cipher transform definitions
11247 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11248 + * @ivsize: initialization vector size
11249 + * @icvsize: integrity check value (ICV) size (truncated or full)
11250 + * @is_qi: true when called from caam/qi
11251 + */
11252 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11253 + unsigned int ivsize, unsigned int icvsize,
11254 + const bool is_qi)
11255 +{
11256 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11257 + *zero_assoc_jump_cmd2;
11258 +
11259 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11260 +
11261 + /* skip key loading if they are loaded due to sharing */
11262 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11263 + JUMP_COND_SHRD);
11264 + if (cdata->key_inline)
11265 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11266 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11267 + else
11268 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11269 + KEY_DEST_CLASS_REG);
11270 + set_jump_tgt_here(desc, key_jump_cmd);
11271 +
11272 + /* class 1 operation */
11273 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11274 + OP_ALG_ENCRYPT);
11275 +
11276 + if (is_qi) {
11277 + u32 *wait_load_cmd;
11278 +
11279 + /* REG3 = assoclen */
11280 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11281 + LDST_SRCDST_WORD_DECO_MATH3 |
11282 + (4 << LDST_OFFSET_SHIFT));
11283 +
11284 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11285 + JUMP_COND_CALM | JUMP_COND_NCP |
11286 + JUMP_COND_NOP | JUMP_COND_NIP |
11287 + JUMP_COND_NIFP);
11288 + set_jump_tgt_here(desc, wait_load_cmd);
11289 +
11290 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11291 + ivsize);
11292 + } else {
11293 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11294 + CAAM_CMD_SZ);
11295 + }
11296 +
11297 + /* if assoclen + cryptlen is ZERO, skip to ICV write */
11298 + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11299 + JUMP_COND_MATH_Z);
11300 +
11301 + if (is_qi)
11302 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11303 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11304 +
11305 + /* if assoclen is ZERO, skip reading the assoc data */
11306 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11307 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11308 + JUMP_COND_MATH_Z);
11309 +
11310 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11311 +
11312 + /* skip assoc data */
11313 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11314 +
11315 + /* cryptlen = seqinlen - assoclen */
11316 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11317 +
11318 + /* if cryptlen is ZERO jump to zero-payload commands */
11319 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11320 + JUMP_COND_MATH_Z);
11321 +
11322 + /* read assoc data */
11323 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11324 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11325 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11326 +
11327 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11328 +
11329 + /* write encrypted data */
11330 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11331 +
11332 + /* read payload data */
11333 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11334 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11335 +
11336 + /* jump to ICV writing */
11337 + if (is_qi)
11338 + append_jump(desc, JUMP_TEST_ALL | 4);
11339 + else
11340 + append_jump(desc, JUMP_TEST_ALL | 2);
11341 +
11342 + /* zero-payload commands */
11343 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11344 +
11345 + /* read assoc data */
11346 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11347 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11348 + if (is_qi)
11349 + /* jump to ICV writing */
11350 + append_jump(desc, JUMP_TEST_ALL | 2);
11351 +
11352 + /* There is no input data */
11353 + set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11354 +
11355 + if (is_qi)
11356 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11357 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11358 + FIFOLD_TYPE_LAST1);
11359 +
11360 + /* write ICV */
11361 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11362 + LDST_SRCDST_BYTE_CONTEXT);
11363 +
11364 +#ifdef DEBUG
11365 + print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11366 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11367 +#endif
11368 +}
11369 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
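/*
 * Editorial sketch, not part of the patch: an illustrative (assumed) caller
 * for the gcm(aes) constructors.  Whether the key is inlined in the
 * descriptor or referenced by DMA address is decided by the caller through
 * cdata->key_inline before construction; the 12-byte IV and 16-byte ICV
 * below are the usual GCM sizes and are assumptions of this example.
 *
 *	ctx->cdata.key_virt = ctx->key;
 *	ctx->cdata.key_inline = true;
 *	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, 12, 16, false);
 *	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, 12, 16, false);
 */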
11370 +
11371 +/**
11372 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11373 + * @desc: pointer to buffer used for descriptor construction
11374 + * @cdata: pointer to block cipher transform definitions
11375 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11376 + * @ivsize: initialization vector size
11377 + * @icvsize: integrity check value (ICV) size (truncated or full)
11378 + * @is_qi: true when called from caam/qi
11379 + */
11380 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11381 + unsigned int ivsize, unsigned int icvsize,
11382 + const bool is_qi)
11383 +{
11384 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11385 +
11386 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11387 +
11388 + /* skip key loading if they are loaded due to sharing */
11389 + key_jump_cmd = append_jump(desc, JUMP_JSL |
11390 + JUMP_TEST_ALL | JUMP_COND_SHRD);
11391 + if (cdata->key_inline)
11392 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11393 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11394 + else
11395 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11396 + KEY_DEST_CLASS_REG);
11397 + set_jump_tgt_here(desc, key_jump_cmd);
11398 +
11399 + /* class 1 operation */
11400 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11401 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11402 +
11403 + if (is_qi) {
11404 + u32 *wait_load_cmd;
11405 +
11406 + /* REG3 = assoclen */
11407 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11408 + LDST_SRCDST_WORD_DECO_MATH3 |
11409 + (4 << LDST_OFFSET_SHIFT));
11410 +
11411 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11412 + JUMP_COND_CALM | JUMP_COND_NCP |
11413 + JUMP_COND_NOP | JUMP_COND_NIP |
11414 + JUMP_COND_NIFP);
11415 + set_jump_tgt_here(desc, wait_load_cmd);
11416 +
11417 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11418 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11419 + }
11420 +
11421 + /* if assoclen is ZERO, skip reading the assoc data */
11422 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11423 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11424 + JUMP_COND_MATH_Z);
11425 +
11426 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11427 +
11428 + /* skip assoc data */
11429 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11430 +
11431 + /* read assoc data */
11432 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11433 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11434 +
11435 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11436 +
11437 + /* cryptlen = seqoutlen - assoclen */
11438 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11439 +
11440 + /* jump to zero-payload command if cryptlen is zero */
11441 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11442 + JUMP_COND_MATH_Z);
11443 +
11444 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11445 +
11446 + /* store encrypted data */
11447 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11448 +
11449 + /* read payload data */
11450 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11451 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11452 +
11453 + /* zero-payload command */
11454 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11455 +
11456 + /* read ICV */
11457 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11458 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11459 +
11460 +#ifdef DEBUG
11461 + print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11462 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11463 +#endif
11464 +}
11465 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11466 +
11467 +/**
11468 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11469 + * (non-protocol).
11470 + * @desc: pointer to buffer used for descriptor construction
11471 + * @cdata: pointer to block cipher transform definitions
11472 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11473 + * @ivsize: initialization vector size
11474 + * @icvsize: integrity check value (ICV) size (truncated or full)
11475 + * @is_qi: true when called from caam/qi
11476 + */
11477 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11478 + unsigned int ivsize, unsigned int icvsize,
11479 + const bool is_qi)
11480 +{
11481 + u32 *key_jump_cmd;
11482 +
11483 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11484 +
11485 + /* Skip key loading if it is loaded due to sharing */
11486 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11487 + JUMP_COND_SHRD);
11488 + if (cdata->key_inline)
11489 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11490 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11491 + else
11492 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11493 + KEY_DEST_CLASS_REG);
11494 + set_jump_tgt_here(desc, key_jump_cmd);
11495 +
11496 + /* Class 1 operation */
11497 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11498 + OP_ALG_ENCRYPT);
11499 +
11500 + if (is_qi) {
11501 + u32 *wait_load_cmd;
11502 +
11503 + /* REG3 = assoclen */
11504 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11505 + LDST_SRCDST_WORD_DECO_MATH3 |
11506 + (4 << LDST_OFFSET_SHIFT));
11507 +
11508 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11509 + JUMP_COND_CALM | JUMP_COND_NCP |
11510 + JUMP_COND_NOP | JUMP_COND_NIP |
11511 + JUMP_COND_NIFP);
11512 + set_jump_tgt_here(desc, wait_load_cmd);
11513 +
11514 + /* Read salt and IV */
11515 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11516 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11517 + FIFOLD_TYPE_IV);
11518 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11519 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11520 + }
11521 +
11522 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11523 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11524 +
11525 + /* Read assoc data */
11526 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11527 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11528 +
11529 + /* Skip IV */
11530 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11531 +
11532 + /* Will read cryptlen bytes */
11533 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11534 +
11535 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11536 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11537 +
11538 + /* Skip assoc data */
11539 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11540 +
11541 + /* cryptlen = seqoutlen - assoclen */
11542 + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11543 +
11544 + /* Write encrypted data */
11545 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11546 +
11547 + /* Read payload data */
11548 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11549 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11550 +
11551 + /* Write ICV */
11552 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11553 + LDST_SRCDST_BYTE_CONTEXT);
11554 +
11555 +#ifdef DEBUG
11556 + print_hex_dump(KERN_ERR,
11557 + "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11558 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11559 +#endif
11560 +}
11561 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
11562 +
11563 +/**
11564 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11565 + * (non-protocol).
11566 + * @desc: pointer to buffer used for descriptor construction
11567 + * @cdata: pointer to block cipher transform definitions
11568 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11569 + * @ivsize: initialization vector size
11570 + * @icvsize: integrity check value (ICV) size (truncated or full)
11571 + * @is_qi: true when called from caam/qi
11572 + */
11573 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11574 + unsigned int ivsize, unsigned int icvsize,
11575 + const bool is_qi)
11576 +{
11577 + u32 *key_jump_cmd;
11578 +
11579 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11580 +
11581 + /* Skip key loading if it is loaded due to sharing */
11582 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11583 + JUMP_COND_SHRD);
11584 + if (cdata->key_inline)
11585 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11586 + cdata->keylen, CLASS_1 |
11587 + KEY_DEST_CLASS_REG);
11588 + else
11589 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11590 + KEY_DEST_CLASS_REG);
11591 + set_jump_tgt_here(desc, key_jump_cmd);
11592 +
11593 + /* Class 1 operation */
11594 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11595 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11596 +
11597 + if (is_qi) {
11598 + u32 *wait_load_cmd;
11599 +
11600 + /* REG3 = assoclen */
11601 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11602 + LDST_SRCDST_WORD_DECO_MATH3 |
11603 + (4 << LDST_OFFSET_SHIFT));
11604 +
11605 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11606 + JUMP_COND_CALM | JUMP_COND_NCP |
11607 + JUMP_COND_NOP | JUMP_COND_NIP |
11608 + JUMP_COND_NIFP);
11609 + set_jump_tgt_here(desc, wait_load_cmd);
11610 +
11611 + /* Read salt and IV */
11612 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11613 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11614 + FIFOLD_TYPE_IV);
11615 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11616 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11617 + }
11618 +
11619 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11620 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11621 +
11622 + /* Read assoc data */
11623 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11624 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11625 +
11626 + /* Skip IV */
11627 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11628 +
11629 + /* Will read cryptlen bytes */
11630 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11631 +
11632 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11633 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11634 +
11635 + /* Skip assoc data */
11636 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11637 +
11638 + /* Will write cryptlen bytes */
11639 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11640 +
11641 + /* Store payload data */
11642 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11643 +
11644 + /* Read encrypted data */
11645 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11646 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11647 +
11648 + /* Read ICV */
11649 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11650 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11651 +
11652 +#ifdef DEBUG
11653 + print_hex_dump(KERN_ERR,
11654 + "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11655 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11656 +#endif
11657 +}
11658 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11659 +
11660 +/**
11661 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11662 + * (non-protocol).
11663 + * @desc: pointer to buffer used for descriptor construction
11664 + * @cdata: pointer to block cipher transform definitions
11665 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11666 + * @ivsize: initialization vector size
11667 + * @icvsize: integrity check value (ICV) size (truncated or full)
11668 + * @is_qi: true when called from caam/qi
11669 + */
11670 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11671 + unsigned int ivsize, unsigned int icvsize,
11672 + const bool is_qi)
11673 +{
11674 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11675 +
11676 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11677 +
11678 + /* Skip key loading if it is loaded due to sharing */
11679 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11680 + JUMP_COND_SHRD);
11681 + if (cdata->key_inline)
11682 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11683 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11684 + else
11685 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11686 + KEY_DEST_CLASS_REG);
11687 + set_jump_tgt_here(desc, key_jump_cmd);
11688 +
11689 + /* Class 1 operation */
11690 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11691 + OP_ALG_ENCRYPT);
11692 +
11693 + if (is_qi) {
11694 + /* assoclen is not needed, skip it */
11695 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11696 +
11697 + /* Read salt and IV */
11698 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11699 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11700 + FIFOLD_TYPE_IV);
11701 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11702 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11703 + }
11704 +
11705 + /* assoclen + cryptlen = seqinlen */
11706 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11707 +
11708 + /*
11709 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11710 + * thus need to do some magic, i.e. self-patch the descriptor
11711 + * buffer.
11712 + */
11713 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11714 + (0x6 << MOVE_LEN_SHIFT));
11715 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11716 + (0x8 << MOVE_LEN_SHIFT));
11717 +
11718 + /* Will read assoclen + cryptlen bytes */
11719 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11720 +
11721 + /* Will write assoclen + cryptlen bytes */
11722 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11723 +
11724 + /* Read and write assoclen + cryptlen bytes */
11725 + aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11726 +
11727 + set_move_tgt_here(desc, read_move_cmd);
11728 + set_move_tgt_here(desc, write_move_cmd);
11729 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11730 + /* Move payload data to OFIFO */
11731 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11732 +
11733 + /* Write ICV */
11734 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11735 + LDST_SRCDST_BYTE_CONTEXT);
11736 +
11737 +#ifdef DEBUG
11738 + print_hex_dump(KERN_ERR,
11739 + "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11740 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11741 +#endif
11742 +}
11743 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11744 +
11745 +/**
11746 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11747 + * (non-protocol).
11748 + * @desc: pointer to buffer used for descriptor construction
11749 + * @cdata: pointer to block cipher transform definitions
11750 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11751 + * @ivsize: initialization vector size
11752 + * @icvsize: integrity check value (ICV) size (truncated or full)
11753 + * @is_qi: true when called from caam/qi
11754 + */
11755 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11756 + unsigned int ivsize, unsigned int icvsize,
11757 + const bool is_qi)
11758 +{
11759 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11760 +
11761 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11762 +
11763 + /* Skip key loading if it is loaded due to sharing */
11764 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11765 + JUMP_COND_SHRD);
11766 + if (cdata->key_inline)
11767 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11768 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11769 + else
11770 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11771 + KEY_DEST_CLASS_REG);
11772 + set_jump_tgt_here(desc, key_jump_cmd);
11773 +
11774 + /* Class 1 operation */
11775 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11776 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11777 +
11778 + if (is_qi) {
11779 + /* assoclen is not needed, skip it */
11780 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11781 +
11782 + /* Read salt and IV */
11783 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11784 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11785 + FIFOLD_TYPE_IV);
11786 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11787 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11788 + }
11789 +
11790 + /* assoclen + cryptlen = seqoutlen */
11791 + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11792 +
11793 + /*
11794 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11795 + * thus need to do some magic, i.e. self-patch the descriptor
11796 + * buffer.
11797 + */
11798 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11799 + (0x6 << MOVE_LEN_SHIFT));
11800 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11801 + (0x8 << MOVE_LEN_SHIFT));
11802 +
11803 + /* Will read assoclen + cryptlen bytes */
11804 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11805 +
11806 + /* Will write assoclen + cryptlen bytes */
11807 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11808 +
11809 + /* Store payload data */
11810 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11811 +
11812 + /* In-snoop assoclen + cryptlen data */
11813 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11814 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11815 +
11816 + set_move_tgt_here(desc, read_move_cmd);
11817 + set_move_tgt_here(desc, write_move_cmd);
11818 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11819 + /* Move payload data to OFIFO */
11820 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11821 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11822 +
11823 + /* Read ICV */
11824 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11825 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11826 +
11827 +#ifdef DEBUG
11828 + print_hex_dump(KERN_ERR,
11829 + "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11830 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11831 +#endif
11832 +}
11833 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11834 +
11835 +/*
11836 + * For ablkcipher encrypt and decrypt, read from req->src and
11837 + * write to req->dst
11838 + */
11839 +static inline void ablkcipher_append_src_dst(u32 *desc)
11840 +{
11841 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11842 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11843 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11844 + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11845 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11846 +}
11847 +
11848 +/**
11849 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11850 + * @desc: pointer to buffer used for descriptor construction
11851 + * @cdata: pointer to block cipher transform definitions
11852 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11853 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11854 + * @ivsize: initialization vector size
11855 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11856 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11857 + */
11858 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11859 + unsigned int ivsize, const bool is_rfc3686,
11860 + const u32 ctx1_iv_off)
11861 +{
11862 + u32 *key_jump_cmd;
11863 +
11864 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11865 + /* Skip if already shared */
11866 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11867 + JUMP_COND_SHRD);
11868 +
11869 + /* Load class1 key only */
11870 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11871 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11872 +
11873 + /* Load nonce into CONTEXT1 reg */
11874 + if (is_rfc3686) {
11875 + u8 *nonce = cdata->key_virt + cdata->keylen;
11876 +
11877 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11878 + LDST_CLASS_IND_CCB |
11879 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11880 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11881 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11882 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11883 + }
11884 +
11885 + set_jump_tgt_here(desc, key_jump_cmd);
11886 +
11887 + /* Load iv */
11888 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11889 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11890 +
11891 + /* Load counter into CONTEXT1 reg */
11892 + if (is_rfc3686)
11893 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11894 + LDST_SRCDST_BYTE_CONTEXT |
11895 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11896 + LDST_OFFSET_SHIFT));
11897 +
11898 + /* Load operation */
11899 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11900 + OP_ALG_ENCRYPT);
11901 +
11902 + /* Perform operation */
11903 + ablkcipher_append_src_dst(desc);
11904 +
11905 +#ifdef DEBUG
11906 + print_hex_dump(KERN_ERR,
11907 + "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
11908 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11909 +#endif
11910 +}
11911 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
11912 +
11913 +/**
11914 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
11915 + * @desc: pointer to buffer used for descriptor construction
11916 + * @cdata: pointer to block cipher transform definitions
11917 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11918 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11919 + * @ivsize: initialization vector size
11920 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11921 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11922 + */
11923 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
11924 + unsigned int ivsize, const bool is_rfc3686,
11925 + const u32 ctx1_iv_off)
11926 +{
11927 + u32 *key_jump_cmd;
11928 +
11929 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11930 + /* Skip if already shared */
11931 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11932 + JUMP_COND_SHRD);
11933 +
11934 + /* Load class1 key only */
11935 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11936 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11937 +
11938 + /* Load nonce into CONTEXT1 reg */
11939 + if (is_rfc3686) {
11940 + u8 *nonce = cdata->key_virt + cdata->keylen;
11941 +
11942 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11943 + LDST_CLASS_IND_CCB |
11944 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11945 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11946 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11947 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11948 + }
11949 +
11950 + set_jump_tgt_here(desc, key_jump_cmd);
11951 +
11952 + /* load IV */
11953 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11954 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11955 +
11956 + /* Load counter into CONTEXT1 reg */
11957 + if (is_rfc3686)
11958 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11959 + LDST_SRCDST_BYTE_CONTEXT |
11960 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11961 + LDST_OFFSET_SHIFT));
11962 +
11963 + /* Choose operation */
11964 + if (ctx1_iv_off)
11965 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11966 + OP_ALG_DECRYPT);
11967 + else
11968 + append_dec_op1(desc, cdata->algtype);
11969 +
11970 + /* Perform operation */
11971 + ablkcipher_append_src_dst(desc);
11972 +
11973 +#ifdef DEBUG
11974 + print_hex_dump(KERN_ERR,
11975 + "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
11976 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11977 +#endif
11978 +}
11979 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
11980 +
11981 +/**
11982 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
11983 + * with HW-generated initialization vector.
11984 + * @desc: pointer to buffer used for descriptor construction
11985 + * @cdata: pointer to block cipher transform definitions
11986 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11987 + * with OP_ALG_AAI_CBC.
11988 + * @ivsize: initialization vector size
11989 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11990 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11991 + */
11992 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
11993 + unsigned int ivsize, const bool is_rfc3686,
11994 + const u32 ctx1_iv_off)
11995 +{
11996 + u32 *key_jump_cmd, geniv;
11997 +
11998 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11999 + /* Skip if already shared */
12000 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12001 + JUMP_COND_SHRD);
12002 +
12003 + /* Load class1 key only */
12004 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12005 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12006 +
12007 + /* Load Nonce into CONTEXT1 reg */
12008 + if (is_rfc3686) {
12009 + u8 *nonce = cdata->key_virt + cdata->keylen;
12010 +
12011 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12012 + LDST_CLASS_IND_CCB |
12013 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12014 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12015 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12016 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12017 + }
12018 + set_jump_tgt_here(desc, key_jump_cmd);
12019 +
12020 + /* Generate IV */
12021 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
12022 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
12023 + (ivsize << NFIFOENTRY_DLEN_SHIFT);
12024 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
12025 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
12026 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
12027 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
12028 + MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
12029 + (ctx1_iv_off << MOVE_OFFSET_SHIFT));
12030 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
12031 +
12032 + /* Copy generated IV to memory */
12033 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12034 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12035 +
12036 + /* Load Counter into CONTEXT1 reg */
12037 + if (is_rfc3686)
12038 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12039 + LDST_SRCDST_BYTE_CONTEXT |
12040 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12041 + LDST_OFFSET_SHIFT));
12042 +
12043 + if (ctx1_iv_off)
12044 + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12045 + (1 << JUMP_OFFSET_SHIFT));
12046 +
12047 + /* Load operation */
12048 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12049 + OP_ALG_ENCRYPT);
12050 +
12051 + /* Perform operation */
12052 + ablkcipher_append_src_dst(desc);
12053 +
12054 +#ifdef DEBUG
12055 + print_hex_dump(KERN_ERR,
12056 + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12057 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12058 +#endif
12059 +}
12060 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
12061 +
12062 +/**
12063 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12064 + * descriptor
12065 + * @desc: pointer to buffer used for descriptor construction
12066 + * @cdata: pointer to block cipher transform definitions
12067 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12068 + */
12069 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12070 +{
12071 + __be64 sector_size = cpu_to_be64(512);
12072 + u32 *key_jump_cmd;
12073 +
12074 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12075 + /* Skip if already shared */
12076 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12077 + JUMP_COND_SHRD);
12078 +
12079 + /* Load class1 keys only */
12080 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12081 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12082 +
12083 + /* Load sector size with index 40 bytes (0x28) */
12084 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12085 + LDST_SRCDST_BYTE_CONTEXT |
12086 + (0x28 << LDST_OFFSET_SHIFT));
12087 +
12088 + set_jump_tgt_here(desc, key_jump_cmd);
12089 +
12090 + /*
12091 + * create sequence for loading the sector index
12092 + * Upper 8B of IV - will be used as sector index
12093 + * Lower 8B of IV - will be discarded
12094 + */
12095 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12096 + (0x20 << LDST_OFFSET_SHIFT));
12097 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12098 +
12099 + /* Load operation */
12100 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12101 + OP_ALG_ENCRYPT);
12102 +
12103 + /* Perform operation */
12104 + ablkcipher_append_src_dst(desc);
12105 +
12106 +#ifdef DEBUG
12107 + print_hex_dump(KERN_ERR,
12108 + "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12109 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12110 +#endif
12111 +}
12112 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12113 +
12114 +/**
12115 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12116 + * descriptor
12117 + * @desc: pointer to buffer used for descriptor construction
12118 + * @cdata: pointer to block cipher transform definitions
12119 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12120 + */
12121 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12122 +{
12123 + __be64 sector_size = cpu_to_be64(512);
12124 + u32 *key_jump_cmd;
12125 +
12126 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12127 + /* Skip if already shared */
12128 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12129 + JUMP_COND_SHRD);
12130 +
12131 + /* Load class1 key only */
12132 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12133 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12134 +
12135 + /* Load sector size with index 40 bytes (0x28) */
12136 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12137 + LDST_SRCDST_BYTE_CONTEXT |
12138 + (0x28 << LDST_OFFSET_SHIFT));
12139 +
12140 + set_jump_tgt_here(desc, key_jump_cmd);
12141 +
12142 + /*
12143 + * create sequence for loading the sector index
12144 + * Upper 8B of IV - will be used as sector index
12145 + * Lower 8B of IV - will be discarded
12146 + */
12147 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12148 + (0x20 << LDST_OFFSET_SHIFT));
12149 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12150 +
12151 + /* Load operation */
12152 + append_dec_op1(desc, cdata->algtype);
12153 +
12154 + /* Perform operation */
12155 + ablkcipher_append_src_dst(desc);
12156 +
12157 +#ifdef DEBUG
12158 + print_hex_dump(KERN_ERR,
12159 + "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12160 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12161 +#endif
12162 +}
12163 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12164 +
12165 +MODULE_LICENSE("GPL");
12166 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12167 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12168 diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
12169 new file mode 100644
12170 index 00000000..6b436f65
12171 --- /dev/null
12172 +++ b/drivers/crypto/caam/caamalg_desc.h
12173 @@ -0,0 +1,127 @@
12174 +/*
12175 + * Shared descriptors for aead, ablkcipher algorithms
12176 + *
12177 + * Copyright 2016 NXP
12178 + */
12179 +
12180 +#ifndef _CAAMALG_DESC_H_
12181 +#define _CAAMALG_DESC_H_
12182 +
12183 +/* length of descriptors text */
12184 +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
12185 +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12186 +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12187 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12188 +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12189 +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12190 +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12191 +
12192 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
12193 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12194 +
12195 +/* Note: Nonce is counted in cdata.keylen */
12196 +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
12197 +
12198 +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
12199 +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12200 +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12201 +
12202 +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
12203 +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12204 +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12205 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12206 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12207 +
12208 +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
12209 +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12210 +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12211 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12212 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12213 +
12214 +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
12215 +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12216 +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12217 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12218 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12219 +
12220 +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
12221 +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
12222 + 20 * CAAM_CMD_SZ)
12223 +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
12224 + 15 * CAAM_CMD_SZ)
12225 +
12226 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12227 + unsigned int icvsize);
12228 +
12229 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12230 + unsigned int icvsize);
12231 +
12232 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12233 + struct alginfo *adata, unsigned int ivsize,
12234 + unsigned int icvsize, const bool is_rfc3686,
12235 + u32 *nonce, const u32 ctx1_iv_off,
12236 + const bool is_qi);
12237 +
12238 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12239 + struct alginfo *adata, unsigned int ivsize,
12240 + unsigned int icvsize, const bool geniv,
12241 + const bool is_rfc3686, u32 *nonce,
12242 + const u32 ctx1_iv_off, const bool is_qi);
12243 +
12244 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12245 + struct alginfo *adata, unsigned int ivsize,
12246 + unsigned int icvsize, const bool is_rfc3686,
12247 + u32 *nonce, const u32 ctx1_iv_off,
12248 + const bool is_qi);
12249 +
12250 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12251 + struct alginfo *adata, unsigned int assoclen,
12252 + unsigned int ivsize, unsigned int authsize,
12253 + unsigned int blocksize);
12254 +
12255 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12256 + struct alginfo *adata, unsigned int assoclen,
12257 + unsigned int ivsize, unsigned int authsize,
12258 + unsigned int blocksize);
12259 +
12260 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12261 + unsigned int ivsize, unsigned int icvsize,
12262 + const bool is_qi);
12263 +
12264 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12265 + unsigned int ivsize, unsigned int icvsize,
12266 + const bool is_qi);
12267 +
12268 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12269 + unsigned int ivsize, unsigned int icvsize,
12270 + const bool is_qi);
12271 +
12272 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12273 + unsigned int ivsize, unsigned int icvsize,
12274 + const bool is_qi);
12275 +
12276 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12277 + unsigned int ivsize, unsigned int icvsize,
12278 + const bool is_qi);
12279 +
12280 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12281 + unsigned int ivsize, unsigned int icvsize,
12282 + const bool is_qi);
12283 +
12284 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12285 + unsigned int ivsize, const bool is_rfc3686,
12286 + const u32 ctx1_iv_off);
12287 +
12288 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12289 + unsigned int ivsize, const bool is_rfc3686,
12290 + const u32 ctx1_iv_off);
12291 +
12292 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12293 + unsigned int ivsize, const bool is_rfc3686,
12294 + const u32 ctx1_iv_off);
12295 +
12296 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12297 +
12298 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12299 +
12300 +#endif /* _CAAMALG_DESC_H_ */
12301 diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
12302 new file mode 100644
12303 index 00000000..d6a9b0c5
12304 --- /dev/null
12305 +++ b/drivers/crypto/caam/caamalg_qi.c
12306 @@ -0,0 +1,2877 @@
12307 +/*
12308 + * Freescale FSL CAAM support for crypto API over QI backend.
12309 + * Based on caamalg.c
12310 + *
12311 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12312 + * Copyright 2016-2017 NXP
12313 + */
12314 +
12315 +#include "compat.h"
12316 +#include "ctrl.h"
12317 +#include "regs.h"
12318 +#include "intern.h"
12319 +#include "desc_constr.h"
12320 +#include "error.h"
12321 +#include "sg_sw_qm.h"
12322 +#include "key_gen.h"
12323 +#include "qi.h"
12324 +#include "jr.h"
12325 +#include "caamalg_desc.h"
12326 +
12327 +/*
12328 + * crypto alg
12329 + */
12330 +#define CAAM_CRA_PRIORITY 2000
12331 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
12332 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
12333 + SHA512_DIGEST_SIZE * 2)
12334 +
12335 +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
12336 + CAAM_MAX_KEY_SIZE)
12337 +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12338 +
12339 +struct caam_alg_entry {
12340 + int class1_alg_type;
12341 + int class2_alg_type;
12342 + bool rfc3686;
12343 + bool geniv;
12344 +};
12345 +
12346 +struct caam_aead_alg {
12347 + struct aead_alg aead;
12348 + struct caam_alg_entry caam;
12349 + bool registered;
12350 +};
12351 +
12352 +/*
12353 + * per-session context
12354 + */
12355 +struct caam_ctx {
12356 + struct device *jrdev;
12357 + u32 sh_desc_enc[DESC_MAX_USED_LEN];
12358 + u32 sh_desc_dec[DESC_MAX_USED_LEN];
12359 + u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12360 + u8 key[CAAM_MAX_KEY_SIZE];
12361 + dma_addr_t key_dma;
12362 + struct alginfo adata;
12363 + struct alginfo cdata;
12364 + unsigned int authsize;
12365 + struct device *qidev;
12366 + spinlock_t lock; /* Protects multiple init of driver context */
12367 + struct caam_drv_ctx *drv_ctx[NUM_OP];
12368 +};
12369 +
12370 +static int aead_set_sh_desc(struct crypto_aead *aead)
12371 +{
12372 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12373 + typeof(*alg), aead);
12374 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12375 + unsigned int ivsize = crypto_aead_ivsize(aead);
12376 + u32 ctx1_iv_off = 0;
12377 + u32 *nonce = NULL;
12378 + unsigned int data_len[2];
12379 + u32 inl_mask;
12380 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12381 + OP_ALG_AAI_CTR_MOD128);
12382 + const bool is_rfc3686 = alg->caam.rfc3686;
12383 +
12384 + if (!ctx->cdata.keylen || !ctx->authsize)
12385 + return 0;
12386 +
12387 + /*
12388 + * AES-CTR needs to load IV in CONTEXT1 reg
12389 + * at an offset of 128bits (16bytes)
12390 + * CONTEXT1[255:128] = IV
12391 + */
12392 + if (ctr_mode)
12393 + ctx1_iv_off = 16;
12394 +
12395 + /*
12396 + * RFC3686 specific:
12397 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12398 + */
12399 + if (is_rfc3686) {
12400 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12401 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12402 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12403 + }
12404 +
12405 + data_len[0] = ctx->adata.keylen_pad;
12406 + data_len[1] = ctx->cdata.keylen;
12407 +
12408 + if (alg->caam.geniv)
12409 + goto skip_enc;
12410 +
12411 + /* aead_encrypt shared descriptor */
12412 + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12413 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12414 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12415 + ARRAY_SIZE(data_len)) < 0)
12416 + return -EINVAL;
12417 +
12418 + if (inl_mask & 1)
12419 + ctx->adata.key_virt = ctx->key;
12420 + else
12421 + ctx->adata.key_dma = ctx->key_dma;
12422 +
12423 + if (inl_mask & 2)
12424 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12425 + else
12426 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12427 +
12428 + ctx->adata.key_inline = !!(inl_mask & 1);
12429 + ctx->cdata.key_inline = !!(inl_mask & 2);
12430 +
12431 + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12432 + ivsize, ctx->authsize, is_rfc3686, nonce,
12433 + ctx1_iv_off, true);
12434 +
12435 +skip_enc:
12436 + /* aead_decrypt shared descriptor */
12437 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12438 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12439 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12440 + ARRAY_SIZE(data_len)) < 0)
12441 + return -EINVAL;
12442 +
12443 + if (inl_mask & 1)
12444 + ctx->adata.key_virt = ctx->key;
12445 + else
12446 + ctx->adata.key_dma = ctx->key_dma;
12447 +
12448 + if (inl_mask & 2)
12449 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12450 + else
12451 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12452 +
12453 + ctx->adata.key_inline = !!(inl_mask & 1);
12454 + ctx->cdata.key_inline = !!(inl_mask & 2);
12455 +
12456 + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12457 + ivsize, ctx->authsize, alg->caam.geniv,
12458 + is_rfc3686, nonce, ctx1_iv_off, true);
12459 +
12460 + if (!alg->caam.geniv)
12461 + goto skip_givenc;
12462 +
12463 + /* aead_givencrypt shared descriptor */
12464 + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12465 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12466 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12467 + ARRAY_SIZE(data_len)) < 0)
12468 + return -EINVAL;
12469 +
12470 + if (inl_mask & 1)
12471 + ctx->adata.key_virt = ctx->key;
12472 + else
12473 + ctx->adata.key_dma = ctx->key_dma;
12474 +
12475 + if (inl_mask & 2)
12476 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12477 + else
12478 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12479 +
12480 + ctx->adata.key_inline = !!(inl_mask & 1);
12481 + ctx->cdata.key_inline = !!(inl_mask & 2);
12482 +
12483 + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12484 + ivsize, ctx->authsize, is_rfc3686, nonce,
12485 + ctx1_iv_off, true);
12486 +
12487 +skip_givenc:
12488 + return 0;
12489 +}
12490 +
12491 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12492 +{
12493 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12494 +
12495 + ctx->authsize = authsize;
12496 + aead_set_sh_desc(authenc);
12497 +
12498 + return 0;
12499 +}
12500 +
12501 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12502 + unsigned int keylen)
12503 +{
12504 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12505 + struct device *jrdev = ctx->jrdev;
12506 + struct crypto_authenc_keys keys;
12507 + int ret = 0;
12508 +
12509 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12510 + goto badkey;
12511 +
12512 +#ifdef DEBUG
12513 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12514 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12515 + keys.authkeylen);
12516 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12517 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12518 +#endif
12519 +
12520 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12521 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12522 + keys.enckeylen);
12523 + if (ret)
12524 + goto badkey;
12525 +
12526 + /* postpend encryption key to auth split key */
12527 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12528 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12529 + keys.enckeylen, DMA_TO_DEVICE);
12530 +#ifdef DEBUG
12531 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12532 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12533 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12534 +#endif
12535 +
12536 + ctx->cdata.keylen = keys.enckeylen;
12537 +
12538 + ret = aead_set_sh_desc(aead);
12539 + if (ret)
12540 + goto badkey;
12541 +
12542 + /* Now update the driver contexts with the new shared descriptor */
12543 + if (ctx->drv_ctx[ENCRYPT]) {
12544 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12545 + ctx->sh_desc_enc);
12546 + if (ret) {
12547 + dev_err(jrdev, "driver enc context update failed\n");
12548 + goto badkey;
12549 + }
12550 + }
12551 +
12552 + if (ctx->drv_ctx[DECRYPT]) {
12553 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12554 + ctx->sh_desc_dec);
12555 + if (ret) {
12556 + dev_err(jrdev, "driver dec context update failed\n");
12557 + goto badkey;
12558 + }
12559 + }
12560 +
12561 + return ret;
12562 +badkey:
12563 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12564 + return -EINVAL;
12565 +}
12566 +
12567 +static int tls_set_sh_desc(struct crypto_aead *tls)
12568 +{
12569 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12570 + unsigned int ivsize = crypto_aead_ivsize(tls);
12571 + unsigned int blocksize = crypto_aead_blocksize(tls);
12572 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
12573 + unsigned int data_len[2];
12574 + u32 inl_mask;
12575 +
12576 + if (!ctx->cdata.keylen || !ctx->authsize)
12577 + return 0;
12578 +
12579 + /*
12580 + * TLS 1.0 encrypt shared descriptor
12581 + * Job Descriptor and Shared Descriptor
12582 + * must fit into the 64-word Descriptor h/w Buffer
12583 + */
12584 + data_len[0] = ctx->adata.keylen_pad;
12585 + data_len[1] = ctx->cdata.keylen;
12586 +
12587 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12588 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
12589 + return -EINVAL;
12590 +
12591 + if (inl_mask & 1)
12592 + ctx->adata.key_virt = ctx->key;
12593 + else
12594 + ctx->adata.key_dma = ctx->key_dma;
12595 +
12596 + if (inl_mask & 2)
12597 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12598 + else
12599 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12600 +
12601 + ctx->adata.key_inline = !!(inl_mask & 1);
12602 + ctx->cdata.key_inline = !!(inl_mask & 2);
12603 +
12604 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12605 + assoclen, ivsize, ctx->authsize, blocksize);
12606 +
12607 + /*
12608 + * TLS 1.0 decrypt shared descriptor
12609 + * Keys do not fit inline, regardless of algorithms used
12610 + */
12611 + ctx->adata.key_dma = ctx->key_dma;
12612 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12613 +
12614 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12615 + assoclen, ivsize, ctx->authsize, blocksize);
12616 +
12617 + return 0;
12618 +}
12619 +
12620 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12621 +{
12622 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12623 +
12624 + ctx->authsize = authsize;
12625 + tls_set_sh_desc(tls);
12626 +
12627 + return 0;
12628 +}
12629 +
12630 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12631 + unsigned int keylen)
12632 +{
12633 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12634 + struct device *jrdev = ctx->jrdev;
12635 + struct crypto_authenc_keys keys;
12636 + int ret = 0;
12637 +
12638 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12639 + goto badkey;
12640 +
12641 +#ifdef DEBUG
12642 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12643 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12644 + keys.authkeylen);
12645 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12646 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12647 +#endif
12648 +
12649 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12650 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12651 + keys.enckeylen);
12652 + if (ret)
12653 + goto badkey;
12654 +
12655 + /* postpend encryption key to auth split key */
12656 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12657 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12658 + keys.enckeylen, DMA_TO_DEVICE);
12659 +
12660 +#ifdef DEBUG
12661 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12662 + ctx->adata.keylen, ctx->adata.keylen_pad);
12663 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12664 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12665 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12666 +#endif
12667 +
12668 + ctx->cdata.keylen = keys.enckeylen;
12669 +
12670 + ret = tls_set_sh_desc(tls);
12671 + if (ret)
12672 + goto badkey;
12673 +
12674 + /* Now update the driver contexts with the new shared descriptor */
12675 + if (ctx->drv_ctx[ENCRYPT]) {
12676 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12677 + ctx->sh_desc_enc);
12678 + if (ret) {
12679 + dev_err(jrdev, "driver enc context update failed\n");
12680 + goto badkey;
12681 + }
12682 + }
12683 +
12684 + if (ctx->drv_ctx[DECRYPT]) {
12685 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12686 + ctx->sh_desc_dec);
12687 + if (ret) {
12688 + dev_err(jrdev, "driver dec context update failed\n");
12689 + goto badkey;
12690 + }
12691 + }
12692 +
12693 + return ret;
12694 +badkey:
12695 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12696 + return -EINVAL;
12697 +}
12698 +
12699 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12700 + const u8 *key, unsigned int keylen)
12701 +{
12702 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12703 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12704 + const char *alg_name = crypto_tfm_alg_name(tfm);
12705 + struct device *jrdev = ctx->jrdev;
12706 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12707 + u32 ctx1_iv_off = 0;
12708 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12709 + OP_ALG_AAI_CTR_MOD128);
12710 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12711 + int ret = 0;
12712 +
12713 + memcpy(ctx->key, key, keylen);
12714 +#ifdef DEBUG
12715 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12716 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12717 +#endif
12718 + /*
12719 + * AES-CTR needs to load IV in CONTEXT1 reg
12720 + * at an offset of 128bits (16bytes)
12721 + * CONTEXT1[255:128] = IV
12722 + */
12723 + if (ctr_mode)
12724 + ctx1_iv_off = 16;
12725 +
12726 + /*
12727 + * RFC3686 specific:
12728 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12729 + * | *key = {KEY, NONCE}
12730 + */
12731 + if (is_rfc3686) {
12732 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12733 + keylen -= CTR_RFC3686_NONCE_SIZE;
12734 + }
12735 +
12736 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12737 + ctx->cdata.keylen = keylen;
12738 + ctx->cdata.key_virt = ctx->key;
12739 + ctx->cdata.key_inline = true;
12740 +
12741 + /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12742 + cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12743 + is_rfc3686, ctx1_iv_off);
12744 + cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12745 + is_rfc3686, ctx1_iv_off);
12746 + cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12747 + ivsize, is_rfc3686, ctx1_iv_off);
12748 +
12749 + /* Now update the driver contexts with the new shared descriptor */
12750 + if (ctx->drv_ctx[ENCRYPT]) {
12751 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12752 + ctx->sh_desc_enc);
12753 + if (ret) {
12754 + dev_err(jrdev, "driver enc context update failed\n");
12755 + goto badkey;
12756 + }
12757 + }
12758 +
12759 + if (ctx->drv_ctx[DECRYPT]) {
12760 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12761 + ctx->sh_desc_dec);
12762 + if (ret) {
12763 + dev_err(jrdev, "driver dec context update failed\n");
12764 + goto badkey;
12765 + }
12766 + }
12767 +
12768 + if (ctx->drv_ctx[GIVENCRYPT]) {
12769 + ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12770 + ctx->sh_desc_givenc);
12771 + if (ret) {
12772 + dev_err(jrdev, "driver givenc context update failed\n");
12773 + goto badkey;
12774 + }
12775 + }
12776 +
12777 + return ret;
12778 +badkey:
12779 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12780 + return -EINVAL;
12781 +}
12782 +
12783 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12784 + const u8 *key, unsigned int keylen)
12785 +{
12786 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12787 + struct device *jrdev = ctx->jrdev;
12788 + int ret = 0;
12789 +
12790 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
12791 + crypto_ablkcipher_set_flags(ablkcipher,
12792 + CRYPTO_TFM_RES_BAD_KEY_LEN);
12793 + dev_err(jrdev, "key size mismatch\n");
12794 + return -EINVAL;
12795 + }
12796 +
12797 + memcpy(ctx->key, key, keylen);
12798 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12799 + ctx->cdata.keylen = keylen;
12800 + ctx->cdata.key_virt = ctx->key;
12801 + ctx->cdata.key_inline = true;
12802 +
12803 + /* xts ablkcipher encrypt, decrypt shared descriptors */
12804 + cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12805 + cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12806 +
12807 + /* Now update the driver contexts with the new shared descriptor */
12808 + if (ctx->drv_ctx[ENCRYPT]) {
12809 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12810 + ctx->sh_desc_enc);
12811 + if (ret) {
12812 + dev_err(jrdev, "driver enc context update failed\n");
12813 + goto badkey;
12814 + }
12815 + }
12816 +
12817 + if (ctx->drv_ctx[DECRYPT]) {
12818 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12819 + ctx->sh_desc_dec);
12820 + if (ret) {
12821 + dev_err(jrdev, "driver dec context update failed\n");
12822 + goto badkey;
12823 + }
12824 + }
12825 +
12826 + return ret;
12827 +badkey:
12828 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12829 + return 0;
12830 +}
12831 +
12832 +/*
12833 + * aead_edesc - s/w-extended aead descriptor
12834 + * @src_nents: number of segments in input scatterlist
12835 + * @dst_nents: number of segments in output scatterlist
12836 + * @iv_dma: dma address of iv for checking continuity and link table
12837 + * @qm_sg_bytes: length of dma mapped h/w link table
12838 + * @qm_sg_dma: bus physical mapped address of h/w link table
12839 + * @assoclen: associated data length, in CAAM endianness
12840 + * @assoclen_dma: bus physical mapped address of req->assoclen
12841 + * @drv_req: driver-specific request structure
12842 + * @sgt: the h/w link table
12843 + */
12844 +struct aead_edesc {
12845 + int src_nents;
12846 + int dst_nents;
12847 + dma_addr_t iv_dma;
12848 + int qm_sg_bytes;
12849 + dma_addr_t qm_sg_dma;
12850 + unsigned int assoclen;
12851 + dma_addr_t assoclen_dma;
12852 + struct caam_drv_req drv_req;
12853 +#define CAAM_QI_MAX_AEAD_SG \
12854 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
12855 + sizeof(struct qm_sg_entry))
12856 + struct qm_sg_entry sgt[0];
12857 +};
12858 +
12859 +/*
12860 + * tls_edesc - s/w-extended tls descriptor
12861 + * @src_nents: number of segments in input scatterlist
12862 + * @dst_nents: number of segments in output scatterlist
12863 + * @iv_dma: dma address of iv for checking continuity and link table
12864 + * @qm_sg_bytes: length of dma mapped h/w link table
12865 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
12866 + * @qm_sg_dma: bus physical mapped address of h/w link table
12867 + * @drv_req: driver-specific request structure
12868 + * @sgt: the h/w link table
12869 + */
12870 +struct tls_edesc {
12871 + int src_nents;
12872 + int dst_nents;
12873 + dma_addr_t iv_dma;
12874 + int qm_sg_bytes;
12875 + dma_addr_t qm_sg_dma;
12876 + struct scatterlist tmp[2];
12877 + struct scatterlist *dst;
12878 + struct caam_drv_req drv_req;
12879 + struct qm_sg_entry sgt[0];
12880 +};
12881 +
12882 +/*
12883 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
12884 + * @src_nents: number of segments in input scatterlist
12885 + * @dst_nents: number of segments in output scatterlist
12886 + * @iv_dma: dma address of iv for checking continuity and link table
12887 + * @qm_sg_bytes: length of dma mapped h/w link table
12888 + * @qm_sg_dma: bus physical mapped address of h/w link table
12889 + * @drv_req: driver-specific request structure
12890 + * @sgt: the h/w link table
12891 + */
12892 +struct ablkcipher_edesc {
12893 + int src_nents;
12894 + int dst_nents;
12895 + dma_addr_t iv_dma;
12896 + int qm_sg_bytes;
12897 + dma_addr_t qm_sg_dma;
12898 + struct caam_drv_req drv_req;
12899 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
12900 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
12901 + sizeof(struct qm_sg_entry))
12902 + struct qm_sg_entry sgt[0];
12903 +};
12904 +
12905 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
12906 + enum optype type)
12907 +{
12908 + /*
12909 + * This function is called on the fast path with values of 'type'
12910 + * known at compile time. Invalid arguments are not expected and
12911 + * thus no checks are made.
12912 + */
12913 + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
12914 + u32 *desc;
12915 +
12916 + if (unlikely(!drv_ctx)) {
12917 + spin_lock(&ctx->lock);
12918 +
12919 +		/* Read again to check if some other core initialized drv_ctx */
12920 + drv_ctx = ctx->drv_ctx[type];
12921 + if (!drv_ctx) {
12922 + int cpu;
12923 +
12924 + if (type == ENCRYPT)
12925 + desc = ctx->sh_desc_enc;
12926 + else if (type == DECRYPT)
12927 + desc = ctx->sh_desc_dec;
12928 + else /* (type == GIVENCRYPT) */
12929 + desc = ctx->sh_desc_givenc;
12930 +
12931 + cpu = smp_processor_id();
12932 + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
12933 + if (likely(!IS_ERR_OR_NULL(drv_ctx)))
12934 + drv_ctx->op_type = type;
12935 +
12936 + ctx->drv_ctx[type] = drv_ctx;
12937 + }
12938 +
12939 + spin_unlock(&ctx->lock);
12940 + }
12941 +
12942 + return drv_ctx;
12943 +}
12944 +
12945 +static void caam_unmap(struct device *dev, struct scatterlist *src,
12946 + struct scatterlist *dst, int src_nents,
12947 + int dst_nents, dma_addr_t iv_dma, int ivsize,
12948 + enum optype op_type, dma_addr_t qm_sg_dma,
12949 + int qm_sg_bytes)
12950 +{
12951 + if (dst != src) {
12952 + if (src_nents)
12953 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
12954 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
12955 + } else {
12956 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
12957 + }
12958 +
12959 + if (iv_dma)
12960 + dma_unmap_single(dev, iv_dma, ivsize,
12961 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
12962 + DMA_TO_DEVICE);
12963 + if (qm_sg_bytes)
12964 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
12965 +}
12966 +
12967 +static void aead_unmap(struct device *dev,
12968 + struct aead_edesc *edesc,
12969 + struct aead_request *req)
12970 +{
12971 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12972 + int ivsize = crypto_aead_ivsize(aead);
12973 +
12974 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12975 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12976 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
12977 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
12978 +}
12979 +
12980 +static void tls_unmap(struct device *dev,
12981 + struct tls_edesc *edesc,
12982 + struct aead_request *req)
12983 +{
12984 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12985 + int ivsize = crypto_aead_ivsize(aead);
12986 +
12987 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
12988 + edesc->dst_nents, edesc->iv_dma, ivsize,
12989 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
12990 + edesc->qm_sg_bytes);
12991 +}
12992 +
12993 +static void ablkcipher_unmap(struct device *dev,
12994 + struct ablkcipher_edesc *edesc,
12995 + struct ablkcipher_request *req)
12996 +{
12997 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
12998 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12999 +
13000 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13001 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13002 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13003 +}
13004 +
13005 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
13006 +{
13007 + struct device *qidev;
13008 + struct aead_edesc *edesc;
13009 + struct aead_request *aead_req = drv_req->app_ctx;
13010 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13011 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13012 + int ecode = 0;
13013 +
13014 + qidev = caam_ctx->qidev;
13015 +
13016 + if (unlikely(status)) {
13017 + caam_jr_strstatus(qidev, status);
13018 + ecode = -EIO;
13019 + }
13020 +
13021 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13022 + aead_unmap(qidev, edesc, aead_req);
13023 +
13024 + aead_request_complete(aead_req, ecode);
13025 + qi_cache_free(edesc);
13026 +}
13027 +
13028 +/*
13029 + * allocate and map the aead extended descriptor
13030 + */
13031 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13032 + bool encrypt)
13033 +{
13034 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13035 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13036 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13037 + typeof(*alg), aead);
13038 + struct device *qidev = ctx->qidev;
13039 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13040 + GFP_KERNEL : GFP_ATOMIC;
13041 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13042 + struct aead_edesc *edesc;
13043 + dma_addr_t qm_sg_dma, iv_dma = 0;
13044 + int ivsize = 0;
13045 + unsigned int authsize = ctx->authsize;
13046 + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13047 + int in_len, out_len;
13048 + struct qm_sg_entry *sg_table, *fd_sgt;
13049 + struct caam_drv_ctx *drv_ctx;
13050 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13051 +
13052 + drv_ctx = get_drv_ctx(ctx, op_type);
13053 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13054 + return (struct aead_edesc *)drv_ctx;
13055 +
13056 + /* allocate space for base edesc and hw desc commands, link tables */
13057 + edesc = qi_cache_alloc(GFP_DMA | flags);
13058 + if (unlikely(!edesc)) {
13059 + dev_err(qidev, "could not allocate extended descriptor\n");
13060 + return ERR_PTR(-ENOMEM);
13061 + }
13062 +
13063 + if (likely(req->src == req->dst)) {
13064 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13065 + req->cryptlen +
13066 + (encrypt ? authsize : 0));
13067 + if (unlikely(src_nents < 0)) {
13068 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13069 + req->assoclen + req->cryptlen +
13070 + (encrypt ? authsize : 0));
13071 + qi_cache_free(edesc);
13072 + return ERR_PTR(src_nents);
13073 + }
13074 +
13075 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13076 + DMA_BIDIRECTIONAL);
13077 + if (unlikely(!mapped_src_nents)) {
13078 + dev_err(qidev, "unable to map source\n");
13079 + qi_cache_free(edesc);
13080 + return ERR_PTR(-ENOMEM);
13081 + }
13082 + } else {
13083 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13084 + req->cryptlen);
13085 + if (unlikely(src_nents < 0)) {
13086 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13087 + req->assoclen + req->cryptlen);
13088 + qi_cache_free(edesc);
13089 + return ERR_PTR(src_nents);
13090 + }
13091 +
13092 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13093 + req->cryptlen +
13094 + (encrypt ? authsize :
13095 + (-authsize)));
13096 + if (unlikely(dst_nents < 0)) {
13097 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13098 + req->assoclen + req->cryptlen +
13099 + (encrypt ? authsize : (-authsize)));
13100 + qi_cache_free(edesc);
13101 + return ERR_PTR(dst_nents);
13102 + }
13103 +
13104 + if (src_nents) {
13105 + mapped_src_nents = dma_map_sg(qidev, req->src,
13106 + src_nents, DMA_TO_DEVICE);
13107 + if (unlikely(!mapped_src_nents)) {
13108 + dev_err(qidev, "unable to map source\n");
13109 + qi_cache_free(edesc);
13110 + return ERR_PTR(-ENOMEM);
13111 + }
13112 + } else {
13113 + mapped_src_nents = 0;
13114 + }
13115 +
13116 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13117 + DMA_FROM_DEVICE);
13118 + if (unlikely(!mapped_dst_nents)) {
13119 + dev_err(qidev, "unable to map destination\n");
13120 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13121 + qi_cache_free(edesc);
13122 + return ERR_PTR(-ENOMEM);
13123 + }
13124 + }
13125 +
13126 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13127 + ivsize = crypto_aead_ivsize(aead);
13128 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13129 + if (dma_mapping_error(qidev, iv_dma)) {
13130 + dev_err(qidev, "unable to map IV\n");
13131 + caam_unmap(qidev, req->src, req->dst, src_nents,
13132 + dst_nents, 0, 0, op_type, 0, 0);
13133 + qi_cache_free(edesc);
13134 + return ERR_PTR(-ENOMEM);
13135 + }
13136 + }
13137 +
13138 + /*
13139 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13140 + * Input is not contiguous.
13141 + */
13142 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13143 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13144 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13145 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13146 + qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13147 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13148 + iv_dma, ivsize, op_type, 0, 0);
13149 + qi_cache_free(edesc);
13150 + return ERR_PTR(-ENOMEM);
13151 + }
13152 + sg_table = &edesc->sgt[0];
13153 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13154 +
13155 + edesc->src_nents = src_nents;
13156 + edesc->dst_nents = dst_nents;
13157 + edesc->iv_dma = iv_dma;
13158 + edesc->drv_req.app_ctx = req;
13159 + edesc->drv_req.cbk = aead_done;
13160 + edesc->drv_req.drv_ctx = drv_ctx;
13161 +
13162 + edesc->assoclen = cpu_to_caam32(req->assoclen);
13163 + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13164 + DMA_TO_DEVICE);
13165 + if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13166 + dev_err(qidev, "unable to map assoclen\n");
13167 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13168 + iv_dma, ivsize, op_type, 0, 0);
13169 + qi_cache_free(edesc);
13170 + return ERR_PTR(-ENOMEM);
13171 + }
13172 +
13173 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13174 + qm_sg_index++;
13175 + if (ivsize) {
13176 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13177 + qm_sg_index++;
13178 + }
13179 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13180 + qm_sg_index += mapped_src_nents;
13181 +
13182 + if (mapped_dst_nents > 1)
13183 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13184 + qm_sg_index, 0);
13185 +
13186 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13187 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13188 + dev_err(qidev, "unable to map S/G table\n");
13189 + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13190 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13191 + iv_dma, ivsize, op_type, 0, 0);
13192 + qi_cache_free(edesc);
13193 + return ERR_PTR(-ENOMEM);
13194 + }
13195 +
13196 + edesc->qm_sg_dma = qm_sg_dma;
13197 + edesc->qm_sg_bytes = qm_sg_bytes;
13198 +
13199 + out_len = req->assoclen + req->cryptlen +
13200 + (encrypt ? ctx->authsize : (-ctx->authsize));
13201 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13202 +
13203 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13204 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13205 +
13206 + if (req->dst == req->src) {
13207 + if (mapped_src_nents == 1)
13208 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13209 + out_len, 0);
13210 + else
13211 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13212 + (1 + !!ivsize) * sizeof(*sg_table),
13213 + out_len, 0);
13214 + } else if (mapped_dst_nents == 1) {
13215 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13216 + 0);
13217 + } else {
13218 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13219 + qm_sg_index, out_len, 0);
13220 + }
13221 +
13222 + return edesc;
13223 +}
13224 +
13225 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13226 +{
13227 + struct aead_edesc *edesc;
13228 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13229 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13230 + int ret;
13231 +
13232 + if (unlikely(caam_congested))
13233 + return -EAGAIN;
13234 +
13235 + /* allocate extended descriptor */
13236 + edesc = aead_edesc_alloc(req, encrypt);
13237 + if (IS_ERR_OR_NULL(edesc))
13238 + return PTR_ERR(edesc);
13239 +
13240 + /* Create and submit job descriptor */
13241 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13242 + if (!ret) {
13243 + ret = -EINPROGRESS;
13244 + } else {
13245 + aead_unmap(ctx->qidev, edesc, req);
13246 + qi_cache_free(edesc);
13247 + }
13248 +
13249 + return ret;
13250 +}
13251 +
13252 +static int aead_encrypt(struct aead_request *req)
13253 +{
13254 + return aead_crypt(req, true);
13255 +}
13256 +
13257 +static int aead_decrypt(struct aead_request *req)
13258 +{
13259 + return aead_crypt(req, false);
13260 +}
13261 +
13262 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13263 +{
13264 + struct device *qidev;
13265 + struct tls_edesc *edesc;
13266 + struct aead_request *aead_req = drv_req->app_ctx;
13267 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13268 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13269 + int ecode = 0;
13270 +
13271 + qidev = caam_ctx->qidev;
13272 +
13273 + if (unlikely(status)) {
13274 + caam_jr_strstatus(qidev, status);
13275 + ecode = -EIO;
13276 + }
13277 +
13278 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13279 + tls_unmap(qidev, edesc, aead_req);
13280 +
13281 + aead_request_complete(aead_req, ecode);
13282 + qi_cache_free(edesc);
13283 +}
13284 +
13285 +/*
13286 + * allocate and map the tls extended descriptor
13287 + */
13288 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13289 +{
13290 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13291 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13292 + unsigned int blocksize = crypto_aead_blocksize(aead);
13293 + unsigned int padsize, authsize;
13294 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13295 + typeof(*alg), aead);
13296 + struct device *qidev = ctx->qidev;
13297 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13298 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13299 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13300 + struct tls_edesc *edesc;
13301 + dma_addr_t qm_sg_dma, iv_dma = 0;
13302 + int ivsize = 0;
13303 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13304 + int in_len, out_len;
13305 + struct qm_sg_entry *sg_table, *fd_sgt;
13306 + struct caam_drv_ctx *drv_ctx;
13307 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13308 + struct scatterlist *dst;
13309 +
13310 + if (encrypt) {
13311 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13312 + blocksize);
13313 + authsize = ctx->authsize + padsize;
13314 + } else {
13315 + authsize = ctx->authsize;
13316 + }
13317 +
13318 + drv_ctx = get_drv_ctx(ctx, op_type);
13319 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13320 + return (struct tls_edesc *)drv_ctx;
13321 +
13322 + /* allocate space for base edesc and hw desc commands, link tables */
13323 + edesc = qi_cache_alloc(GFP_DMA | flags);
13324 + if (unlikely(!edesc)) {
13325 + dev_err(qidev, "could not allocate extended descriptor\n");
13326 + return ERR_PTR(-ENOMEM);
13327 + }
13328 +
13329 + if (likely(req->src == req->dst)) {
13330 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13331 + req->cryptlen +
13332 + (encrypt ? authsize : 0));
13333 + if (unlikely(src_nents < 0)) {
13334 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13335 + req->assoclen + req->cryptlen +
13336 + (encrypt ? authsize : 0));
13337 + qi_cache_free(edesc);
13338 + return ERR_PTR(src_nents);
13339 + }
13340 +
13341 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13342 + DMA_BIDIRECTIONAL);
13343 + if (unlikely(!mapped_src_nents)) {
13344 + dev_err(qidev, "unable to map source\n");
13345 + qi_cache_free(edesc);
13346 + return ERR_PTR(-ENOMEM);
13347 + }
13348 + dst = req->dst;
13349 + } else {
13350 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13351 + req->cryptlen);
13352 + if (unlikely(src_nents < 0)) {
13353 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13354 + req->assoclen + req->cryptlen);
13355 + qi_cache_free(edesc);
13356 + return ERR_PTR(src_nents);
13357 + }
13358 +
13359 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13360 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
13361 + (encrypt ? authsize : 0));
13362 + if (unlikely(dst_nents < 0)) {
13363 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13364 + req->cryptlen +
13365 + (encrypt ? authsize : 0));
13366 + qi_cache_free(edesc);
13367 + return ERR_PTR(dst_nents);
13368 + }
13369 +
13370 + if (src_nents) {
13371 + mapped_src_nents = dma_map_sg(qidev, req->src,
13372 + src_nents, DMA_TO_DEVICE);
13373 + if (unlikely(!mapped_src_nents)) {
13374 + dev_err(qidev, "unable to map source\n");
13375 + qi_cache_free(edesc);
13376 + return ERR_PTR(-ENOMEM);
13377 + }
13378 + } else {
13379 + mapped_src_nents = 0;
13380 + }
13381 +
13382 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13383 + DMA_FROM_DEVICE);
13384 + if (unlikely(!mapped_dst_nents)) {
13385 + dev_err(qidev, "unable to map destination\n");
13386 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13387 + qi_cache_free(edesc);
13388 + return ERR_PTR(-ENOMEM);
13389 + }
13390 + }
13391 +
13392 + ivsize = crypto_aead_ivsize(aead);
13393 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13394 + if (dma_mapping_error(qidev, iv_dma)) {
13395 + dev_err(qidev, "unable to map IV\n");
13396 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13397 + op_type, 0, 0);
13398 + qi_cache_free(edesc);
13399 + return ERR_PTR(-ENOMEM);
13400 + }
13401 +
13402 + /*
13403 + * Create S/G table: IV, src, dst.
13404 + * Input is not contiguous.
13405 + */
13406 + qm_sg_ents = 1 + mapped_src_nents +
13407 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13408 + sg_table = &edesc->sgt[0];
13409 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13410 +
13411 + edesc->src_nents = src_nents;
13412 + edesc->dst_nents = dst_nents;
13413 + edesc->dst = dst;
13414 + edesc->iv_dma = iv_dma;
13415 + edesc->drv_req.app_ctx = req;
13416 + edesc->drv_req.cbk = tls_done;
13417 + edesc->drv_req.drv_ctx = drv_ctx;
13418 +
13419 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13420 + qm_sg_index = 1;
13421 +
13422 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13423 + qm_sg_index += mapped_src_nents;
13424 +
13425 + if (mapped_dst_nents > 1)
13426 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13427 + qm_sg_index, 0);
13428 +
13429 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13430 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13431 + dev_err(qidev, "unable to map S/G table\n");
13432 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13433 + ivsize, op_type, 0, 0);
13434 + qi_cache_free(edesc);
13435 + return ERR_PTR(-ENOMEM);
13436 + }
13437 +
13438 + edesc->qm_sg_dma = qm_sg_dma;
13439 + edesc->qm_sg_bytes = qm_sg_bytes;
13440 +
13441 + out_len = req->cryptlen + (encrypt ? authsize : 0);
13442 + in_len = ivsize + req->assoclen + req->cryptlen;
13443 +
13444 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13445 +
13446 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13447 +
13448 + if (req->dst == req->src)
13449 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13450 + (sg_nents_for_len(req->src, req->assoclen) +
13451 + 1) * sizeof(*sg_table), out_len, 0);
13452 + else if (mapped_dst_nents == 1)
13453 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13454 + else
13455 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13456 + qm_sg_index, out_len, 0);
13457 +
13458 + return edesc;
13459 +}
13460 +
13461 +static int tls_crypt(struct aead_request *req, bool encrypt)
13462 +{
13463 + struct tls_edesc *edesc;
13464 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13465 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13466 + int ret;
13467 +
13468 + if (unlikely(caam_congested))
13469 + return -EAGAIN;
13470 +
13471 + edesc = tls_edesc_alloc(req, encrypt);
13472 + if (IS_ERR_OR_NULL(edesc))
13473 + return PTR_ERR(edesc);
13474 +
13475 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13476 + if (!ret) {
13477 + ret = -EINPROGRESS;
13478 + } else {
13479 + tls_unmap(ctx->qidev, edesc, req);
13480 + qi_cache_free(edesc);
13481 + }
13482 +
13483 + return ret;
13484 +}
13485 +
13486 +static int tls_encrypt(struct aead_request *req)
13487 +{
13488 + return tls_crypt(req, true);
13489 +}
13490 +
13491 +static int tls_decrypt(struct aead_request *req)
13492 +{
13493 + return tls_crypt(req, false);
13494 +}
13495 +
13496 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13497 +{
13498 + struct ablkcipher_edesc *edesc;
13499 + struct ablkcipher_request *req = drv_req->app_ctx;
13500 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13501 + struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13502 + struct device *qidev = caam_ctx->qidev;
13503 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13504 +
13505 +#ifdef DEBUG
13506 + dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13507 +#endif
13508 +
13509 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13510 +
13511 + if (status)
13512 + caam_jr_strstatus(qidev, status);
13513 +
13514 +#ifdef DEBUG
13515 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
13516 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13517 + edesc->src_nents > 1 ? 100 : ivsize, 1);
13518 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
13519 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13520 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13521 +#endif
13522 +
13523 + ablkcipher_unmap(qidev, edesc, req);
13524 + qi_cache_free(edesc);
13525 +
13526 + /*
13527 + * The crypto API expects us to set the IV (req->info) to the last
13528 + * ciphertext block. This is used e.g. by the CTS mode.
13529 + */
13530 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13531 + ivsize, 0);
13532 +
13533 + ablkcipher_request_complete(req, status);
13534 +}
13535 +
13536 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13537 + *req, bool encrypt)
13538 +{
13539 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13540 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13541 + struct device *qidev = ctx->qidev;
13542 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13543 + GFP_KERNEL : GFP_ATOMIC;
13544 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13545 + struct ablkcipher_edesc *edesc;
13546 + dma_addr_t iv_dma;
13547 + bool in_contig;
13548 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13549 + int dst_sg_idx, qm_sg_ents;
13550 + struct qm_sg_entry *sg_table, *fd_sgt;
13551 + struct caam_drv_ctx *drv_ctx;
13552 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13553 +
13554 + drv_ctx = get_drv_ctx(ctx, op_type);
13555 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13556 + return (struct ablkcipher_edesc *)drv_ctx;
13557 +
13558 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13559 + if (unlikely(src_nents < 0)) {
13560 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13561 + req->nbytes);
13562 + return ERR_PTR(src_nents);
13563 + }
13564 +
13565 + if (unlikely(req->src != req->dst)) {
13566 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13567 + if (unlikely(dst_nents < 0)) {
13568 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13569 + req->nbytes);
13570 + return ERR_PTR(dst_nents);
13571 + }
13572 +
13573 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13574 + DMA_TO_DEVICE);
13575 + if (unlikely(!mapped_src_nents)) {
13576 + dev_err(qidev, "unable to map source\n");
13577 + return ERR_PTR(-ENOMEM);
13578 + }
13579 +
13580 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13581 + DMA_FROM_DEVICE);
13582 + if (unlikely(!mapped_dst_nents)) {
13583 + dev_err(qidev, "unable to map destination\n");
13584 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13585 + return ERR_PTR(-ENOMEM);
13586 + }
13587 + } else {
13588 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13589 + DMA_BIDIRECTIONAL);
13590 + if (unlikely(!mapped_src_nents)) {
13591 + dev_err(qidev, "unable to map source\n");
13592 + return ERR_PTR(-ENOMEM);
13593 + }
13594 + }
13595 +
13596 + iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13597 + if (dma_mapping_error(qidev, iv_dma)) {
13598 + dev_err(qidev, "unable to map IV\n");
13599 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13600 + 0, 0, 0, 0);
13601 + return ERR_PTR(-ENOMEM);
13602 + }
13603 +
13604 + if (mapped_src_nents == 1 &&
13605 + iv_dma + ivsize == sg_dma_address(req->src)) {
13606 + in_contig = true;
13607 + qm_sg_ents = 0;
13608 + } else {
13609 + in_contig = false;
13610 + qm_sg_ents = 1 + mapped_src_nents;
13611 + }
13612 + dst_sg_idx = qm_sg_ents;
13613 +
13614 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13615 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13616 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13617 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13618 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13619 + iv_dma, ivsize, op_type, 0, 0);
13620 + return ERR_PTR(-ENOMEM);
13621 + }
13622 +
13623 + /* allocate space for base edesc and link tables */
13624 + edesc = qi_cache_alloc(GFP_DMA | flags);
13625 + if (unlikely(!edesc)) {
13626 + dev_err(qidev, "could not allocate extended descriptor\n");
13627 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13628 + iv_dma, ivsize, op_type, 0, 0);
13629 + return ERR_PTR(-ENOMEM);
13630 + }
13631 +
13632 + edesc->src_nents = src_nents;
13633 + edesc->dst_nents = dst_nents;
13634 + edesc->iv_dma = iv_dma;
13635 + sg_table = &edesc->sgt[0];
13636 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13637 + edesc->drv_req.app_ctx = req;
13638 + edesc->drv_req.cbk = ablkcipher_done;
13639 + edesc->drv_req.drv_ctx = drv_ctx;
13640 +
13641 + if (!in_contig) {
13642 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13643 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13644 + }
13645 +
13646 + if (mapped_dst_nents > 1)
13647 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13648 + dst_sg_idx, 0);
13649 +
13650 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13651 + DMA_TO_DEVICE);
13652 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13653 + dev_err(qidev, "unable to map S/G table\n");
13654 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13655 + iv_dma, ivsize, op_type, 0, 0);
13656 + qi_cache_free(edesc);
13657 + return ERR_PTR(-ENOMEM);
13658 + }
13659 +
13660 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13661 +
13662 + if (!in_contig)
13663 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13664 + ivsize + req->nbytes, 0);
13665 + else
13666 + dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13667 + 0);
13668 +
13669 + if (req->src == req->dst) {
13670 + if (!in_contig)
13671 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13672 + sizeof(*sg_table), req->nbytes, 0);
13673 + else
13674 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13675 + req->nbytes, 0);
13676 + } else if (mapped_dst_nents > 1) {
13677 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13678 + sizeof(*sg_table), req->nbytes, 0);
13679 + } else {
13680 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13681 + req->nbytes, 0);
13682 + }
13683 +
13684 + return edesc;
13685 +}
13686 +
13687 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13688 + struct skcipher_givcrypt_request *creq)
13689 +{
13690 + struct ablkcipher_request *req = &creq->creq;
13691 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13692 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13693 + struct device *qidev = ctx->qidev;
13694 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13695 + GFP_KERNEL : GFP_ATOMIC;
13696 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13697 + struct ablkcipher_edesc *edesc;
13698 + dma_addr_t iv_dma;
13699 + bool out_contig;
13700 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13701 + struct qm_sg_entry *sg_table, *fd_sgt;
13702 + int dst_sg_idx, qm_sg_ents;
13703 + struct caam_drv_ctx *drv_ctx;
13704 +
13705 + drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13706 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13707 + return (struct ablkcipher_edesc *)drv_ctx;
13708 +
13709 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13710 + if (unlikely(src_nents < 0)) {
13711 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13712 + req->nbytes);
13713 + return ERR_PTR(src_nents);
13714 + }
13715 +
13716 + if (unlikely(req->src != req->dst)) {
13717 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13718 + if (unlikely(dst_nents < 0)) {
13719 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13720 + req->nbytes);
13721 + return ERR_PTR(dst_nents);
13722 + }
13723 +
13724 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13725 + DMA_TO_DEVICE);
13726 + if (unlikely(!mapped_src_nents)) {
13727 + dev_err(qidev, "unable to map source\n");
13728 + return ERR_PTR(-ENOMEM);
13729 + }
13730 +
13731 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13732 + DMA_FROM_DEVICE);
13733 + if (unlikely(!mapped_dst_nents)) {
13734 + dev_err(qidev, "unable to map destination\n");
13735 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13736 + return ERR_PTR(-ENOMEM);
13737 + }
13738 + } else {
13739 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13740 + DMA_BIDIRECTIONAL);
13741 + if (unlikely(!mapped_src_nents)) {
13742 + dev_err(qidev, "unable to map source\n");
13743 + return ERR_PTR(-ENOMEM);
13744 + }
13745 +
13746 + dst_nents = src_nents;
13747 + mapped_dst_nents = src_nents;
13748 + }
13749 +
13750 + iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13751 + if (dma_mapping_error(qidev, iv_dma)) {
13752 + dev_err(qidev, "unable to map IV\n");
13753 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13754 + 0, 0, 0, 0);
13755 + return ERR_PTR(-ENOMEM);
13756 + }
13757 +
13758 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13759 + dst_sg_idx = qm_sg_ents;
13760 + if (mapped_dst_nents == 1 &&
13761 + iv_dma + ivsize == sg_dma_address(req->dst)) {
13762 + out_contig = true;
13763 + } else {
13764 + out_contig = false;
13765 + qm_sg_ents += 1 + mapped_dst_nents;
13766 + }
13767 +
13768 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13769 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13770 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13771 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13772 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13773 + return ERR_PTR(-ENOMEM);
13774 + }
13775 +
13776 + /* allocate space for base edesc and link tables */
13777 + edesc = qi_cache_alloc(GFP_DMA | flags);
13778 + if (!edesc) {
13779 + dev_err(qidev, "could not allocate extended descriptor\n");
13780 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13781 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13782 + return ERR_PTR(-ENOMEM);
13783 + }
13784 +
13785 + edesc->src_nents = src_nents;
13786 + edesc->dst_nents = dst_nents;
13787 + edesc->iv_dma = iv_dma;
13788 + sg_table = &edesc->sgt[0];
13789 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13790 + edesc->drv_req.app_ctx = req;
13791 + edesc->drv_req.cbk = ablkcipher_done;
13792 + edesc->drv_req.drv_ctx = drv_ctx;
13793 +
13794 + if (mapped_src_nents > 1)
13795 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13796 +
13797 + if (!out_contig) {
13798 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13799 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13800 + dst_sg_idx + 1, 0);
13801 + }
13802 +
13803 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13804 + DMA_TO_DEVICE);
13805 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13806 + dev_err(qidev, "unable to map S/G table\n");
13807 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13808 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13809 + qi_cache_free(edesc);
13810 + return ERR_PTR(-ENOMEM);
13811 + }
13812 +
13813 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13814 +
13815 + if (mapped_src_nents > 1)
13816 + dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13817 + 0);
13818 + else
13819 + dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13820 + req->nbytes, 0);
13821 +
13822 + if (!out_contig)
13823 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13824 + sizeof(*sg_table), ivsize + req->nbytes,
13825 + 0);
13826 + else
13827 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13828 + ivsize + req->nbytes, 0);
13829 +
13830 + return edesc;
13831 +}
13832 +
13833 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13834 +{
13835 + struct ablkcipher_edesc *edesc;
13836 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13837 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13838 + int ret;
13839 +
13840 + if (unlikely(caam_congested))
13841 + return -EAGAIN;
13842 +
13843 + /* allocate extended descriptor */
13844 + edesc = ablkcipher_edesc_alloc(req, encrypt);
13845 + if (IS_ERR(edesc))
13846 + return PTR_ERR(edesc);
13847 +
13848 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13849 + if (!ret) {
13850 + ret = -EINPROGRESS;
13851 + } else {
13852 + ablkcipher_unmap(ctx->qidev, edesc, req);
13853 + qi_cache_free(edesc);
13854 + }
13855 +
13856 + return ret;
13857 +}
13858 +
13859 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
13860 +{
13861 + return ablkcipher_crypt(req, true);
13862 +}
13863 +
13864 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
13865 +{
13866 + return ablkcipher_crypt(req, false);
13867 +}
13868 +
13869 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
13870 +{
13871 + struct ablkcipher_request *req = &creq->creq;
13872 + struct ablkcipher_edesc *edesc;
13873 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13874 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13875 + int ret;
13876 +
13877 + if (unlikely(caam_congested))
13878 + return -EAGAIN;
13879 +
13880 + /* allocate extended descriptor */
13881 + edesc = ablkcipher_giv_edesc_alloc(creq);
13882 + if (IS_ERR(edesc))
13883 + return PTR_ERR(edesc);
13884 +
13885 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13886 + if (!ret) {
13887 + ret = -EINPROGRESS;
13888 + } else {
13889 + ablkcipher_unmap(ctx->qidev, edesc, req);
13890 + qi_cache_free(edesc);
13891 + }
13892 +
13893 + return ret;
13894 +}
13895 +
13896 +#define template_ablkcipher template_u.ablkcipher
13897 +struct caam_alg_template {
13898 + char name[CRYPTO_MAX_ALG_NAME];
13899 + char driver_name[CRYPTO_MAX_ALG_NAME];
13900 + unsigned int blocksize;
13901 + u32 type;
13902 + union {
13903 + struct ablkcipher_alg ablkcipher;
13904 + } template_u;
13905 + u32 class1_alg_type;
13906 + u32 class2_alg_type;
13907 +};
13908 +
13909 +static struct caam_alg_template driver_algs[] = {
13910 + /* ablkcipher descriptor */
13911 + {
13912 + .name = "cbc(aes)",
13913 + .driver_name = "cbc-aes-caam-qi",
13914 + .blocksize = AES_BLOCK_SIZE,
13915 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13916 + .template_ablkcipher = {
13917 + .setkey = ablkcipher_setkey,
13918 + .encrypt = ablkcipher_encrypt,
13919 + .decrypt = ablkcipher_decrypt,
13920 + .givencrypt = ablkcipher_givencrypt,
13921 + .geniv = "<built-in>",
13922 + .min_keysize = AES_MIN_KEY_SIZE,
13923 + .max_keysize = AES_MAX_KEY_SIZE,
13924 + .ivsize = AES_BLOCK_SIZE,
13925 + },
13926 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13927 + },
13928 + {
13929 + .name = "cbc(des3_ede)",
13930 + .driver_name = "cbc-3des-caam-qi",
13931 + .blocksize = DES3_EDE_BLOCK_SIZE,
13932 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13933 + .template_ablkcipher = {
13934 + .setkey = ablkcipher_setkey,
13935 + .encrypt = ablkcipher_encrypt,
13936 + .decrypt = ablkcipher_decrypt,
13937 + .givencrypt = ablkcipher_givencrypt,
13938 + .geniv = "<built-in>",
13939 + .min_keysize = DES3_EDE_KEY_SIZE,
13940 + .max_keysize = DES3_EDE_KEY_SIZE,
13941 + .ivsize = DES3_EDE_BLOCK_SIZE,
13942 + },
13943 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
13944 + },
13945 + {
13946 + .name = "cbc(des)",
13947 + .driver_name = "cbc-des-caam-qi",
13948 + .blocksize = DES_BLOCK_SIZE,
13949 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13950 + .template_ablkcipher = {
13951 + .setkey = ablkcipher_setkey,
13952 + .encrypt = ablkcipher_encrypt,
13953 + .decrypt = ablkcipher_decrypt,
13954 + .givencrypt = ablkcipher_givencrypt,
13955 + .geniv = "<built-in>",
13956 + .min_keysize = DES_KEY_SIZE,
13957 + .max_keysize = DES_KEY_SIZE,
13958 + .ivsize = DES_BLOCK_SIZE,
13959 + },
13960 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
13961 + },
13962 + {
13963 + .name = "ctr(aes)",
13964 + .driver_name = "ctr-aes-caam-qi",
13965 + .blocksize = 1,
13966 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13967 + .template_ablkcipher = {
13968 + .setkey = ablkcipher_setkey,
13969 + .encrypt = ablkcipher_encrypt,
13970 + .decrypt = ablkcipher_decrypt,
13971 + .geniv = "chainiv",
13972 + .min_keysize = AES_MIN_KEY_SIZE,
13973 + .max_keysize = AES_MAX_KEY_SIZE,
13974 + .ivsize = AES_BLOCK_SIZE,
13975 + },
13976 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13977 + },
13978 + {
13979 + .name = "rfc3686(ctr(aes))",
13980 + .driver_name = "rfc3686-ctr-aes-caam-qi",
13981 + .blocksize = 1,
13982 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13983 + .template_ablkcipher = {
13984 + .setkey = ablkcipher_setkey,
13985 + .encrypt = ablkcipher_encrypt,
13986 + .decrypt = ablkcipher_decrypt,
13987 + .givencrypt = ablkcipher_givencrypt,
13988 + .geniv = "<built-in>",
13989 + .min_keysize = AES_MIN_KEY_SIZE +
13990 + CTR_RFC3686_NONCE_SIZE,
13991 + .max_keysize = AES_MAX_KEY_SIZE +
13992 + CTR_RFC3686_NONCE_SIZE,
13993 + .ivsize = CTR_RFC3686_IV_SIZE,
13994 + },
13995 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13996 + },
13997 + {
13998 + .name = "xts(aes)",
13999 + .driver_name = "xts-aes-caam-qi",
14000 + .blocksize = AES_BLOCK_SIZE,
14001 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14002 + .template_ablkcipher = {
14003 + .setkey = xts_ablkcipher_setkey,
14004 + .encrypt = ablkcipher_encrypt,
14005 + .decrypt = ablkcipher_decrypt,
14006 + .geniv = "eseqiv",
14007 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
14008 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
14009 + .ivsize = AES_BLOCK_SIZE,
14010 + },
14011 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
14012 + },
14013 +};
14014 +
14015 +static struct caam_aead_alg driver_aeads[] = {
14016 + /* single-pass ipsec_esp descriptor */
14017 + {
14018 + .aead = {
14019 + .base = {
14020 + .cra_name = "authenc(hmac(md5),cbc(aes))",
14021 + .cra_driver_name = "authenc-hmac-md5-"
14022 + "cbc-aes-caam-qi",
14023 + .cra_blocksize = AES_BLOCK_SIZE,
14024 + },
14025 + .setkey = aead_setkey,
14026 + .setauthsize = aead_setauthsize,
14027 + .encrypt = aead_encrypt,
14028 + .decrypt = aead_decrypt,
14029 + .ivsize = AES_BLOCK_SIZE,
14030 + .maxauthsize = MD5_DIGEST_SIZE,
14031 + },
14032 + .caam = {
14033 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14034 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14035 + OP_ALG_AAI_HMAC_PRECOMP,
14036 + }
14037 + },
14038 + {
14039 + .aead = {
14040 + .base = {
14041 + .cra_name = "echainiv(authenc(hmac(md5),"
14042 + "cbc(aes)))",
14043 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14044 + "cbc-aes-caam-qi",
14045 + .cra_blocksize = AES_BLOCK_SIZE,
14046 + },
14047 + .setkey = aead_setkey,
14048 + .setauthsize = aead_setauthsize,
14049 + .encrypt = aead_encrypt,
14050 + .decrypt = aead_decrypt,
14051 + .ivsize = AES_BLOCK_SIZE,
14052 + .maxauthsize = MD5_DIGEST_SIZE,
14053 + },
14054 + .caam = {
14055 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14056 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14057 + OP_ALG_AAI_HMAC_PRECOMP,
14058 + .geniv = true,
14059 + }
14060 + },
14061 + {
14062 + .aead = {
14063 + .base = {
14064 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
14065 + .cra_driver_name = "authenc-hmac-sha1-"
14066 + "cbc-aes-caam-qi",
14067 + .cra_blocksize = AES_BLOCK_SIZE,
14068 + },
14069 + .setkey = aead_setkey,
14070 + .setauthsize = aead_setauthsize,
14071 + .encrypt = aead_encrypt,
14072 + .decrypt = aead_decrypt,
14073 + .ivsize = AES_BLOCK_SIZE,
14074 + .maxauthsize = SHA1_DIGEST_SIZE,
14075 + },
14076 + .caam = {
14077 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14078 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14079 + OP_ALG_AAI_HMAC_PRECOMP,
14080 + }
14081 + },
14082 + {
14083 + .aead = {
14084 + .base = {
14085 + .cra_name = "echainiv(authenc(hmac(sha1),"
14086 + "cbc(aes)))",
14087 + .cra_driver_name = "echainiv-authenc-"
14088 + "hmac-sha1-cbc-aes-caam-qi",
14089 + .cra_blocksize = AES_BLOCK_SIZE,
14090 + },
14091 + .setkey = aead_setkey,
14092 + .setauthsize = aead_setauthsize,
14093 + .encrypt = aead_encrypt,
14094 + .decrypt = aead_decrypt,
14095 + .ivsize = AES_BLOCK_SIZE,
14096 + .maxauthsize = SHA1_DIGEST_SIZE,
14097 + },
14098 + .caam = {
14099 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14100 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14101 + OP_ALG_AAI_HMAC_PRECOMP,
14102 + .geniv = true,
14103 + },
14104 + },
14105 + {
14106 + .aead = {
14107 + .base = {
14108 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
14109 + .cra_driver_name = "authenc-hmac-sha224-"
14110 + "cbc-aes-caam-qi",
14111 + .cra_blocksize = AES_BLOCK_SIZE,
14112 + },
14113 + .setkey = aead_setkey,
14114 + .setauthsize = aead_setauthsize,
14115 + .encrypt = aead_encrypt,
14116 + .decrypt = aead_decrypt,
14117 + .ivsize = AES_BLOCK_SIZE,
14118 + .maxauthsize = SHA224_DIGEST_SIZE,
14119 + },
14120 + .caam = {
14121 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14122 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14123 + OP_ALG_AAI_HMAC_PRECOMP,
14124 + }
14125 + },
14126 + {
14127 + .aead = {
14128 + .base = {
14129 + .cra_name = "echainiv(authenc(hmac(sha224),"
14130 + "cbc(aes)))",
14131 + .cra_driver_name = "echainiv-authenc-"
14132 + "hmac-sha224-cbc-aes-caam-qi",
14133 + .cra_blocksize = AES_BLOCK_SIZE,
14134 + },
14135 + .setkey = aead_setkey,
14136 + .setauthsize = aead_setauthsize,
14137 + .encrypt = aead_encrypt,
14138 + .decrypt = aead_decrypt,
14139 + .ivsize = AES_BLOCK_SIZE,
14140 + .maxauthsize = SHA224_DIGEST_SIZE,
14141 + },
14142 + .caam = {
14143 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14144 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14145 + OP_ALG_AAI_HMAC_PRECOMP,
14146 + .geniv = true,
14147 + }
14148 + },
14149 + {
14150 + .aead = {
14151 + .base = {
14152 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
14153 + .cra_driver_name = "authenc-hmac-sha256-"
14154 + "cbc-aes-caam-qi",
14155 + .cra_blocksize = AES_BLOCK_SIZE,
14156 + },
14157 + .setkey = aead_setkey,
14158 + .setauthsize = aead_setauthsize,
14159 + .encrypt = aead_encrypt,
14160 + .decrypt = aead_decrypt,
14161 + .ivsize = AES_BLOCK_SIZE,
14162 + .maxauthsize = SHA256_DIGEST_SIZE,
14163 + },
14164 + .caam = {
14165 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14166 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14167 + OP_ALG_AAI_HMAC_PRECOMP,
14168 + }
14169 + },
14170 + {
14171 + .aead = {
14172 + .base = {
14173 + .cra_name = "echainiv(authenc(hmac(sha256),"
14174 + "cbc(aes)))",
14175 + .cra_driver_name = "echainiv-authenc-"
14176 + "hmac-sha256-cbc-aes-"
14177 + "caam-qi",
14178 + .cra_blocksize = AES_BLOCK_SIZE,
14179 + },
14180 + .setkey = aead_setkey,
14181 + .setauthsize = aead_setauthsize,
14182 + .encrypt = aead_encrypt,
14183 + .decrypt = aead_decrypt,
14184 + .ivsize = AES_BLOCK_SIZE,
14185 + .maxauthsize = SHA256_DIGEST_SIZE,
14186 + },
14187 + .caam = {
14188 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14189 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14190 + OP_ALG_AAI_HMAC_PRECOMP,
14191 + .geniv = true,
14192 + }
14193 + },
14194 + {
14195 + .aead = {
14196 + .base = {
14197 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
14198 + .cra_driver_name = "authenc-hmac-sha384-"
14199 + "cbc-aes-caam-qi",
14200 + .cra_blocksize = AES_BLOCK_SIZE,
14201 + },
14202 + .setkey = aead_setkey,
14203 + .setauthsize = aead_setauthsize,
14204 + .encrypt = aead_encrypt,
14205 + .decrypt = aead_decrypt,
14206 + .ivsize = AES_BLOCK_SIZE,
14207 + .maxauthsize = SHA384_DIGEST_SIZE,
14208 + },
14209 + .caam = {
14210 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14211 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14212 + OP_ALG_AAI_HMAC_PRECOMP,
14213 + }
14214 + },
14215 + {
14216 + .aead = {
14217 + .base = {
14218 + .cra_name = "echainiv(authenc(hmac(sha384),"
14219 + "cbc(aes)))",
14220 + .cra_driver_name = "echainiv-authenc-"
14221 + "hmac-sha384-cbc-aes-"
14222 + "caam-qi",
14223 + .cra_blocksize = AES_BLOCK_SIZE,
14224 + },
14225 + .setkey = aead_setkey,
14226 + .setauthsize = aead_setauthsize,
14227 + .encrypt = aead_encrypt,
14228 + .decrypt = aead_decrypt,
14229 + .ivsize = AES_BLOCK_SIZE,
14230 + .maxauthsize = SHA384_DIGEST_SIZE,
14231 + },
14232 + .caam = {
14233 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14234 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14235 + OP_ALG_AAI_HMAC_PRECOMP,
14236 + .geniv = true,
14237 + }
14238 + },
14239 + {
14240 + .aead = {
14241 + .base = {
14242 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
14243 + .cra_driver_name = "authenc-hmac-sha512-"
14244 + "cbc-aes-caam-qi",
14245 + .cra_blocksize = AES_BLOCK_SIZE,
14246 + },
14247 + .setkey = aead_setkey,
14248 + .setauthsize = aead_setauthsize,
14249 + .encrypt = aead_encrypt,
14250 + .decrypt = aead_decrypt,
14251 + .ivsize = AES_BLOCK_SIZE,
14252 + .maxauthsize = SHA512_DIGEST_SIZE,
14253 + },
14254 + .caam = {
14255 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14256 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14257 + OP_ALG_AAI_HMAC_PRECOMP,
14258 + }
14259 + },
14260 + {
14261 + .aead = {
14262 + .base = {
14263 + .cra_name = "echainiv(authenc(hmac(sha512),"
14264 + "cbc(aes)))",
14265 + .cra_driver_name = "echainiv-authenc-"
14266 + "hmac-sha512-cbc-aes-"
14267 + "caam-qi",
14268 + .cra_blocksize = AES_BLOCK_SIZE,
14269 + },
14270 + .setkey = aead_setkey,
14271 + .setauthsize = aead_setauthsize,
14272 + .encrypt = aead_encrypt,
14273 + .decrypt = aead_decrypt,
14274 + .ivsize = AES_BLOCK_SIZE,
14275 + .maxauthsize = SHA512_DIGEST_SIZE,
14276 + },
14277 + .caam = {
14278 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14279 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14280 + OP_ALG_AAI_HMAC_PRECOMP,
14281 + .geniv = true,
14282 + }
14283 + },
14284 + {
14285 + .aead = {
14286 + .base = {
14287 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14288 + .cra_driver_name = "authenc-hmac-md5-"
14289 + "cbc-des3_ede-caam-qi",
14290 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14291 + },
14292 + .setkey = aead_setkey,
14293 + .setauthsize = aead_setauthsize,
14294 + .encrypt = aead_encrypt,
14295 + .decrypt = aead_decrypt,
14296 + .ivsize = DES3_EDE_BLOCK_SIZE,
14297 + .maxauthsize = MD5_DIGEST_SIZE,
14298 + },
14299 + .caam = {
14300 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14301 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14302 + OP_ALG_AAI_HMAC_PRECOMP,
14303 + }
14304 + },
14305 + {
14306 + .aead = {
14307 + .base = {
14308 + .cra_name = "echainiv(authenc(hmac(md5),"
14309 + "cbc(des3_ede)))",
14310 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14311 + "cbc-des3_ede-caam-qi",
14312 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14313 + },
14314 + .setkey = aead_setkey,
14315 + .setauthsize = aead_setauthsize,
14316 + .encrypt = aead_encrypt,
14317 + .decrypt = aead_decrypt,
14318 + .ivsize = DES3_EDE_BLOCK_SIZE,
14319 + .maxauthsize = MD5_DIGEST_SIZE,
14320 + },
14321 + .caam = {
14322 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14323 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14324 + OP_ALG_AAI_HMAC_PRECOMP,
14325 + .geniv = true,
14326 + }
14327 + },
14328 + {
14329 + .aead = {
14330 + .base = {
14331 + .cra_name = "authenc(hmac(sha1),"
14332 + "cbc(des3_ede))",
14333 + .cra_driver_name = "authenc-hmac-sha1-"
14334 + "cbc-des3_ede-caam-qi",
14335 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14336 + },
14337 + .setkey = aead_setkey,
14338 + .setauthsize = aead_setauthsize,
14339 + .encrypt = aead_encrypt,
14340 + .decrypt = aead_decrypt,
14341 + .ivsize = DES3_EDE_BLOCK_SIZE,
14342 + .maxauthsize = SHA1_DIGEST_SIZE,
14343 + },
14344 + .caam = {
14345 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14346 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14347 + OP_ALG_AAI_HMAC_PRECOMP,
14348 + },
14349 + },
14350 + {
14351 + .aead = {
14352 + .base = {
14353 + .cra_name = "echainiv(authenc(hmac(sha1),"
14354 + "cbc(des3_ede)))",
14355 + .cra_driver_name = "echainiv-authenc-"
14356 + "hmac-sha1-"
14357 + "cbc-des3_ede-caam-qi",
14358 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14359 + },
14360 + .setkey = aead_setkey,
14361 + .setauthsize = aead_setauthsize,
14362 + .encrypt = aead_encrypt,
14363 + .decrypt = aead_decrypt,
14364 + .ivsize = DES3_EDE_BLOCK_SIZE,
14365 + .maxauthsize = SHA1_DIGEST_SIZE,
14366 + },
14367 + .caam = {
14368 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14369 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14370 + OP_ALG_AAI_HMAC_PRECOMP,
14371 + .geniv = true,
14372 + }
14373 + },
14374 + {
14375 + .aead = {
14376 + .base = {
14377 + .cra_name = "authenc(hmac(sha224),"
14378 + "cbc(des3_ede))",
14379 + .cra_driver_name = "authenc-hmac-sha224-"
14380 + "cbc-des3_ede-caam-qi",
14381 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14382 + },
14383 + .setkey = aead_setkey,
14384 + .setauthsize = aead_setauthsize,
14385 + .encrypt = aead_encrypt,
14386 + .decrypt = aead_decrypt,
14387 + .ivsize = DES3_EDE_BLOCK_SIZE,
14388 + .maxauthsize = SHA224_DIGEST_SIZE,
14389 + },
14390 + .caam = {
14391 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14392 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14393 + OP_ALG_AAI_HMAC_PRECOMP,
14394 + },
14395 + },
14396 + {
14397 + .aead = {
14398 + .base = {
14399 + .cra_name = "echainiv(authenc(hmac(sha224),"
14400 + "cbc(des3_ede)))",
14401 + .cra_driver_name = "echainiv-authenc-"
14402 + "hmac-sha224-"
14403 + "cbc-des3_ede-caam-qi",
14404 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14405 + },
14406 + .setkey = aead_setkey,
14407 + .setauthsize = aead_setauthsize,
14408 + .encrypt = aead_encrypt,
14409 + .decrypt = aead_decrypt,
14410 + .ivsize = DES3_EDE_BLOCK_SIZE,
14411 + .maxauthsize = SHA224_DIGEST_SIZE,
14412 + },
14413 + .caam = {
14414 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14415 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14416 + OP_ALG_AAI_HMAC_PRECOMP,
14417 + .geniv = true,
14418 + }
14419 + },
14420 + {
14421 + .aead = {
14422 + .base = {
14423 + .cra_name = "authenc(hmac(sha256),"
14424 + "cbc(des3_ede))",
14425 + .cra_driver_name = "authenc-hmac-sha256-"
14426 + "cbc-des3_ede-caam-qi",
14427 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14428 + },
14429 + .setkey = aead_setkey,
14430 + .setauthsize = aead_setauthsize,
14431 + .encrypt = aead_encrypt,
14432 + .decrypt = aead_decrypt,
14433 + .ivsize = DES3_EDE_BLOCK_SIZE,
14434 + .maxauthsize = SHA256_DIGEST_SIZE,
14435 + },
14436 + .caam = {
14437 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14438 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14439 + OP_ALG_AAI_HMAC_PRECOMP,
14440 + },
14441 + },
14442 + {
14443 + .aead = {
14444 + .base = {
14445 + .cra_name = "echainiv(authenc(hmac(sha256),"
14446 + "cbc(des3_ede)))",
14447 + .cra_driver_name = "echainiv-authenc-"
14448 + "hmac-sha256-"
14449 + "cbc-des3_ede-caam-qi",
14450 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14451 + },
14452 + .setkey = aead_setkey,
14453 + .setauthsize = aead_setauthsize,
14454 + .encrypt = aead_encrypt,
14455 + .decrypt = aead_decrypt,
14456 + .ivsize = DES3_EDE_BLOCK_SIZE,
14457 + .maxauthsize = SHA256_DIGEST_SIZE,
14458 + },
14459 + .caam = {
14460 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14461 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14462 + OP_ALG_AAI_HMAC_PRECOMP,
14463 + .geniv = true,
14464 + }
14465 + },
14466 + {
14467 + .aead = {
14468 + .base = {
14469 + .cra_name = "authenc(hmac(sha384),"
14470 + "cbc(des3_ede))",
14471 + .cra_driver_name = "authenc-hmac-sha384-"
14472 + "cbc-des3_ede-caam-qi",
14473 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14474 + },
14475 + .setkey = aead_setkey,
14476 + .setauthsize = aead_setauthsize,
14477 + .encrypt = aead_encrypt,
14478 + .decrypt = aead_decrypt,
14479 + .ivsize = DES3_EDE_BLOCK_SIZE,
14480 + .maxauthsize = SHA384_DIGEST_SIZE,
14481 + },
14482 + .caam = {
14483 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14484 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14485 + OP_ALG_AAI_HMAC_PRECOMP,
14486 + },
14487 + },
14488 + {
14489 + .aead = {
14490 + .base = {
14491 + .cra_name = "echainiv(authenc(hmac(sha384),"
14492 + "cbc(des3_ede)))",
14493 + .cra_driver_name = "echainiv-authenc-"
14494 + "hmac-sha384-"
14495 + "cbc-des3_ede-caam-qi",
14496 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14497 + },
14498 + .setkey = aead_setkey,
14499 + .setauthsize = aead_setauthsize,
14500 + .encrypt = aead_encrypt,
14501 + .decrypt = aead_decrypt,
14502 + .ivsize = DES3_EDE_BLOCK_SIZE,
14503 + .maxauthsize = SHA384_DIGEST_SIZE,
14504 + },
14505 + .caam = {
14506 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14507 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14508 + OP_ALG_AAI_HMAC_PRECOMP,
14509 + .geniv = true,
14510 + }
14511 + },
14512 + {
14513 + .aead = {
14514 + .base = {
14515 + .cra_name = "authenc(hmac(sha512),"
14516 + "cbc(des3_ede))",
14517 + .cra_driver_name = "authenc-hmac-sha512-"
14518 + "cbc-des3_ede-caam-qi",
14519 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14520 + },
14521 + .setkey = aead_setkey,
14522 + .setauthsize = aead_setauthsize,
14523 + .encrypt = aead_encrypt,
14524 + .decrypt = aead_decrypt,
14525 + .ivsize = DES3_EDE_BLOCK_SIZE,
14526 + .maxauthsize = SHA512_DIGEST_SIZE,
14527 + },
14528 + .caam = {
14529 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14530 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14531 + OP_ALG_AAI_HMAC_PRECOMP,
14532 + },
14533 + },
14534 + {
14535 + .aead = {
14536 + .base = {
14537 + .cra_name = "echainiv(authenc(hmac(sha512),"
14538 + "cbc(des3_ede)))",
14539 + .cra_driver_name = "echainiv-authenc-"
14540 + "hmac-sha512-"
14541 + "cbc-des3_ede-caam-qi",
14542 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14543 + },
14544 + .setkey = aead_setkey,
14545 + .setauthsize = aead_setauthsize,
14546 + .encrypt = aead_encrypt,
14547 + .decrypt = aead_decrypt,
14548 + .ivsize = DES3_EDE_BLOCK_SIZE,
14549 + .maxauthsize = SHA512_DIGEST_SIZE,
14550 + },
14551 + .caam = {
14552 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14553 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14554 + OP_ALG_AAI_HMAC_PRECOMP,
14555 + .geniv = true,
14556 + }
14557 + },
14558 + {
14559 + .aead = {
14560 + .base = {
14561 + .cra_name = "authenc(hmac(md5),cbc(des))",
14562 + .cra_driver_name = "authenc-hmac-md5-"
14563 + "cbc-des-caam-qi",
14564 + .cra_blocksize = DES_BLOCK_SIZE,
14565 + },
14566 + .setkey = aead_setkey,
14567 + .setauthsize = aead_setauthsize,
14568 + .encrypt = aead_encrypt,
14569 + .decrypt = aead_decrypt,
14570 + .ivsize = DES_BLOCK_SIZE,
14571 + .maxauthsize = MD5_DIGEST_SIZE,
14572 + },
14573 + .caam = {
14574 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14575 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14576 + OP_ALG_AAI_HMAC_PRECOMP,
14577 + },
14578 + },
14579 + {
14580 + .aead = {
14581 + .base = {
14582 + .cra_name = "echainiv(authenc(hmac(md5),"
14583 + "cbc(des)))",
14584 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14585 + "cbc-des-caam-qi",
14586 + .cra_blocksize = DES_BLOCK_SIZE,
14587 + },
14588 + .setkey = aead_setkey,
14589 + .setauthsize = aead_setauthsize,
14590 + .encrypt = aead_encrypt,
14591 + .decrypt = aead_decrypt,
14592 + .ivsize = DES_BLOCK_SIZE,
14593 + .maxauthsize = MD5_DIGEST_SIZE,
14594 + },
14595 + .caam = {
14596 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14597 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14598 + OP_ALG_AAI_HMAC_PRECOMP,
14599 + .geniv = true,
14600 + }
14601 + },
14602 + {
14603 + .aead = {
14604 + .base = {
14605 + .cra_name = "authenc(hmac(sha1),cbc(des))",
14606 + .cra_driver_name = "authenc-hmac-sha1-"
14607 + "cbc-des-caam-qi",
14608 + .cra_blocksize = DES_BLOCK_SIZE,
14609 + },
14610 + .setkey = aead_setkey,
14611 + .setauthsize = aead_setauthsize,
14612 + .encrypt = aead_encrypt,
14613 + .decrypt = aead_decrypt,
14614 + .ivsize = DES_BLOCK_SIZE,
14615 + .maxauthsize = SHA1_DIGEST_SIZE,
14616 + },
14617 + .caam = {
14618 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14619 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14620 + OP_ALG_AAI_HMAC_PRECOMP,
14621 + },
14622 + },
14623 + {
14624 + .aead = {
14625 + .base = {
14626 + .cra_name = "echainiv(authenc(hmac(sha1),"
14627 + "cbc(des)))",
14628 + .cra_driver_name = "echainiv-authenc-"
14629 + "hmac-sha1-cbc-des-caam-qi",
14630 + .cra_blocksize = DES_BLOCK_SIZE,
14631 + },
14632 + .setkey = aead_setkey,
14633 + .setauthsize = aead_setauthsize,
14634 + .encrypt = aead_encrypt,
14635 + .decrypt = aead_decrypt,
14636 + .ivsize = DES_BLOCK_SIZE,
14637 + .maxauthsize = SHA1_DIGEST_SIZE,
14638 + },
14639 + .caam = {
14640 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14641 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14642 + OP_ALG_AAI_HMAC_PRECOMP,
14643 + .geniv = true,
14644 + }
14645 + },
14646 + {
14647 + .aead = {
14648 + .base = {
14649 + .cra_name = "authenc(hmac(sha224),cbc(des))",
14650 + .cra_driver_name = "authenc-hmac-sha224-"
14651 + "cbc-des-caam-qi",
14652 + .cra_blocksize = DES_BLOCK_SIZE,
14653 + },
14654 + .setkey = aead_setkey,
14655 + .setauthsize = aead_setauthsize,
14656 + .encrypt = aead_encrypt,
14657 + .decrypt = aead_decrypt,
14658 + .ivsize = DES_BLOCK_SIZE,
14659 + .maxauthsize = SHA224_DIGEST_SIZE,
14660 + },
14661 + .caam = {
14662 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14663 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14664 + OP_ALG_AAI_HMAC_PRECOMP,
14665 + },
14666 + },
14667 + {
14668 + .aead = {
14669 + .base = {
14670 + .cra_name = "echainiv(authenc(hmac(sha224),"
14671 + "cbc(des)))",
14672 + .cra_driver_name = "echainiv-authenc-"
14673 + "hmac-sha224-cbc-des-"
14674 + "caam-qi",
14675 + .cra_blocksize = DES_BLOCK_SIZE,
14676 + },
14677 + .setkey = aead_setkey,
14678 + .setauthsize = aead_setauthsize,
14679 + .encrypt = aead_encrypt,
14680 + .decrypt = aead_decrypt,
14681 + .ivsize = DES_BLOCK_SIZE,
14682 + .maxauthsize = SHA224_DIGEST_SIZE,
14683 + },
14684 + .caam = {
14685 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14686 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14687 + OP_ALG_AAI_HMAC_PRECOMP,
14688 + .geniv = true,
14689 + }
14690 + },
14691 + {
14692 + .aead = {
14693 + .base = {
14694 + .cra_name = "authenc(hmac(sha256),cbc(des))",
14695 + .cra_driver_name = "authenc-hmac-sha256-"
14696 + "cbc-des-caam-qi",
14697 + .cra_blocksize = DES_BLOCK_SIZE,
14698 + },
14699 + .setkey = aead_setkey,
14700 + .setauthsize = aead_setauthsize,
14701 + .encrypt = aead_encrypt,
14702 + .decrypt = aead_decrypt,
14703 + .ivsize = DES_BLOCK_SIZE,
14704 + .maxauthsize = SHA256_DIGEST_SIZE,
14705 + },
14706 + .caam = {
14707 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14708 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14709 + OP_ALG_AAI_HMAC_PRECOMP,
14710 + },
14711 + },
14712 + {
14713 + .aead = {
14714 + .base = {
14715 + .cra_name = "echainiv(authenc(hmac(sha256),"
14716 + "cbc(des)))",
14717 + .cra_driver_name = "echainiv-authenc-"
14718 + "hmac-sha256-cbc-des-"
14719 + "caam-qi",
14720 + .cra_blocksize = DES_BLOCK_SIZE,
14721 + },
14722 + .setkey = aead_setkey,
14723 + .setauthsize = aead_setauthsize,
14724 + .encrypt = aead_encrypt,
14725 + .decrypt = aead_decrypt,
14726 + .ivsize = DES_BLOCK_SIZE,
14727 + .maxauthsize = SHA256_DIGEST_SIZE,
14728 + },
14729 + .caam = {
14730 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14731 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14732 + OP_ALG_AAI_HMAC_PRECOMP,
14733 + .geniv = true,
14734 + },
14735 + },
14736 + {
14737 + .aead = {
14738 + .base = {
14739 + .cra_name = "authenc(hmac(sha384),cbc(des))",
14740 + .cra_driver_name = "authenc-hmac-sha384-"
14741 + "cbc-des-caam-qi",
14742 + .cra_blocksize = DES_BLOCK_SIZE,
14743 + },
14744 + .setkey = aead_setkey,
14745 + .setauthsize = aead_setauthsize,
14746 + .encrypt = aead_encrypt,
14747 + .decrypt = aead_decrypt,
14748 + .ivsize = DES_BLOCK_SIZE,
14749 + .maxauthsize = SHA384_DIGEST_SIZE,
14750 + },
14751 + .caam = {
14752 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14753 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14754 + OP_ALG_AAI_HMAC_PRECOMP,
14755 + },
14756 + },
14757 + {
14758 + .aead = {
14759 + .base = {
14760 + .cra_name = "echainiv(authenc(hmac(sha384),"
14761 + "cbc(des)))",
14762 + .cra_driver_name = "echainiv-authenc-"
14763 + "hmac-sha384-cbc-des-"
14764 + "caam-qi",
14765 + .cra_blocksize = DES_BLOCK_SIZE,
14766 + },
14767 + .setkey = aead_setkey,
14768 + .setauthsize = aead_setauthsize,
14769 + .encrypt = aead_encrypt,
14770 + .decrypt = aead_decrypt,
14771 + .ivsize = DES_BLOCK_SIZE,
14772 + .maxauthsize = SHA384_DIGEST_SIZE,
14773 + },
14774 + .caam = {
14775 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14776 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14777 + OP_ALG_AAI_HMAC_PRECOMP,
14778 + .geniv = true,
14779 + }
14780 + },
14781 + {
14782 + .aead = {
14783 + .base = {
14784 + .cra_name = "authenc(hmac(sha512),cbc(des))",
14785 + .cra_driver_name = "authenc-hmac-sha512-"
14786 + "cbc-des-caam-qi",
14787 + .cra_blocksize = DES_BLOCK_SIZE,
14788 + },
14789 + .setkey = aead_setkey,
14790 + .setauthsize = aead_setauthsize,
14791 + .encrypt = aead_encrypt,
14792 + .decrypt = aead_decrypt,
14793 + .ivsize = DES_BLOCK_SIZE,
14794 + .maxauthsize = SHA512_DIGEST_SIZE,
14795 + },
14796 + .caam = {
14797 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14798 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14799 + OP_ALG_AAI_HMAC_PRECOMP,
14800 + }
14801 + },
14802 + {
14803 + .aead = {
14804 + .base = {
14805 + .cra_name = "echainiv(authenc(hmac(sha512),"
14806 + "cbc(des)))",
14807 + .cra_driver_name = "echainiv-authenc-"
14808 + "hmac-sha512-cbc-des-"
14809 + "caam-qi",
14810 + .cra_blocksize = DES_BLOCK_SIZE,
14811 + },
14812 + .setkey = aead_setkey,
14813 + .setauthsize = aead_setauthsize,
14814 + .encrypt = aead_encrypt,
14815 + .decrypt = aead_decrypt,
14816 + .ivsize = DES_BLOCK_SIZE,
14817 + .maxauthsize = SHA512_DIGEST_SIZE,
14818 + },
14819 + .caam = {
14820 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14821 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14822 + OP_ALG_AAI_HMAC_PRECOMP,
14823 + .geniv = true,
14824 + }
14825 + },
14826 + {
14827 + .aead = {
14828 + .base = {
14829 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
14830 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14831 + .cra_blocksize = AES_BLOCK_SIZE,
14832 + },
14833 + .setkey = tls_setkey,
14834 + .setauthsize = tls_setauthsize,
14835 + .encrypt = tls_encrypt,
14836 + .decrypt = tls_decrypt,
14837 + .ivsize = AES_BLOCK_SIZE,
14838 + .maxauthsize = SHA1_DIGEST_SIZE,
14839 + },
14840 + .caam = {
14841 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14842 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14843 + OP_ALG_AAI_HMAC_PRECOMP,
14844 + }
14845 + }
14846 +};
14847 +
14848 +struct caam_crypto_alg {
14849 + struct list_head entry;
14850 + struct crypto_alg crypto_alg;
14851 + struct caam_alg_entry caam;
14852 +};
14853 +
14854 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
14855 +{
14856 + struct caam_drv_private *priv;
14857 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
14858 + static const u8 digest_size[] = {
14859 + MD5_DIGEST_SIZE,
14860 + SHA1_DIGEST_SIZE,
14861 + SHA224_DIGEST_SIZE,
14862 + SHA256_DIGEST_SIZE,
14863 + SHA384_DIGEST_SIZE,
14864 + SHA512_DIGEST_SIZE
14865 + };
14866 + u8 op_id;
14867 +
14868 + /*
14869 + * distribute tfms across job rings to ensure in-order
14870 + * crypto request processing per tfm
14871 + */
14872 + ctx->jrdev = caam_jr_alloc();
14873 + if (IS_ERR(ctx->jrdev)) {
14874 + pr_err("Job Ring Device allocation for transform failed\n");
14875 + return PTR_ERR(ctx->jrdev);
14876 + }
14877 +
14878 + ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
14879 + DMA_TO_DEVICE);
14880 + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
14881 + dev_err(ctx->jrdev, "unable to map key\n");
14882 + caam_jr_free(ctx->jrdev);
14883 + return -ENOMEM;
14884 + }
14885 +
14886 + /* copy descriptor header template value */
14887 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
14888 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
14889 +
14890 + if (ctx->adata.algtype) {
14891 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
14892 + >> OP_ALG_ALGSEL_SHIFT;
14893 + if (op_id < ARRAY_SIZE(digest_size)) {
14894 + ctx->authsize = digest_size[op_id];
14895 + } else {
14896 + dev_err(ctx->jrdev,
14897 + "incorrect op_id %d; must be less than %zu\n",
14898 + op_id, ARRAY_SIZE(digest_size));
14899 + caam_jr_free(ctx->jrdev);
14900 + return -EINVAL;
14901 + }
14902 + } else {
14903 + ctx->authsize = 0;
14904 + }
14905 +
14906 + priv = dev_get_drvdata(ctx->jrdev->parent);
14907 + ctx->qidev = priv->qidev;
14908 +
14909 + spin_lock_init(&ctx->lock);
14910 + ctx->drv_ctx[ENCRYPT] = NULL;
14911 + ctx->drv_ctx[DECRYPT] = NULL;
14912 + ctx->drv_ctx[GIVENCRYPT] = NULL;
14913 +
14914 + return 0;
14915 +}
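/*
 * Worked example of the digest_size[] lookup in caam_init_common() above,
 * assuming the OP_ALG_ALGSEL_* encodings from desc.h (MD5 = 0x40 ... SHA512
 * = 0x45, with OP_ALG_ALGSEL_SUBMASK keeping only the low nibble of the
 * selector):
 *
 *   authenc(hmac(sha256),cbc(aes)):
 *     class2_alg_type = OP_ALG_ALGSEL_SHA256     (selector 0x43)
 *     op_id           = 0x43 & 0x0f            = 3
 *     ctx->authsize   = digest_size[3]         = SHA256_DIGEST_SIZE (32)
 */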
14916 +
14917 +static int caam_cra_init(struct crypto_tfm *tfm)
14918 +{
14919 + struct crypto_alg *alg = tfm->__crt_alg;
14920 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14921 + crypto_alg);
14922 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
14923 +
14924 + return caam_init_common(ctx, &caam_alg->caam);
14925 +}
14926 +
14927 +static int caam_aead_init(struct crypto_aead *tfm)
14928 +{
14929 + struct aead_alg *alg = crypto_aead_alg(tfm);
14930 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14931 + aead);
14932 + struct caam_ctx *ctx = crypto_aead_ctx(tfm);
14933 +
14934 + return caam_init_common(ctx, &caam_alg->caam);
14935 +}
14936 +
14937 +static void caam_exit_common(struct caam_ctx *ctx)
14938 +{
14939 + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
14940 + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
14941 + caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
14942 +
14943 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
14944 + DMA_TO_DEVICE);
14945 +
14946 + caam_jr_free(ctx->jrdev);
14947 +}
14948 +
14949 +static void caam_cra_exit(struct crypto_tfm *tfm)
14950 +{
14951 + caam_exit_common(crypto_tfm_ctx(tfm));
14952 +}
14953 +
14954 +static void caam_aead_exit(struct crypto_aead *tfm)
14955 +{
14956 + caam_exit_common(crypto_aead_ctx(tfm));
14957 +}
14958 +
14959 +static struct list_head alg_list;
14960 +static void __exit caam_qi_algapi_exit(void)
14961 +{
14962 + struct caam_crypto_alg *t_alg, *n;
14963 + int i;
14964 +
14965 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
14966 + struct caam_aead_alg *t_alg = driver_aeads + i;
14967 +
14968 + if (t_alg->registered)
14969 + crypto_unregister_aead(&t_alg->aead);
14970 + }
14971 +
14972 + if (!alg_list.next)
14973 + return;
14974 +
14975 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
14976 + crypto_unregister_alg(&t_alg->crypto_alg);
14977 + list_del(&t_alg->entry);
14978 + kfree(t_alg);
14979 + }
14980 +}
14981 +
14982 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
14983 + *template)
14984 +{
14985 + struct caam_crypto_alg *t_alg;
14986 + struct crypto_alg *alg;
14987 +
14988 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
14989 + if (!t_alg)
14990 + return ERR_PTR(-ENOMEM);
14991 +
14992 + alg = &t_alg->crypto_alg;
14993 +
14994 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
14995 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
14996 + template->driver_name);
14997 + alg->cra_module = THIS_MODULE;
14998 + alg->cra_init = caam_cra_init;
14999 + alg->cra_exit = caam_cra_exit;
15000 + alg->cra_priority = CAAM_CRA_PRIORITY;
15001 + alg->cra_blocksize = template->blocksize;
15002 + alg->cra_alignmask = 0;
15003 + alg->cra_ctxsize = sizeof(struct caam_ctx);
15004 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
15005 + template->type;
15006 + switch (template->type) {
15007 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15008 + alg->cra_type = &crypto_givcipher_type;
15009 + alg->cra_ablkcipher = template->template_ablkcipher;
15010 + break;
15011 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15012 + alg->cra_type = &crypto_ablkcipher_type;
15013 + alg->cra_ablkcipher = template->template_ablkcipher;
15014 + break;
15015 + }
15016 +
15017 + t_alg->caam.class1_alg_type = template->class1_alg_type;
15018 + t_alg->caam.class2_alg_type = template->class2_alg_type;
15019 +
15020 + return t_alg;
15021 +}
15022 +
15023 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
15024 +{
15025 + struct aead_alg *alg = &t_alg->aead;
15026 +
15027 + alg->base.cra_module = THIS_MODULE;
15028 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
15029 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
15030 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
15031 +
15032 + alg->init = caam_aead_init;
15033 + alg->exit = caam_aead_exit;
15034 +}
15035 +
15036 +static int __init caam_qi_algapi_init(void)
15037 +{
15038 + struct device_node *dev_node;
15039 + struct platform_device *pdev;
15040 + struct device *ctrldev;
15041 + struct caam_drv_private *priv;
15042 + int i = 0, err = 0;
15043 + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15044 + unsigned int md_limit = SHA512_DIGEST_SIZE;
15045 + bool registered = false;
15046 +
15047 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15048 + if (!dev_node) {
15049 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15050 + if (!dev_node)
15051 + return -ENODEV;
15052 + }
15053 +
15054 + pdev = of_find_device_by_node(dev_node);
15055 + of_node_put(dev_node);
15056 + if (!pdev)
15057 + return -ENODEV;
15058 +
15059 + ctrldev = &pdev->dev;
15060 + priv = dev_get_drvdata(ctrldev);
15061 +
15062 + /*
15063 + * If priv is NULL, it's probably because the caam driver wasn't
15064 + * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15065 + */
15066 + if (!priv || !priv->qi_present)
15067 + return -ENODEV;
15068 +
15069 + if (caam_dpaa2) {
15070 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15071 + return -ENODEV;
15072 + }
15073 +
15074 + INIT_LIST_HEAD(&alg_list);
15075 +
15076 + /*
15077 + * Register crypto algorithms the device supports.
15078 + * First, detect presence and attributes of DES, AES, and MD blocks.
15079 + */
15080 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15081 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15082 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15083 + aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15084 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15085 +
15086 + /* If MD is present, limit digest size based on LP256 */
15087 + if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15088 + md_limit = SHA256_DIGEST_SIZE;
15089 +
15090 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15091 + struct caam_crypto_alg *t_alg;
15092 + struct caam_alg_template *alg = driver_algs + i;
15093 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15094 +
15095 + /* Skip DES algorithms if not supported by device */
15096 + if (!des_inst &&
15097 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15098 + (alg_sel == OP_ALG_ALGSEL_DES)))
15099 + continue;
15100 +
15101 + /* Skip AES algorithms if not supported by device */
15102 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15103 + continue;
15104 +
15105 + t_alg = caam_alg_alloc(alg);
15106 + if (IS_ERR(t_alg)) {
15107 + err = PTR_ERR(t_alg);
15108 + dev_warn(priv->qidev, "%s alg allocation failed\n",
15109 + alg->driver_name);
15110 + continue;
15111 + }
15112 +
15113 + err = crypto_register_alg(&t_alg->crypto_alg);
15114 + if (err) {
15115 + dev_warn(priv->qidev, "%s alg registration failed\n",
15116 + t_alg->crypto_alg.cra_driver_name);
15117 + kfree(t_alg);
15118 + continue;
15119 + }
15120 +
15121 + list_add_tail(&t_alg->entry, &alg_list);
15122 + registered = true;
15123 + }
15124 +
15125 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15126 + struct caam_aead_alg *t_alg = driver_aeads + i;
15127 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15128 + OP_ALG_ALGSEL_MASK;
15129 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15130 + OP_ALG_ALGSEL_MASK;
15131 + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15132 +
15133 + /* Skip DES algorithms if not supported by device */
15134 + if (!des_inst &&
15135 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15136 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15137 + continue;
15138 +
15139 + /* Skip AES algorithms if not supported by device */
15140 + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15141 + continue;
15142 +
15143 + /*
15144 + * Check support for AES algorithms not available
15145 + * on LP devices.
15146 + */
15147 + if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15148 + (alg_aai == OP_ALG_AAI_GCM))
15149 + continue;
15150 +
15151 + /*
15152 + * Skip algorithms requiring message digests
15153 + * if MD or MD size is not supported by device.
15154 + */
15155 + if (c2_alg_sel &&
15156 + (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15157 + continue;
15158 +
15159 + caam_aead_alg_init(t_alg);
15160 +
15161 + err = crypto_register_aead(&t_alg->aead);
15162 + if (err) {
15163 + pr_warn("%s alg registration failed\n",
15164 + t_alg->aead.base.cra_driver_name);
15165 + continue;
15166 + }
15167 +
15168 + t_alg->registered = true;
15169 + registered = true;
15170 + }
15171 +
15172 + if (registered)
15173 + dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15174 +
15175 + return err;
15176 +}
15177 +
15178 +module_init(caam_qi_algapi_init);
15179 +module_exit(caam_qi_algapi_exit);
15180 +
15181 +MODULE_LICENSE("GPL");
15182 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15183 +MODULE_AUTHOR("Freescale Semiconductor");
15184 diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
15185 new file mode 100644
15186 index 00000000..f0316346
15187 --- /dev/null
15188 +++ b/drivers/crypto/caam/caamalg_qi2.c
15189 @@ -0,0 +1,4428 @@
15190 +/*
15191 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15192 + * Copyright 2017 NXP
15193 + *
15194 + * Redistribution and use in source and binary forms, with or without
15195 + * modification, are permitted provided that the following conditions are met:
15196 + * * Redistributions of source code must retain the above copyright
15197 + * notice, this list of conditions and the following disclaimer.
15198 + * * Redistributions in binary form must reproduce the above copyright
15199 + * notice, this list of conditions and the following disclaimer in the
15200 + * documentation and/or other materials provided with the distribution.
15201 + * * Neither the names of the above-listed copyright holders nor the
15202 + * names of any contributors may be used to endorse or promote products
15203 + * derived from this software without specific prior written permission.
15204 + *
15205 + *
15206 + * ALTERNATIVELY, this software may be distributed under the terms of the
15207 + * GNU General Public License ("GPL") as published by the Free Software
15208 + * Foundation, either version 2 of that License or (at your option) any
15209 + * later version.
15210 + *
15211 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15212 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15213 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15214 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15215 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15216 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15217 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15218 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15219 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15220 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15221 + * POSSIBILITY OF SUCH DAMAGE.
15222 + */
15223 +
15224 +#include "compat.h"
15225 +#include "regs.h"
15226 +#include "caamalg_qi2.h"
15227 +#include "dpseci_cmd.h"
15228 +#include "desc_constr.h"
15229 +#include "error.h"
15230 +#include "sg_sw_sec4.h"
15231 +#include "sg_sw_qm2.h"
15232 +#include "key_gen.h"
15233 +#include "caamalg_desc.h"
15234 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15235 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15236 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15237 +
15238 +#define CAAM_CRA_PRIORITY 2000
15239 +
15240 +/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
15241 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15242 + SHA512_DIGEST_SIZE * 2)
15243 +
15244 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15245 +bool caam_little_end;
15246 +EXPORT_SYMBOL(caam_little_end);
15247 +bool caam_imx;
15248 +EXPORT_SYMBOL(caam_imx);
15249 +#endif
15250 +
15251 +/*
15252 + * This is a cache of buffers from which users of the CAAM QI driver
15253 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15254 + * NOTE: A more elegant solution would be to have some headroom in the frames
15255 + * being processed. This can be added by the dpaa2-eth driver. This would
15256 + * pose a problem for userspace application processing which cannot
15257 + * know of this limitation. So for now, this will work.
15258 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
15259 + */
15260 +static struct kmem_cache *qi_cache;
15261 +
15262 +struct caam_alg_entry {
15263 + struct device *dev;
15264 + int class1_alg_type;
15265 + int class2_alg_type;
15266 + bool rfc3686;
15267 + bool geniv;
15268 +};
15269 +
15270 +struct caam_aead_alg {
15271 + struct aead_alg aead;
15272 + struct caam_alg_entry caam;
15273 + bool registered;
15274 +};
15275 +
15276 +/**
15277 + * caam_ctx - per-session context
15278 + * @flc: Flow Contexts array
15279 + * @key: virtual address of the key(s): [authentication key], encryption key
15280 + * @key_dma: I/O virtual address of the key
15281 + * @dev: dpseci device
15282 + * @adata: authentication algorithm details
15283 + * @cdata: encryption algorithm details
15284 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15285 + */
15286 +struct caam_ctx {
15287 + struct caam_flc flc[NUM_OP];
15288 + u8 key[CAAM_MAX_KEY_SIZE];
15289 + dma_addr_t key_dma;
15290 + struct device *dev;
15291 + struct alginfo adata;
15292 + struct alginfo cdata;
15293 + unsigned int authsize;
15294 +};
15295 +
15296 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15297 + dma_addr_t iova_addr)
15298 +{
15299 + phys_addr_t phys_addr;
15300 +
15301 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15302 + iova_addr;
15303 +
15304 + return phys_to_virt(phys_addr);
15305 +}
15306 +
15307 +/*
15308 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
15309 + *
15310 + * Allocate data on the hotpath. Instead of using kmalloc, one can use the
15311 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15312 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15313 + * hosting 16 SG entries.
15314 + *
15315 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15316 + *
15317 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15318 + */
15319 +static inline void *qi_cache_alloc(gfp_t flags)
15320 +{
15321 + return kmem_cache_alloc(qi_cache, flags);
15322 +}
15323 +
15324 +/*
15325 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15326 + *
15327 + * @obj - buffer previously allocated by qi_cache_alloc
15328 + *
15329 + * No checking is done; the call is a pass-through to
15330 + * kmem_cache_free(...)
15331 + */
15332 +static inline void qi_cache_free(void *obj)
15333 +{
15334 + kmem_cache_free(qi_cache, obj);
15335 +}
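/*
 * A rough usage sketch of the qi_cache pattern above (the cache itself is
 * created elsewhere in this file; the cache name and error handling below
 * are illustrative assumptions, only the kmem_cache_* calls are real API):
 *
 *	qi_cache = kmem_cache_create("dpaa2_caam_qicache",
 *				     CAAM_QI_MEMCACHE_SIZE, 0,
 *				     SLAB_CACHE_DMA, NULL);
 *	if (!qi_cache)
 *		return -ENOMEM;
 *
 *	edesc = qi_cache_alloc(GFP_DMA | GFP_ATOMIC);	(hotpath, no sleep)
 *	...
 *	qi_cache_free(edesc);
 *	kmem_cache_destroy(qi_cache);			(on module exit)
 */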
15336 +
15337 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15338 +{
15339 + switch (crypto_tfm_alg_type(areq->tfm)) {
15340 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15341 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15342 + return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15343 + case CRYPTO_ALG_TYPE_AEAD:
15344 + return aead_request_ctx(container_of(areq, struct aead_request,
15345 + base));
15346 + default:
15347 + return ERR_PTR(-EINVAL);
15348 + }
15349 +}
15350 +
15351 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15352 + struct scatterlist *dst, int src_nents,
15353 + int dst_nents, dma_addr_t iv_dma, int ivsize,
15354 + enum optype op_type, dma_addr_t qm_sg_dma,
15355 + int qm_sg_bytes)
15356 +{
15357 + if (dst != src) {
15358 + if (src_nents)
15359 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15360 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15361 + } else {
15362 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15363 + }
15364 +
15365 + if (iv_dma)
15366 + dma_unmap_single(dev, iv_dma, ivsize,
15367 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15368 + DMA_TO_DEVICE);
15369 +
15370 + if (qm_sg_bytes)
15371 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15372 +}
15373 +
15374 +static int aead_set_sh_desc(struct crypto_aead *aead)
15375 +{
15376 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15377 + typeof(*alg), aead);
15378 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15379 + unsigned int ivsize = crypto_aead_ivsize(aead);
15380 + struct device *dev = ctx->dev;
15381 + struct caam_flc *flc;
15382 + u32 *desc;
15383 + u32 ctx1_iv_off = 0;
15384 + u32 *nonce = NULL;
15385 + unsigned int data_len[2];
15386 + u32 inl_mask;
15387 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15388 + OP_ALG_AAI_CTR_MOD128);
15389 + const bool is_rfc3686 = alg->caam.rfc3686;
15390 +
15391 + if (!ctx->cdata.keylen || !ctx->authsize)
15392 + return 0;
15393 +
15394 + /*
15395 + * AES-CTR needs to load IV in CONTEXT1 reg
15396 +	 * at an offset of 128 bits (16 bytes)
15397 + * CONTEXT1[255:128] = IV
15398 + */
15399 + if (ctr_mode)
15400 + ctx1_iv_off = 16;
15401 +
15402 + /*
15403 + * RFC3686 specific:
15404 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15405 + */
15406 + if (is_rfc3686) {
15407 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15408 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15409 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15410 + }
15411 +
15412 + data_len[0] = ctx->adata.keylen_pad;
15413 + data_len[1] = ctx->cdata.keylen;
15414 +
15415 + /* aead_encrypt shared descriptor */
15416 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15417 + DESC_QI_AEAD_ENC_LEN) +
15418 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15419 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15420 + ARRAY_SIZE(data_len)) < 0)
15421 + return -EINVAL;
15422 +
15423 + if (inl_mask & 1)
15424 + ctx->adata.key_virt = ctx->key;
15425 + else
15426 + ctx->adata.key_dma = ctx->key_dma;
15427 +
15428 + if (inl_mask & 2)
15429 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15430 + else
15431 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15432 +
15433 + ctx->adata.key_inline = !!(inl_mask & 1);
15434 + ctx->cdata.key_inline = !!(inl_mask & 2);
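	/*
	 * desc_inline_query() returns a per-key bitmask: bit 0 set means the
	 * (split) authentication key fits inline in the shared descriptor,
	 * bit 1 the encryption key; a cleared bit makes the descriptor
	 * reference that key by DMA address instead, as selected above.
	 */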
15435 +
15436 + flc = &ctx->flc[ENCRYPT];
15437 + desc = flc->sh_desc;
15438 +
15439 + if (alg->caam.geniv)
15440 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15441 + ivsize, ctx->authsize, is_rfc3686,
15442 + nonce, ctx1_iv_off, true);
15443 + else
15444 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15445 + ivsize, ctx->authsize, is_rfc3686, nonce,
15446 + ctx1_iv_off, true);
15447 +
15448 + flc->flc[1] = desc_len(desc); /* SDL */
15449 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15450 + desc_bytes(desc), DMA_TO_DEVICE);
15451 + if (dma_mapping_error(dev, flc->flc_dma)) {
15452 + dev_err(dev, "unable to map shared descriptor\n");
15453 + return -ENOMEM;
15454 + }
15455 +
15456 + /* aead_decrypt shared descriptor */
15457 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15458 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15459 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15460 + ARRAY_SIZE(data_len)) < 0)
15461 + return -EINVAL;
15462 +
15463 + if (inl_mask & 1)
15464 + ctx->adata.key_virt = ctx->key;
15465 + else
15466 + ctx->adata.key_dma = ctx->key_dma;
15467 +
15468 + if (inl_mask & 2)
15469 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15470 + else
15471 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15472 +
15473 + ctx->adata.key_inline = !!(inl_mask & 1);
15474 + ctx->cdata.key_inline = !!(inl_mask & 2);
15475 +
15476 + flc = &ctx->flc[DECRYPT];
15477 + desc = flc->sh_desc;
15478 +
15479 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15480 + ivsize, ctx->authsize, alg->caam.geniv,
15481 + is_rfc3686, nonce, ctx1_iv_off, true);
15482 +
15483 + flc->flc[1] = desc_len(desc); /* SDL */
15484 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15485 + desc_bytes(desc), DMA_TO_DEVICE);
15486 + if (dma_mapping_error(dev, flc->flc_dma)) {
15487 + dev_err(dev, "unable to map shared descriptor\n");
15488 + return -ENOMEM;
15489 + }
15490 +
15491 + return 0;
15492 +}
15493 +
15494 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15495 +{
15496 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15497 +
15498 + ctx->authsize = authsize;
15499 + aead_set_sh_desc(authenc);
15500 +
15501 + return 0;
15502 +}
15503 +
15504 +struct split_key_sh_result {
15505 + struct completion completion;
15506 + int err;
15507 + struct device *dev;
15508 +};
15509 +
15510 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15511 +{
15512 + struct split_key_sh_result *res = cbk_ctx;
15513 +
15514 +#ifdef DEBUG
15515 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15516 +#endif
15517 +
15518 + if (err)
15519 + caam_qi2_strstatus(res->dev, err);
15520 +
15521 + res->err = err;
15522 + complete(&res->completion);
15523 +}
15524 +
15525 +static int gen_split_key_sh(struct device *dev, u8 *key_out,
15526 + struct alginfo * const adata, const u8 *key_in,
15527 + u32 keylen)
15528 +{
15529 + struct caam_request *req_ctx;
15530 + u32 *desc;
15531 + struct split_key_sh_result result;
15532 + dma_addr_t dma_addr_in, dma_addr_out;
15533 + struct caam_flc *flc;
15534 + struct dpaa2_fl_entry *in_fle, *out_fle;
15535 + int ret = -ENOMEM;
15536 +
15537 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
15538 + if (!req_ctx)
15539 + return -ENOMEM;
15540 +
15541 + in_fle = &req_ctx->fd_flt[1];
15542 + out_fle = &req_ctx->fd_flt[0];
15543 +
15544 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
15545 + if (!flc)
15546 + goto err_flc;
15547 +
15548 + dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
15549 + DMA_TO_DEVICE);
15550 + if (dma_mapping_error(dev, dma_addr_in)) {
15551 + dev_err(dev, "unable to map key input memory\n");
15552 + goto err_dma_addr_in;
15553 + }
15554 +
15555 + dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
15556 + DMA_FROM_DEVICE);
15557 + if (dma_mapping_error(dev, dma_addr_out)) {
15558 + dev_err(dev, "unable to map key output memory\n");
15559 + goto err_dma_addr_out;
15560 + }
15561 +
15562 + desc = flc->sh_desc;
15563 +
15564 + init_sh_desc(desc, 0);
15565 + append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
15566 +
15567 +	/* Set MDHA up in HMAC-INIT mode */
15568 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
15569 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
15570 + OP_ALG_AS_INIT);
15571 +
15572 + /*
15573 +	 * do a FIFO_LOAD of zero; this will trigger the internal key expansion
15574 + * into both pads inside MDHA
15575 + */
15576 + append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
15577 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
15578 +
15579 + /*
15580 + * FIFO_STORE with the explicit split-key content store
15581 + * (0x26 output type)
15582 + */
15583 + append_fifo_store(desc, dma_addr_out, adata->keylen,
15584 + LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
15585 +
15586 + flc->flc[1] = desc_len(desc); /* SDL */
15587 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15588 + desc_bytes(desc), DMA_TO_DEVICE);
15589 + if (dma_mapping_error(dev, flc->flc_dma)) {
15590 + dev_err(dev, "unable to map shared descriptor\n");
15591 + goto err_flc_dma;
15592 + }
15593 +
15594 + dpaa2_fl_set_final(in_fle, true);
15595 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
15596 + dpaa2_fl_set_addr(in_fle, dma_addr_in);
15597 + dpaa2_fl_set_len(in_fle, keylen);
15598 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15599 + dpaa2_fl_set_addr(out_fle, dma_addr_out);
15600 + dpaa2_fl_set_len(out_fle, adata->keylen_pad);
15601 +
15602 +#ifdef DEBUG
15603 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15604 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
15605 + print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
15606 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
15607 +#endif
15608 +
15609 + result.err = 0;
15610 + init_completion(&result.completion);
15611 + result.dev = dev;
15612 +
15613 + req_ctx->flc = flc;
15614 + req_ctx->cbk = split_key_sh_done;
15615 + req_ctx->ctx = &result;
15616 +
15617 + ret = dpaa2_caam_enqueue(dev, req_ctx);
15618 + if (ret == -EINPROGRESS) {
15619 + /* in progress */
15620 + wait_for_completion(&result.completion);
15621 + ret = result.err;
15622 +#ifdef DEBUG
15623 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15624 + DUMP_PREFIX_ADDRESS, 16, 4, key_out,
15625 + adata->keylen_pad, 1);
15626 +#endif
15627 + }
15628 +
15629 + dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
15630 + DMA_TO_DEVICE);
15631 +err_flc_dma:
15632 + dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
15633 +err_dma_addr_out:
15634 + dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
15635 +err_dma_addr_in:
15636 + kfree(flc);
15637 +err_flc:
15638 + kfree(req_ctx);
15639 + return ret;
15640 +}
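/*
 * The "split key" produced above is the MDHA-precomputed pair of HMAC
 * inner/outer pad states (IPAD/OPAD) rather than the raw authentication key,
 * which is what lets the AEAD shared descriptors run with
 * OP_ALG_AAI_HMAC_PRECOMP instead of rehashing the key on every request.
 */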
15641 +
15642 +static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
15643 + u32 authkeylen)
15644 +{
15645 + return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
15646 + authkeylen);
15647 +}
15648 +
15649 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15650 + unsigned int keylen)
15651 +{
15652 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15653 + struct device *dev = ctx->dev;
15654 + struct crypto_authenc_keys keys;
15655 + int ret;
15656 +
15657 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15658 + goto badkey;
15659 +
15660 +#ifdef DEBUG
15661 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15662 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
15663 + keys.authkeylen);
15664 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15665 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15666 +#endif
15667 +
15668 + ctx->adata.keylen = split_key_len(ctx->adata.algtype &
15669 + OP_ALG_ALGSEL_MASK);
15670 + ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
15671 + OP_ALG_ALGSEL_MASK);
15672 +
15673 +#ifdef DEBUG
15674 + dev_err(dev, "split keylen %d split keylen padded %d\n",
15675 + ctx->adata.keylen, ctx->adata.keylen_pad);
15676 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15677 + DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
15678 +#endif
15679 +
15680 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15681 + goto badkey;
15682 +
15683 + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
15684 + if (ret)
15685 + goto badkey;
15686 +
15687 +	/* append the encryption key after the auth split key */
15688 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15689 +
15690 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
15691 + keys.enckeylen, DMA_TO_DEVICE);
15692 + if (dma_mapping_error(dev, ctx->key_dma)) {
15693 + dev_err(dev, "unable to map key i/o memory\n");
15694 + return -ENOMEM;
15695 + }
15696 +#ifdef DEBUG
15697 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15698 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15699 + ctx->adata.keylen_pad + keys.enckeylen, 1);
15700 +#endif
15701 +
15702 + ctx->cdata.keylen = keys.enckeylen;
15703 +
15704 + ret = aead_set_sh_desc(aead);
15705 + if (ret)
15706 + dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
15707 + keys.enckeylen, DMA_TO_DEVICE);
15708 +
15709 + return ret;
15710 +badkey:
15711 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15712 + return -EINVAL;
15713 +}
15714 +
15715 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15716 + bool encrypt)
15717 +{
15718 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
15719 + struct caam_request *req_ctx = aead_request_ctx(req);
15720 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15721 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15722 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15723 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15724 + typeof(*alg), aead);
15725 + struct device *dev = ctx->dev;
15726 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15727 + GFP_KERNEL : GFP_ATOMIC;
15728 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15729 + struct aead_edesc *edesc;
15730 + dma_addr_t qm_sg_dma, iv_dma = 0;
15731 + int ivsize = 0;
15732 + unsigned int authsize = ctx->authsize;
15733 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15734 + int in_len, out_len;
15735 + struct dpaa2_sg_entry *sg_table;
15736 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15737 +
15738 + /* allocate space for base edesc and link tables */
15739 + edesc = qi_cache_alloc(GFP_DMA | flags);
15740 + if (unlikely(!edesc)) {
15741 + dev_err(dev, "could not allocate extended descriptor\n");
15742 + return ERR_PTR(-ENOMEM);
15743 + }
15744 +
15745 + if (unlikely(req->dst != req->src)) {
15746 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15747 + req->cryptlen);
15748 + if (unlikely(src_nents < 0)) {
15749 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15750 + req->assoclen + req->cryptlen);
15751 + qi_cache_free(edesc);
15752 + return ERR_PTR(src_nents);
15753 + }
15754 +
15755 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15756 + req->cryptlen +
15757 + (encrypt ? authsize :
15758 + (-authsize)));
15759 + if (unlikely(dst_nents < 0)) {
15760 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15761 + req->assoclen + req->cryptlen +
15762 + (encrypt ? authsize : (-authsize)));
15763 + qi_cache_free(edesc);
15764 + return ERR_PTR(dst_nents);
15765 + }
15766 +
15767 + if (src_nents) {
15768 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15769 + DMA_TO_DEVICE);
15770 + if (unlikely(!mapped_src_nents)) {
15771 + dev_err(dev, "unable to map source\n");
15772 + qi_cache_free(edesc);
15773 + return ERR_PTR(-ENOMEM);
15774 + }
15775 + } else {
15776 + mapped_src_nents = 0;
15777 + }
15778 +
15779 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15780 + DMA_FROM_DEVICE);
15781 + if (unlikely(!mapped_dst_nents)) {
15782 + dev_err(dev, "unable to map destination\n");
15783 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15784 + qi_cache_free(edesc);
15785 + return ERR_PTR(-ENOMEM);
15786 + }
15787 + } else {
15788 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15789 + req->cryptlen +
15790 + (encrypt ? authsize : 0));
15791 + if (unlikely(src_nents < 0)) {
15792 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15793 + req->assoclen + req->cryptlen +
15794 + (encrypt ? authsize : 0));
15795 + qi_cache_free(edesc);
15796 + return ERR_PTR(src_nents);
15797 + }
15798 +
15799 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15800 + DMA_BIDIRECTIONAL);
15801 + if (unlikely(!mapped_src_nents)) {
15802 + dev_err(dev, "unable to map source\n");
15803 + qi_cache_free(edesc);
15804 + return ERR_PTR(-ENOMEM);
15805 + }
15806 + }
15807 +
15808 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15809 + ivsize = crypto_aead_ivsize(aead);
15810 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15811 + if (dma_mapping_error(dev, iv_dma)) {
15812 + dev_err(dev, "unable to map IV\n");
15813 + caam_unmap(dev, req->src, req->dst, src_nents,
15814 + dst_nents, 0, 0, op_type, 0, 0);
15815 + qi_cache_free(edesc);
15816 + return ERR_PTR(-ENOMEM);
15817 + }
15818 + }
15819 +
15820 + /*
15821 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15822 + * Input is not contiguous.
15823 + */
15824 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15825 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15826 + if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15827 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15828 + qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15829 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15830 + iv_dma, ivsize, op_type, 0, 0);
15831 + qi_cache_free(edesc);
15832 + return ERR_PTR(-ENOMEM);
15833 + }
15834 + sg_table = &edesc->sgt[0];
15835 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15836 +
15837 + edesc->src_nents = src_nents;
15838 + edesc->dst_nents = dst_nents;
15839 + edesc->iv_dma = iv_dma;
15840 +
15841 + edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15842 + DMA_TO_DEVICE);
15843 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15844 + dev_err(dev, "unable to map assoclen\n");
15845 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15846 + iv_dma, ivsize, op_type, 0, 0);
15847 + qi_cache_free(edesc);
15848 + return ERR_PTR(-ENOMEM);
15849 + }
15850 +
15851 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15852 + qm_sg_index++;
15853 + if (ivsize) {
15854 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15855 + qm_sg_index++;
15856 + }
15857 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15858 + qm_sg_index += mapped_src_nents;
15859 +
15860 + if (mapped_dst_nents > 1)
15861 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15862 + qm_sg_index, 0);
15863 +
15864 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15865 + if (dma_mapping_error(dev, qm_sg_dma)) {
15866 + dev_err(dev, "unable to map S/G table\n");
15867 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15868 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15869 + iv_dma, ivsize, op_type, 0, 0);
15870 + qi_cache_free(edesc);
15871 + return ERR_PTR(-ENOMEM);
15872 + }
15873 +
15874 + edesc->qm_sg_dma = qm_sg_dma;
15875 + edesc->qm_sg_bytes = qm_sg_bytes;
15876 +
15877 + out_len = req->assoclen + req->cryptlen +
15878 + (encrypt ? ctx->authsize : (-ctx->authsize));
15879 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15880 +
15881 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15882 + dpaa2_fl_set_final(in_fle, true);
15883 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15884 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15885 + dpaa2_fl_set_len(in_fle, in_len);
15886 +
15887 + if (req->dst == req->src) {
15888 + if (mapped_src_nents == 1) {
15889 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15890 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15891 + } else {
15892 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15893 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15894 + (1 + !!ivsize) * sizeof(*sg_table));
15895 + }
15896 + } else if (mapped_dst_nents == 1) {
15897 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15898 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15899 + } else {
15900 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15901 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15902 + sizeof(*sg_table));
15903 + }
15904 +
15905 + dpaa2_fl_set_len(out_fle, out_len);
15906 +
15907 + return edesc;
15908 +}
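/*
 * Worked example of the S/G accounting in aead_edesc_alloc() (the entry
 * counts are illustrative): an encrypt request with an IV, a 2-entry source
 * and a separate 3-entry destination gives
 *
 *   qm_sg_nents = 1 (assoclen) + 1 (IV) + 2 (src) + 3 (dst) = 7
 *
 * The input frame-list entry points at the start of the table and its chain
 * ends at the last source entry (marked final by sg_to_qm_sg_last()), while
 * the output entry points at qm_sg_dma + 4 * sizeof(*sg_table), i.e. the
 * first destination entry.
 */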
15909 +
15910 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15911 + bool encrypt)
15912 +{
15913 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
15914 + unsigned int blocksize = crypto_aead_blocksize(tls);
15915 + unsigned int padsize, authsize;
15916 + struct caam_request *req_ctx = aead_request_ctx(req);
15917 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15918 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15919 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
15920 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15921 + typeof(*alg), aead);
15922 + struct device *dev = ctx->dev;
15923 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15924 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15925 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15926 + struct tls_edesc *edesc;
15927 + dma_addr_t qm_sg_dma, iv_dma = 0;
15928 + int ivsize = 0;
15929 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15930 + int in_len, out_len;
15931 + struct dpaa2_sg_entry *sg_table;
15932 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15933 + struct scatterlist *dst;
15934 +
15935 + if (encrypt) {
15936 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15937 + blocksize);
15938 + authsize = ctx->authsize + padsize;
15939 + } else {
15940 + authsize = ctx->authsize;
15941 + }
15942 +
15943 + /* allocate space for base edesc and link tables */
15944 + edesc = qi_cache_alloc(GFP_DMA | flags);
15945 + if (unlikely(!edesc)) {
15946 + dev_err(dev, "could not allocate extended descriptor\n");
15947 + return ERR_PTR(-ENOMEM);
15948 + }
15949 +
15950 + if (likely(req->src == req->dst)) {
15951 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15952 + req->cryptlen +
15953 + (encrypt ? authsize : 0));
15954 + if (unlikely(src_nents < 0)) {
15955 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15956 + req->assoclen + req->cryptlen +
15957 + (encrypt ? authsize : 0));
15958 + qi_cache_free(edesc);
15959 + return ERR_PTR(src_nents);
15960 + }
15961 +
15962 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15963 + DMA_BIDIRECTIONAL);
15964 + if (unlikely(!mapped_src_nents)) {
15965 + dev_err(dev, "unable to map source\n");
15966 + qi_cache_free(edesc);
15967 + return ERR_PTR(-ENOMEM);
15968 + }
15969 + dst = req->dst;
15970 + } else {
15971 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15972 + req->cryptlen);
15973 + if (unlikely(src_nents < 0)) {
15974 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15975 + req->assoclen + req->cryptlen);
15976 + qi_cache_free(edesc);
15977 + return ERR_PTR(src_nents);
15978 + }
15979 +
15980 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15981 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
15982 + (encrypt ? authsize : 0));
15983 + if (unlikely(dst_nents < 0)) {
15984 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15985 + req->cryptlen +
15986 + (encrypt ? authsize : 0));
15987 + qi_cache_free(edesc);
15988 + return ERR_PTR(dst_nents);
15989 + }
15990 +
15991 + if (src_nents) {
15992 + mapped_src_nents = dma_map_sg(dev, req->src,
15993 + src_nents, DMA_TO_DEVICE);
15994 + if (unlikely(!mapped_src_nents)) {
15995 + dev_err(dev, "unable to map source\n");
15996 + qi_cache_free(edesc);
15997 + return ERR_PTR(-ENOMEM);
15998 + }
15999 + } else {
16000 + mapped_src_nents = 0;
16001 + }
16002 +
16003 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
16004 + DMA_FROM_DEVICE);
16005 + if (unlikely(!mapped_dst_nents)) {
16006 + dev_err(dev, "unable to map destination\n");
16007 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16008 + qi_cache_free(edesc);
16009 + return ERR_PTR(-ENOMEM);
16010 + }
16011 + }
16012 +
16013 + ivsize = crypto_aead_ivsize(tls);
16014 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16015 + if (dma_mapping_error(dev, iv_dma)) {
16016 + dev_err(dev, "unable to map IV\n");
16017 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
16018 + op_type, 0, 0);
16019 + qi_cache_free(edesc);
16020 + return ERR_PTR(-ENOMEM);
16021 + }
16022 +
16023 + /*
16024 + * Create S/G table: IV, src, dst.
16025 + * Input is not contiguous.
16026 + */
16027 + qm_sg_ents = 1 + mapped_src_nents +
16028 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16029 + sg_table = &edesc->sgt[0];
16030 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16031 +
16032 + edesc->src_nents = src_nents;
16033 + edesc->dst_nents = dst_nents;
16034 + edesc->dst = dst;
16035 + edesc->iv_dma = iv_dma;
16036 +
16037 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16038 + qm_sg_index = 1;
16039 +
16040 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16041 + qm_sg_index += mapped_src_nents;
16042 +
16043 + if (mapped_dst_nents > 1)
16044 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16045 + qm_sg_index, 0);
16046 +
16047 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16048 + if (dma_mapping_error(dev, qm_sg_dma)) {
16049 + dev_err(dev, "unable to map S/G table\n");
16050 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16051 + ivsize, op_type, 0, 0);
16052 + qi_cache_free(edesc);
16053 + return ERR_PTR(-ENOMEM);
16054 + }
16055 +
16056 + edesc->qm_sg_dma = qm_sg_dma;
16057 + edesc->qm_sg_bytes = qm_sg_bytes;
16058 +
16059 + out_len = req->cryptlen + (encrypt ? authsize : 0);
16060 + in_len = ivsize + req->assoclen + req->cryptlen;
16061 +
16062 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16063 + dpaa2_fl_set_final(in_fle, true);
16064 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16065 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16066 + dpaa2_fl_set_len(in_fle, in_len);
16067 +
16068 + if (req->dst == req->src) {
16069 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16070 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16071 + (sg_nents_for_len(req->src, req->assoclen) +
16072 + 1) * sizeof(*sg_table));
16073 + } else if (mapped_dst_nents == 1) {
16074 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16075 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16076 + } else {
16077 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16078 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16079 + sizeof(*sg_table));
16080 + }
16081 +
16082 + dpaa2_fl_set_len(out_fle, out_len);
16083 +
16084 + return edesc;
16085 +}
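/*
 * Worked example of the encrypt-side padsize arithmetic in tls_edesc_alloc(),
 * assuming tls10(hmac(sha1),cbc(aes)) (blocksize = 16, ctx->authsize = 20)
 * and req->cryptlen = 32:
 *
 *   padsize  = 16 - ((32 + 20) % 16) = 16 - 4 = 12
 *   authsize = 20 + 12              = 32
 *   out_len  = 32 + 32              = 64, a whole number of AES blocks,
 *                                        as TLS 1.0 CBC padding requires
 */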
16086 +
16087 +static int tls_set_sh_desc(struct crypto_aead *tls)
16088 +{
16089 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16090 + unsigned int ivsize = crypto_aead_ivsize(tls);
16091 + unsigned int blocksize = crypto_aead_blocksize(tls);
16092 + struct device *dev = ctx->dev;
16093 + struct caam_flc *flc;
16094 + u32 *desc;
16095 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
16096 + unsigned int data_len[2];
16097 + u32 inl_mask;
16098 +
16099 + if (!ctx->cdata.keylen || !ctx->authsize)
16100 + return 0;
16101 +
16102 + /*
16103 + * TLS 1.0 encrypt shared descriptor
16104 + * Job Descriptor and Shared Descriptor
16105 + * must fit into the 64-word Descriptor h/w Buffer
16106 + */
16107 + data_len[0] = ctx->adata.keylen_pad;
16108 + data_len[1] = ctx->cdata.keylen;
16109 +
16110 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16111 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
16112 + return -EINVAL;
16113 +
16114 + if (inl_mask & 1)
16115 + ctx->adata.key_virt = ctx->key;
16116 + else
16117 + ctx->adata.key_dma = ctx->key_dma;
16118 +
16119 + if (inl_mask & 2)
16120 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16121 + else
16122 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16123 +
16124 + ctx->adata.key_inline = !!(inl_mask & 1);
16125 + ctx->cdata.key_inline = !!(inl_mask & 2);
16126 +
16127 + flc = &ctx->flc[ENCRYPT];
16128 + desc = flc->sh_desc;
16129 +
16130 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16131 + assoclen, ivsize, ctx->authsize, blocksize);
16132 +
16133 + flc->flc[1] = desc_len(desc);
16134 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16135 + desc_bytes(desc), DMA_TO_DEVICE);
16136 +
16137 + if (dma_mapping_error(dev, flc->flc_dma)) {
16138 + dev_err(dev, "unable to map shared descriptor\n");
16139 + return -ENOMEM;
16140 + }
16141 +
16142 + /*
16143 + * TLS 1.0 decrypt shared descriptor
16144 + * Keys do not fit inline, regardless of algorithms used
16145 + */
16146 + ctx->adata.key_dma = ctx->key_dma;
16147 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16148 +
16149 + flc = &ctx->flc[DECRYPT];
16150 + desc = flc->sh_desc;
16151 +
16152 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16153 + ctx->authsize, blocksize);
16154 +
16155 + flc->flc[1] = desc_len(desc); /* SDL */
16156 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16157 + desc_bytes(desc), DMA_TO_DEVICE);
16158 + if (dma_mapping_error(dev, flc->flc_dma)) {
16159 + dev_err(dev, "unable to map shared descriptor\n");
16160 + return -ENOMEM;
16161 + }
16162 +
16163 + return 0;
16164 +}
16165 +
16166 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16167 + unsigned int keylen)
16168 +{
16169 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16170 + struct device *dev = ctx->dev;
16171 + struct crypto_authenc_keys keys;
16172 + int ret;
16173 +
16174 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16175 + goto badkey;
16176 +
16177 +#ifdef DEBUG
16178 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16179 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16180 + keys.authkeylen);
16181 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16182 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16183 +#endif
16184 +
16185 + ctx->adata.keylen = split_key_len(ctx->adata.algtype &
16186 + OP_ALG_ALGSEL_MASK);
16187 + ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
16188 + OP_ALG_ALGSEL_MASK);
16189 +
16190 +#ifdef DEBUG
16191 + dev_err(dev, "split keylen %d split keylen padded %d\n",
16192 + ctx->adata.keylen, ctx->adata.keylen_pad);
16193 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16194 + DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
16195 + keys.authkeylen + keys.enckeylen, 1);
16196 +#endif
16197 +
16198 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16199 + goto badkey;
16200 +
16201 + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
16202 + if (ret)
16203 + goto badkey;
16204 +
16205 +	/* append the encryption key after the auth split key */
16206 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16207 +
16208 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
16209 + keys.enckeylen, DMA_TO_DEVICE);
16210 + if (dma_mapping_error(dev, ctx->key_dma)) {
16211 + dev_err(dev, "unable to map key i/o memory\n");
16212 + return -ENOMEM;
16213 + }
16214 +#ifdef DEBUG
16215 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16216 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16217 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16218 +#endif
16219 +
16220 + ctx->cdata.keylen = keys.enckeylen;
16221 +
16222 + ret = tls_set_sh_desc(tls);
16223 + if (ret)
16224 + dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
16225 + keys.enckeylen, DMA_TO_DEVICE);
16226 +
16227 + return ret;
16228 +badkey:
16229 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16230 + return -EINVAL;
16231 +}
16232 +
16233 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16234 +{
16235 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16236 +
16237 + ctx->authsize = authsize;
16238 + tls_set_sh_desc(tls);
16239 +
16240 + return 0;
16241 +}
16242 +
16243 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16244 +{
16245 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16246 + struct device *dev = ctx->dev;
16247 + unsigned int ivsize = crypto_aead_ivsize(aead);
16248 + struct caam_flc *flc;
16249 + u32 *desc;
16250 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16251 + ctx->cdata.keylen;
16252 +
16253 + if (!ctx->cdata.keylen || !ctx->authsize)
16254 + return 0;
16255 +
16256 + /*
16257 + * AES GCM encrypt shared descriptor
16258 + * Job Descriptor and Shared Descriptor
16259 + * must fit into the 64-word Descriptor h/w Buffer
16260 + */
16261 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16262 + ctx->cdata.key_inline = true;
16263 + ctx->cdata.key_virt = ctx->key;
16264 + } else {
16265 + ctx->cdata.key_inline = false;
16266 + ctx->cdata.key_dma = ctx->key_dma;
16267 + }
16268 +
16269 + flc = &ctx->flc[ENCRYPT];
16270 + desc = flc->sh_desc;
16271 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16272 +
16273 + flc->flc[1] = desc_len(desc); /* SDL */
16274 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16275 + desc_bytes(desc), DMA_TO_DEVICE);
16276 + if (dma_mapping_error(dev, flc->flc_dma)) {
16277 + dev_err(dev, "unable to map shared descriptor\n");
16278 + return -ENOMEM;
16279 + }
16280 +
16281 + /*
16282 + * Job Descriptor and Shared Descriptors
16283 + * must all fit into the 64-word Descriptor h/w Buffer
16284 + */
16285 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16286 + ctx->cdata.key_inline = true;
16287 + ctx->cdata.key_virt = ctx->key;
16288 + } else {
16289 + ctx->cdata.key_inline = false;
16290 + ctx->cdata.key_dma = ctx->key_dma;
16291 + }
16292 +
16293 + flc = &ctx->flc[DECRYPT];
16294 + desc = flc->sh_desc;
16295 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16296 +
16297 + flc->flc[1] = desc_len(desc); /* SDL */
16298 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16299 + desc_bytes(desc), DMA_TO_DEVICE);
16300 + if (dma_mapping_error(dev, flc->flc_dma)) {
16301 + dev_err(dev, "unable to map shared descriptor\n");
16302 + return -ENOMEM;
16303 + }
16304 +
16305 + return 0;
16306 +}
16307 +
16308 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16309 +{
16310 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16311 +
16312 + ctx->authsize = authsize;
16313 + gcm_set_sh_desc(authenc);
16314 +
16315 + return 0;
16316 +}
16317 +
16318 +static int gcm_setkey(struct crypto_aead *aead,
16319 + const u8 *key, unsigned int keylen)
16320 +{
16321 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16322 + struct device *dev = ctx->dev;
16323 + int ret;
16324 +
16325 +#ifdef DEBUG
16326 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16327 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16328 +#endif
16329 +
16330 + memcpy(ctx->key, key, keylen);
16331 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16332 + if (dma_mapping_error(dev, ctx->key_dma)) {
16333 + dev_err(dev, "unable to map key i/o memory\n");
16334 + return -ENOMEM;
16335 + }
16336 + ctx->cdata.keylen = keylen;
16337 +
16338 + ret = gcm_set_sh_desc(aead);
16339 + if (ret)
16340 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16341 + DMA_TO_DEVICE);
16342 +
16343 + return ret;
16344 +}
16345 +
16346 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16347 +{
16348 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16349 + struct device *dev = ctx->dev;
16350 + unsigned int ivsize = crypto_aead_ivsize(aead);
16351 + struct caam_flc *flc;
16352 + u32 *desc;
16353 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16354 + ctx->cdata.keylen;
16355 +
16356 + if (!ctx->cdata.keylen || !ctx->authsize)
16357 + return 0;
16358 +
16359 + ctx->cdata.key_virt = ctx->key;
16360 +
16361 + /*
16362 + * RFC4106 encrypt shared descriptor
16363 + * Job Descriptor and Shared Descriptor
16364 + * must fit into the 64-word Descriptor h/w Buffer
16365 + */
16366 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16367 + ctx->cdata.key_inline = true;
16368 + } else {
16369 + ctx->cdata.key_inline = false;
16370 + ctx->cdata.key_dma = ctx->key_dma;
16371 + }
16372 +
16373 + flc = &ctx->flc[ENCRYPT];
16374 + desc = flc->sh_desc;
16375 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16376 + true);
16377 +
16378 + flc->flc[1] = desc_len(desc); /* SDL */
16379 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16380 + desc_bytes(desc), DMA_TO_DEVICE);
16381 + if (dma_mapping_error(dev, flc->flc_dma)) {
16382 + dev_err(dev, "unable to map shared descriptor\n");
16383 + return -ENOMEM;
16384 + }
16385 +
16386 + /*
16387 + * Job Descriptor and Shared Descriptors
16388 + * must all fit into the 64-word Descriptor h/w Buffer
16389 + */
16390 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16391 + ctx->cdata.key_inline = true;
16392 + } else {
16393 + ctx->cdata.key_inline = false;
16394 + ctx->cdata.key_dma = ctx->key_dma;
16395 + }
16396 +
16397 + flc = &ctx->flc[DECRYPT];
16398 + desc = flc->sh_desc;
16399 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16400 + true);
16401 +
16402 + flc->flc[1] = desc_len(desc); /* SDL */
16403 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16404 + desc_bytes(desc), DMA_TO_DEVICE);
16405 + if (dma_mapping_error(dev, flc->flc_dma)) {
16406 + dev_err(dev, "unable to map shared descriptor\n");
16407 + return -ENOMEM;
16408 + }
16409 +
16410 + return 0;
16411 +}
16412 +
16413 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16414 + unsigned int authsize)
16415 +{
16416 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16417 +
16418 + ctx->authsize = authsize;
16419 + rfc4106_set_sh_desc(authenc);
16420 +
16421 + return 0;
16422 +}
16423 +
16424 +static int rfc4106_setkey(struct crypto_aead *aead,
16425 + const u8 *key, unsigned int keylen)
16426 +{
16427 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16428 + struct device *dev = ctx->dev;
16429 + int ret;
16430 +
16431 + if (keylen < 4)
16432 + return -EINVAL;
16433 +
16434 +#ifdef DEBUG
16435 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16436 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16437 +#endif
16438 +
16439 + memcpy(ctx->key, key, keylen);
16440 + /*
16441 + * The last four bytes of the key material are used as the salt value
16442 + * in the nonce. Update the AES key length.
16443 + */
16444 + ctx->cdata.keylen = keylen - 4;
16445 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16446 + DMA_TO_DEVICE);
16447 + if (dma_mapping_error(dev, ctx->key_dma)) {
16448 + dev_err(dev, "unable to map key i/o memory\n");
16449 + return -ENOMEM;
16450 + }
16451 +
16452 + ret = rfc4106_set_sh_desc(aead);
16453 + if (ret)
16454 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16455 + DMA_TO_DEVICE);
16456 +
16457 + return ret;
16458 +}
16459 +
16460 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16461 +{
16462 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16463 + struct device *dev = ctx->dev;
16464 + unsigned int ivsize = crypto_aead_ivsize(aead);
16465 + struct caam_flc *flc;
16466 + u32 *desc;
16467 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16468 + ctx->cdata.keylen;
16469 +
16470 + if (!ctx->cdata.keylen || !ctx->authsize)
16471 + return 0;
16472 +
16473 + ctx->cdata.key_virt = ctx->key;
16474 +
16475 + /*
16476 + * RFC4543 encrypt shared descriptor
16477 + * Job Descriptor and Shared Descriptor
16478 + * must fit into the 64-word Descriptor h/w Buffer
16479 + */
16480 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16481 + ctx->cdata.key_inline = true;
16482 + } else {
16483 + ctx->cdata.key_inline = false;
16484 + ctx->cdata.key_dma = ctx->key_dma;
16485 + }
16486 +
16487 + flc = &ctx->flc[ENCRYPT];
16488 + desc = flc->sh_desc;
16489 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16490 + true);
16491 +
16492 + flc->flc[1] = desc_len(desc); /* SDL */
16493 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16494 + desc_bytes(desc), DMA_TO_DEVICE);
16495 + if (dma_mapping_error(dev, flc->flc_dma)) {
16496 + dev_err(dev, "unable to map shared descriptor\n");
16497 + return -ENOMEM;
16498 + }
16499 +
16500 + /*
16501 + * Job Descriptor and Shared Descriptors
16502 + * must all fit into the 64-word Descriptor h/w Buffer
16503 + */
16504 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16505 + ctx->cdata.key_inline = true;
16506 + } else {
16507 + ctx->cdata.key_inline = false;
16508 + ctx->cdata.key_dma = ctx->key_dma;
16509 + }
16510 +
16511 + flc = &ctx->flc[DECRYPT];
16512 + desc = flc->sh_desc;
16513 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16514 + true);
16515 +
16516 + flc->flc[1] = desc_len(desc); /* SDL */
16517 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16518 + desc_bytes(desc), DMA_TO_DEVICE);
16519 + if (dma_mapping_error(dev, flc->flc_dma)) {
16520 + dev_err(dev, "unable to map shared descriptor\n");
16521 + return -ENOMEM;
16522 + }
16523 +
16524 + return 0;
16525 +}
16526 +
16527 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16528 + unsigned int authsize)
16529 +{
16530 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16531 +
16532 + ctx->authsize = authsize;
16533 + rfc4543_set_sh_desc(authenc);
16534 +
16535 + return 0;
16536 +}
16537 +
16538 +static int rfc4543_setkey(struct crypto_aead *aead,
16539 + const u8 *key, unsigned int keylen)
16540 +{
16541 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16542 + struct device *dev = ctx->dev;
16543 + int ret;
16544 +
16545 + if (keylen < 4)
16546 + return -EINVAL;
16547 +
16548 +#ifdef DEBUG
16549 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16550 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16551 +#endif
16552 +
16553 + memcpy(ctx->key, key, keylen);
16554 + /*
16555 + * The last four bytes of the key material are used as the salt value
16556 + * in the nonce. Update the AES key length.
16557 + */
16558 + ctx->cdata.keylen = keylen - 4;
16559 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16560 + DMA_TO_DEVICE);
16561 + if (dma_mapping_error(dev, ctx->key_dma)) {
16562 + dev_err(dev, "unable to map key i/o memory\n");
16563 + return -ENOMEM;
16564 + }
16565 +
16566 + ret = rfc4543_set_sh_desc(aead);
16567 + if (ret)
16568 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16569 + DMA_TO_DEVICE);
16570 +
16571 + return ret;
16572 +}
16573 +
16574 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16575 + const u8 *key, unsigned int keylen)
16576 +{
16577 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16578 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16579 + const char *alg_name = crypto_tfm_alg_name(tfm);
16580 + struct device *dev = ctx->dev;
16581 + struct caam_flc *flc;
16582 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16583 + u32 *desc;
16584 + u32 ctx1_iv_off = 0;
16585 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16586 + OP_ALG_AAI_CTR_MOD128);
16587 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16588 +
16589 + memcpy(ctx->key, key, keylen);
16590 +#ifdef DEBUG
16591 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16592 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16593 +#endif
16594 + /*
16595 + * AES-CTR needs to load IV in CONTEXT1 reg
16596 + * at an offset of 128bits (16bytes)
16597 + * CONTEXT1[255:128] = IV
16598 + */
16599 + if (ctr_mode)
16600 + ctx1_iv_off = 16;
16601 +
16602 + /*
16603 + * RFC3686 specific:
16604 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16605 + * | *key = {KEY, NONCE}
16606 + */
16607 + if (is_rfc3686) {
16608 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16609 + keylen -= CTR_RFC3686_NONCE_SIZE;
16610 + }
16611 +
16612 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16613 + if (dma_mapping_error(dev, ctx->key_dma)) {
16614 + dev_err(dev, "unable to map key i/o memory\n");
16615 + return -ENOMEM;
16616 + }
16617 + ctx->cdata.keylen = keylen;
16618 + ctx->cdata.key_virt = ctx->key;
16619 + ctx->cdata.key_inline = true;
16620 +
16621 + /* ablkcipher_encrypt shared descriptor */
16622 + flc = &ctx->flc[ENCRYPT];
16623 + desc = flc->sh_desc;
16624 +
16625 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16626 + is_rfc3686, ctx1_iv_off);
16627 +
16628 + flc->flc[1] = desc_len(desc); /* SDL */
16629 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16630 + desc_bytes(desc), DMA_TO_DEVICE);
16631 + if (dma_mapping_error(dev, flc->flc_dma)) {
16632 + dev_err(dev, "unable to map shared descriptor\n");
16633 + return -ENOMEM;
16634 + }
16635 +
16636 + /* ablkcipher_decrypt shared descriptor */
16637 + flc = &ctx->flc[DECRYPT];
16638 + desc = flc->sh_desc;
16639 +
16640 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16641 + is_rfc3686, ctx1_iv_off);
16642 +
16643 + flc->flc[1] = desc_len(desc); /* SDL */
16644 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16645 + desc_bytes(desc), DMA_TO_DEVICE);
16646 + if (dma_mapping_error(dev, flc->flc_dma)) {
16647 + dev_err(dev, "unable to map shared descriptor\n");
16648 + return -ENOMEM;
16649 + }
16650 +
16651 + /* ablkcipher_givencrypt shared descriptor */
16652 + flc = &ctx->flc[GIVENCRYPT];
16653 + desc = flc->sh_desc;
16654 +
16655 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16656 + ivsize, is_rfc3686, ctx1_iv_off);
16657 +
16658 + flc->flc[1] = desc_len(desc); /* SDL */
16659 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16660 + desc_bytes(desc), DMA_TO_DEVICE);
16661 + if (dma_mapping_error(dev, flc->flc_dma)) {
16662 + dev_err(dev, "unable to map shared descriptor\n");
16663 + return -ENOMEM;
16664 + }
16665 +
16666 + return 0;
16667 +}
16668 +
16669 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16670 + const u8 *key, unsigned int keylen)
16671 +{
16672 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16673 + struct device *dev = ctx->dev;
16674 + struct caam_flc *flc;
16675 + u32 *desc;
16676 +
16677 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16678 + dev_err(dev, "key size mismatch\n");
16679 + crypto_ablkcipher_set_flags(ablkcipher,
16680 + CRYPTO_TFM_RES_BAD_KEY_LEN);
16681 + return -EINVAL;
16682 + }
16683 +
16684 + memcpy(ctx->key, key, keylen);
16685 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16686 + if (dma_mapping_error(dev, ctx->key_dma)) {
16687 + dev_err(dev, "unable to map key i/o memory\n");
16688 + return -ENOMEM;
16689 + }
16690 + ctx->cdata.keylen = keylen;
16691 + ctx->cdata.key_virt = ctx->key;
16692 + ctx->cdata.key_inline = true;
16693 +
16694 + /* xts_ablkcipher_encrypt shared descriptor */
16695 + flc = &ctx->flc[ENCRYPT];
16696 + desc = flc->sh_desc;
16697 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16698 +
16699 + flc->flc[1] = desc_len(desc); /* SDL */
16700 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16701 + desc_bytes(desc), DMA_TO_DEVICE);
16702 + if (dma_mapping_error(dev, flc->flc_dma)) {
16703 + dev_err(dev, "unable to map shared descriptor\n");
16704 + return -ENOMEM;
16705 + }
16706 +
16707 + /* xts_ablkcipher_decrypt shared descriptor */
16708 + flc = &ctx->flc[DECRYPT];
16709 + desc = flc->sh_desc;
16710 +
16711 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16712 +
16713 + flc->flc[1] = desc_len(desc); /* SDL */
16714 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16715 + desc_bytes(desc), DMA_TO_DEVICE);
16716 + if (dma_mapping_error(dev, flc->flc_dma)) {
16717 + dev_err(dev, "unable to map shared descriptor\n");
16718 + return -ENOMEM;
16719 + }
16720 +
16721 + return 0;
16722 +}
16723 +
16724 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16725 + *req, bool encrypt)
16726 +{
16727 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16728 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16729 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16730 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16731 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16732 + struct device *dev = ctx->dev;
16733 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16734 + GFP_KERNEL : GFP_ATOMIC;
16735 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16736 + struct ablkcipher_edesc *edesc;
16737 + dma_addr_t iv_dma;
16738 + bool in_contig;
16739 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16740 + int dst_sg_idx, qm_sg_ents;
16741 + struct dpaa2_sg_entry *sg_table;
16742 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16743 +
16744 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16745 + if (unlikely(src_nents < 0)) {
16746 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16747 + req->nbytes);
16748 + return ERR_PTR(src_nents);
16749 + }
16750 +
16751 + if (unlikely(req->dst != req->src)) {
16752 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16753 + if (unlikely(dst_nents < 0)) {
16754 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16755 + req->nbytes);
16756 + return ERR_PTR(dst_nents);
16757 + }
16758 +
16759 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16760 + DMA_TO_DEVICE);
16761 + if (unlikely(!mapped_src_nents)) {
16762 + dev_err(dev, "unable to map source\n");
16763 + return ERR_PTR(-ENOMEM);
16764 + }
16765 +
16766 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16767 + DMA_FROM_DEVICE);
16768 + if (unlikely(!mapped_dst_nents)) {
16769 + dev_err(dev, "unable to map destination\n");
16770 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16771 + return ERR_PTR(-ENOMEM);
16772 + }
16773 + } else {
16774 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16775 + DMA_BIDIRECTIONAL);
16776 + if (unlikely(!mapped_src_nents)) {
16777 + dev_err(dev, "unable to map source\n");
16778 + return ERR_PTR(-ENOMEM);
16779 + }
16780 + }
16781 +
16782 + iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16783 + if (dma_mapping_error(dev, iv_dma)) {
16784 + dev_err(dev, "unable to map IV\n");
16785 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16786 + 0, 0, 0, 0);
16787 + return ERR_PTR(-ENOMEM);
16788 + }
16789 +
16790 + if (mapped_src_nents == 1 &&
16791 + iv_dma + ivsize == sg_dma_address(req->src)) {
16792 + in_contig = true;
16793 + qm_sg_ents = 0;
16794 + } else {
16795 + in_contig = false;
16796 + qm_sg_ents = 1 + mapped_src_nents;
16797 + }
16798 + dst_sg_idx = qm_sg_ents;
16799 +
16800 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16801 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16802 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16803 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16804 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16805 + iv_dma, ivsize, op_type, 0, 0);
16806 + return ERR_PTR(-ENOMEM);
16807 + }
16808 +
16809 + /* allocate space for base edesc and link tables */
16810 + edesc = qi_cache_alloc(GFP_DMA | flags);
16811 + if (unlikely(!edesc)) {
16812 + dev_err(dev, "could not allocate extended descriptor\n");
16813 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16814 + iv_dma, ivsize, op_type, 0, 0);
16815 + return ERR_PTR(-ENOMEM);
16816 + }
16817 +
16818 + edesc->src_nents = src_nents;
16819 + edesc->dst_nents = dst_nents;
16820 + edesc->iv_dma = iv_dma;
16821 + sg_table = &edesc->sgt[0];
16822 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16823 +
16824 + if (!in_contig) {
16825 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16826 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16827 + }
16828 +
16829 + if (mapped_dst_nents > 1)
16830 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16831 + dst_sg_idx, 0);
16832 +
16833 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16834 + DMA_TO_DEVICE);
16835 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16836 + dev_err(dev, "unable to map S/G table\n");
16837 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16838 + iv_dma, ivsize, op_type, 0, 0);
16839 + qi_cache_free(edesc);
16840 + return ERR_PTR(-ENOMEM);
16841 + }
16842 +
16843 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16844 + dpaa2_fl_set_final(in_fle, true);
16845 + dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16846 + dpaa2_fl_set_len(out_fle, req->nbytes);
16847 +
16848 + if (!in_contig) {
16849 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16850 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16851 + } else {
16852 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16853 + dpaa2_fl_set_addr(in_fle, iv_dma);
16854 + }
16855 +
16856 + if (req->src == req->dst) {
16857 + if (!in_contig) {
16858 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16859 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16860 + sizeof(*sg_table));
16861 + } else {
16862 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16863 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16864 + }
16865 + } else if (mapped_dst_nents > 1) {
16866 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16867 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16868 + sizeof(*sg_table));
16869 + } else {
16870 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16871 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16872 + }
16873 +
16874 + return edesc;
16875 +}
16876 +
16877 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16878 + struct skcipher_givcrypt_request *greq)
16879 +{
16880 + struct ablkcipher_request *req = &greq->creq;
16881 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16882 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16883 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16884 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16885 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16886 + struct device *dev = ctx->dev;
16887 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16888 + GFP_KERNEL : GFP_ATOMIC;
16889 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16890 + struct ablkcipher_edesc *edesc;
16891 + dma_addr_t iv_dma;
16892 + bool out_contig;
16893 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16894 + struct dpaa2_sg_entry *sg_table;
16895 + int dst_sg_idx, qm_sg_ents;
16896 +
16897 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16898 + if (unlikely(src_nents < 0)) {
16899 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16900 + req->nbytes);
16901 + return ERR_PTR(src_nents);
16902 + }
16903 +
16904 + if (unlikely(req->dst != req->src)) {
16905 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16906 + if (unlikely(dst_nents < 0)) {
16907 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16908 + req->nbytes);
16909 + return ERR_PTR(dst_nents);
16910 + }
16911 +
16912 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16913 + DMA_TO_DEVICE);
16914 + if (unlikely(!mapped_src_nents)) {
16915 + dev_err(dev, "unable to map source\n");
16916 + return ERR_PTR(-ENOMEM);
16917 + }
16918 +
16919 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16920 + DMA_FROM_DEVICE);
16921 + if (unlikely(!mapped_dst_nents)) {
16922 + dev_err(dev, "unable to map destination\n");
16923 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16924 + return ERR_PTR(-ENOMEM);
16925 + }
16926 + } else {
16927 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16928 + DMA_BIDIRECTIONAL);
16929 + if (unlikely(!mapped_src_nents)) {
16930 + dev_err(dev, "unable to map source\n");
16931 + return ERR_PTR(-ENOMEM);
16932 + }
16933 +
16934 + dst_nents = src_nents;
16935 + mapped_dst_nents = src_nents;
16936 + }
16937 +
16938 + iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16939 + if (dma_mapping_error(dev, iv_dma)) {
16940 + dev_err(dev, "unable to map IV\n");
16941 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16942 + 0, 0, 0, 0);
16943 + return ERR_PTR(-ENOMEM);
16944 + }
16945 +
16946 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16947 + dst_sg_idx = qm_sg_ents;
16948 + if (mapped_dst_nents == 1 &&
16949 + iv_dma + ivsize == sg_dma_address(req->dst)) {
16950 + out_contig = true;
16951 + } else {
16952 + out_contig = false;
16953 + qm_sg_ents += 1 + mapped_dst_nents;
16954 + }
16955 +
16956 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16957 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16958 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16959 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16960 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16961 + return ERR_PTR(-ENOMEM);
16962 + }
16963 +
16964 + /* allocate space for base edesc and link tables */
16965 + edesc = qi_cache_alloc(GFP_DMA | flags);
16966 + if (!edesc) {
16967 + dev_err(dev, "could not allocate extended descriptor\n");
16968 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16969 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16970 + return ERR_PTR(-ENOMEM);
16971 + }
16972 +
16973 + edesc->src_nents = src_nents;
16974 + edesc->dst_nents = dst_nents;
16975 + edesc->iv_dma = iv_dma;
16976 + sg_table = &edesc->sgt[0];
16977 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16978 +
16979 + if (mapped_src_nents > 1)
16980 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16981 +
16982 + if (!out_contig) {
16983 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16984 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16985 + dst_sg_idx + 1, 0);
16986 + }
16987 +
16988 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16989 + DMA_TO_DEVICE);
16990 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16991 + dev_err(dev, "unable to map S/G table\n");
16992 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16993 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16994 + qi_cache_free(edesc);
16995 + return ERR_PTR(-ENOMEM);
16996 + }
16997 +
16998 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16999 + dpaa2_fl_set_final(in_fle, true);
17000 + dpaa2_fl_set_len(in_fle, req->nbytes);
17001 + dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
17002 +
17003 + if (mapped_src_nents > 1) {
17004 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
17005 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
17006 + } else {
17007 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
17008 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
17009 + }
17010 +
17011 + if (!out_contig) {
17012 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
17013 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
17014 + sizeof(*sg_table));
17015 + } else {
17016 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
17017 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
17018 + }
17019 +
17020 + return edesc;
17021 +}
17022 +
17023 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
17024 + struct aead_request *req)
17025 +{
17026 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17027 + int ivsize = crypto_aead_ivsize(aead);
17028 + struct caam_request *caam_req = aead_request_ctx(req);
17029 +
17030 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17031 + edesc->iv_dma, ivsize, caam_req->op_type,
17032 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17033 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
17034 +}
17035 +
17036 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
17037 + struct aead_request *req)
17038 +{
17039 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17040 + int ivsize = crypto_aead_ivsize(tls);
17041 + struct caam_request *caam_req = aead_request_ctx(req);
17042 +
17043 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
17044 + edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
17045 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17046 +}
17047 +
17048 +static void ablkcipher_unmap(struct device *dev,
17049 + struct ablkcipher_edesc *edesc,
17050 + struct ablkcipher_request *req)
17051 +{
17052 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17053 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17054 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17055 +
17056 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17057 + edesc->iv_dma, ivsize, caam_req->op_type,
17058 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17059 +}
17060 +
17061 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
17062 +{
17063 + struct crypto_async_request *areq = cbk_ctx;
17064 + struct aead_request *req = container_of(areq, struct aead_request,
17065 + base);
17066 + struct caam_request *req_ctx = to_caam_req(areq);
17067 + struct aead_edesc *edesc = req_ctx->edesc;
17068 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17069 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17070 + int ecode = 0;
17071 +
17072 +#ifdef DEBUG
17073 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17074 +#endif
17075 +
17076 + if (unlikely(status)) {
17077 + caam_qi2_strstatus(ctx->dev, status);
17078 + ecode = -EIO;
17079 + }
17080 +
17081 + aead_unmap(ctx->dev, edesc, req);
17082 + qi_cache_free(edesc);
17083 + aead_request_complete(req, ecode);
17084 +}
17085 +
17086 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
17087 +{
17088 + struct crypto_async_request *areq = cbk_ctx;
17089 + struct aead_request *req = container_of(areq, struct aead_request,
17090 + base);
17091 + struct caam_request *req_ctx = to_caam_req(areq);
17092 + struct aead_edesc *edesc = req_ctx->edesc;
17093 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17094 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17095 + int ecode = 0;
17096 +
17097 +#ifdef DEBUG
17098 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17099 +#endif
17100 +
17101 + if (unlikely(status)) {
17102 + caam_qi2_strstatus(ctx->dev, status);
17103 + /*
17104 + * verify hw auth check passed else return -EBADMSG
17105 + */
17106 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17107 + JRSTA_CCBERR_ERRID_ICVCHK)
17108 + ecode = -EBADMSG;
17109 + else
17110 + ecode = -EIO;
17111 + }
17112 +
17113 + aead_unmap(ctx->dev, edesc, req);
17114 + qi_cache_free(edesc);
17115 + aead_request_complete(req, ecode);
17116 +}
17117 +
17118 +static int aead_encrypt(struct aead_request *req)
17119 +{
17120 + struct aead_edesc *edesc;
17121 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17122 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17123 + struct caam_request *caam_req = aead_request_ctx(req);
17124 + int ret;
17125 +
17126 + /* allocate extended descriptor */
17127 + edesc = aead_edesc_alloc(req, true);
17128 + if (IS_ERR(edesc))
17129 + return PTR_ERR(edesc);
17130 +
17131 + caam_req->flc = &ctx->flc[ENCRYPT];
17132 + caam_req->op_type = ENCRYPT;
17133 + caam_req->cbk = aead_encrypt_done;
17134 + caam_req->ctx = &req->base;
17135 + caam_req->edesc = edesc;
17136 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17137 + if (ret != -EINPROGRESS &&
17138 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17139 + aead_unmap(ctx->dev, edesc, req);
17140 + qi_cache_free(edesc);
17141 + }
17142 +
17143 + return ret;
17144 +}
17145 +
17146 +static int aead_decrypt(struct aead_request *req)
17147 +{
17148 + struct aead_edesc *edesc;
17149 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17150 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17151 + struct caam_request *caam_req = aead_request_ctx(req);
17152 + int ret;
17153 +
17154 + /* allocate extended descriptor */
17155 + edesc = aead_edesc_alloc(req, false);
17156 + if (IS_ERR(edesc))
17157 + return PTR_ERR(edesc);
17158 +
17159 + caam_req->flc = &ctx->flc[DECRYPT];
17160 + caam_req->op_type = DECRYPT;
17161 + caam_req->cbk = aead_decrypt_done;
17162 + caam_req->ctx = &req->base;
17163 + caam_req->edesc = edesc;
17164 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17165 + if (ret != -EINPROGRESS &&
17166 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17167 + aead_unmap(ctx->dev, edesc, req);
17168 + qi_cache_free(edesc);
17169 + }
17170 +
17171 + return ret;
17172 +}
17173 +
17174 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17175 +{
17176 + struct crypto_async_request *areq = cbk_ctx;
17177 + struct aead_request *req = container_of(areq, struct aead_request,
17178 + base);
17179 + struct caam_request *req_ctx = to_caam_req(areq);
17180 + struct tls_edesc *edesc = req_ctx->edesc;
17181 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17182 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17183 + int ecode = 0;
17184 +
17185 +#ifdef DEBUG
17186 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17187 +#endif
17188 +
17189 + if (unlikely(status)) {
17190 + caam_qi2_strstatus(ctx->dev, status);
17191 + ecode = -EIO;
17192 + }
17193 +
17194 + tls_unmap(ctx->dev, edesc, req);
17195 + qi_cache_free(edesc);
17196 + aead_request_complete(req, ecode);
17197 +}
17198 +
17199 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17200 +{
17201 + struct crypto_async_request *areq = cbk_ctx;
17202 + struct aead_request *req = container_of(areq, struct aead_request,
17203 + base);
17204 + struct caam_request *req_ctx = to_caam_req(areq);
17205 + struct tls_edesc *edesc = req_ctx->edesc;
17206 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17207 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17208 + int ecode = 0;
17209 +
17210 +#ifdef DEBUG
17211 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17212 +#endif
17213 +
17214 + if (unlikely(status)) {
17215 + caam_qi2_strstatus(ctx->dev, status);
17216 + /*
17217 + * verify hw auth check passed else return -EBADMSG
17218 + */
17219 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17220 + JRSTA_CCBERR_ERRID_ICVCHK)
17221 + ecode = -EBADMSG;
17222 + else
17223 + ecode = -EIO;
17224 + }
17225 +
17226 + tls_unmap(ctx->dev, edesc, req);
17227 + qi_cache_free(edesc);
17228 + aead_request_complete(req, ecode);
17229 +}
17230 +
17231 +static int tls_encrypt(struct aead_request *req)
17232 +{
17233 + struct tls_edesc *edesc;
17234 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17235 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17236 + struct caam_request *caam_req = aead_request_ctx(req);
17237 + int ret;
17238 +
17239 + /* allocate extended descriptor */
17240 + edesc = tls_edesc_alloc(req, true);
17241 + if (IS_ERR(edesc))
17242 + return PTR_ERR(edesc);
17243 +
17244 + caam_req->flc = &ctx->flc[ENCRYPT];
17245 + caam_req->op_type = ENCRYPT;
17246 + caam_req->cbk = tls_encrypt_done;
17247 + caam_req->ctx = &req->base;
17248 + caam_req->edesc = edesc;
17249 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17250 + if (ret != -EINPROGRESS &&
17251 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17252 + tls_unmap(ctx->dev, edesc, req);
17253 + qi_cache_free(edesc);
17254 + }
17255 +
17256 + return ret;
17257 +}
17258 +
17259 +static int tls_decrypt(struct aead_request *req)
17260 +{
17261 + struct tls_edesc *edesc;
17262 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17263 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17264 + struct caam_request *caam_req = aead_request_ctx(req);
17265 + int ret;
17266 +
17267 + /* allocate extended descriptor */
17268 + edesc = tls_edesc_alloc(req, false);
17269 + if (IS_ERR(edesc))
17270 + return PTR_ERR(edesc);
17271 +
17272 + caam_req->flc = &ctx->flc[DECRYPT];
17273 + caam_req->op_type = DECRYPT;
17274 + caam_req->cbk = tls_decrypt_done;
17275 + caam_req->ctx = &req->base;
17276 + caam_req->edesc = edesc;
17277 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17278 + if (ret != -EINPROGRESS &&
17279 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17280 + tls_unmap(ctx->dev, edesc, req);
17281 + qi_cache_free(edesc);
17282 + }
17283 +
17284 + return ret;
17285 +}
17286 +
17287 +static int ipsec_gcm_encrypt(struct aead_request *req)
17288 +{
17289 + if (req->assoclen < 8)
17290 + return -EINVAL;
17291 +
17292 + return aead_encrypt(req);
17293 +}
17294 +
17295 +static int ipsec_gcm_decrypt(struct aead_request *req)
17296 +{
17297 + if (req->assoclen < 8)
17298 + return -EINVAL;
17299 +
17300 + return aead_decrypt(req);
17301 +}
17302 +
17303 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17304 +{
17305 + struct crypto_async_request *areq = cbk_ctx;
17306 + struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17307 + struct caam_request *req_ctx = to_caam_req(areq);
17308 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17309 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17310 + struct ablkcipher_edesc *edesc = req_ctx->edesc;
17311 + int ecode = 0;
17312 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17313 +
17314 +#ifdef DEBUG
17315 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17316 +#endif
17317 +
17318 + if (unlikely(status)) {
17319 + caam_qi2_strstatus(ctx->dev, status);
17320 + ecode = -EIO;
17321 + }
17322 +
17323 +#ifdef DEBUG
17324 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
17325 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17326 + edesc->src_nents > 1 ? 100 : ivsize, 1);
17327 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
17328 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17329 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17330 +#endif
17331 +
17332 + ablkcipher_unmap(ctx->dev, edesc, req);
17333 + qi_cache_free(edesc);
17334 +
17335 + /*
17336 + * The crypto API expects us to set the IV (req->info) to the last
17337 + * ciphertext block. This is used e.g. by the CTS mode.
17338 + */
17339 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17340 + ivsize, 0);
17341 +
17342 + ablkcipher_request_complete(req, ecode);
17343 +}
17344 +
17345 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17346 +{
17347 + struct ablkcipher_edesc *edesc;
17348 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17349 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17350 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17351 + int ret;
17352 +
17353 + /* allocate extended descriptor */
17354 + edesc = ablkcipher_edesc_alloc(req, true);
17355 + if (IS_ERR(edesc))
17356 + return PTR_ERR(edesc);
17357 +
17358 + caam_req->flc = &ctx->flc[ENCRYPT];
17359 + caam_req->op_type = ENCRYPT;
17360 + caam_req->cbk = ablkcipher_done;
17361 + caam_req->ctx = &req->base;
17362 + caam_req->edesc = edesc;
17363 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17364 + if (ret != -EINPROGRESS &&
17365 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17366 + ablkcipher_unmap(ctx->dev, edesc, req);
17367 + qi_cache_free(edesc);
17368 + }
17369 +
17370 + return ret;
17371 +}
17372 +
17373 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17374 +{
17375 + struct ablkcipher_request *req = &greq->creq;
17376 + struct ablkcipher_edesc *edesc;
17377 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17378 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17379 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17380 + int ret;
17381 +
17382 + /* allocate extended descriptor */
17383 + edesc = ablkcipher_giv_edesc_alloc(greq);
17384 + if (IS_ERR(edesc))
17385 + return PTR_ERR(edesc);
17386 +
17387 + caam_req->flc = &ctx->flc[GIVENCRYPT];
17388 + caam_req->op_type = GIVENCRYPT;
17389 + caam_req->cbk = ablkcipher_done;
17390 + caam_req->ctx = &req->base;
17391 + caam_req->edesc = edesc;
17392 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17393 + if (ret != -EINPROGRESS &&
17394 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17395 + ablkcipher_unmap(ctx->dev, edesc, req);
17396 + qi_cache_free(edesc);
17397 + }
17398 +
17399 + return ret;
17400 +}
17401 +
17402 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17403 +{
17404 + struct ablkcipher_edesc *edesc;
17405 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17406 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17407 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17408 + int ret;
17409 +
17410 + /* allocate extended descriptor */
17411 + edesc = ablkcipher_edesc_alloc(req, false);
17412 + if (IS_ERR(edesc))
17413 + return PTR_ERR(edesc);
17414 +
17415 + caam_req->flc = &ctx->flc[DECRYPT];
17416 + caam_req->op_type = DECRYPT;
17417 + caam_req->cbk = ablkcipher_done;
17418 + caam_req->ctx = &req->base;
17419 + caam_req->edesc = edesc;
17420 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17421 + if (ret != -EINPROGRESS &&
17422 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17423 + ablkcipher_unmap(ctx->dev, edesc, req);
17424 + qi_cache_free(edesc);
17425 + }
17426 +
17427 + return ret;
17428 +}
17429 +
17430 +struct caam_crypto_alg {
17431 + struct list_head entry;
17432 + struct crypto_alg crypto_alg;
17433 + struct caam_alg_entry caam;
17434 +};
17435 +
17436 +static int caam_cra_init(struct crypto_tfm *tfm)
17437 +{
17438 + struct crypto_alg *alg = tfm->__crt_alg;
17439 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17440 + crypto_alg);
17441 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17442 +
17443 + /* copy descriptor header template value */
17444 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17445 + caam_alg->caam.class1_alg_type;
17446 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17447 + caam_alg->caam.class2_alg_type;
17448 +
17449 + ctx->dev = caam_alg->caam.dev;
17450 +
17451 + return 0;
17452 +}
17453 +
17454 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17455 +{
17456 + struct ablkcipher_tfm *ablkcipher_tfm =
17457 + crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17458 +
17459 + ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17460 + return caam_cra_init(tfm);
17461 +}
17462 +
17463 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17464 +{
17465 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17466 + return caam_cra_init(crypto_aead_tfm(tfm));
17467 +}
17468 +
17469 +static void caam_exit_common(struct caam_ctx *ctx)
17470 +{
17471 + int i;
17472 +
17473 + for (i = 0; i < NUM_OP; i++) {
17474 + if (!ctx->flc[i].flc_dma)
17475 + continue;
17476 + dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
17477 + sizeof(ctx->flc[i].flc) +
17478 + desc_bytes(ctx->flc[i].sh_desc),
17479 + DMA_TO_DEVICE);
17480 + }
17481 +
17482 + if (ctx->key_dma)
17483 + dma_unmap_single(ctx->dev, ctx->key_dma,
17484 + ctx->cdata.keylen + ctx->adata.keylen_pad,
17485 + DMA_TO_DEVICE);
17486 +}
17487 +
17488 +static void caam_cra_exit(struct crypto_tfm *tfm)
17489 +{
17490 + caam_exit_common(crypto_tfm_ctx(tfm));
17491 +}
17492 +
17493 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17494 +{
17495 + caam_exit_common(crypto_aead_ctx(tfm));
17496 +}
17497 +
17498 +#define template_ablkcipher template_u.ablkcipher
17499 +struct caam_alg_template {
17500 + char name[CRYPTO_MAX_ALG_NAME];
17501 + char driver_name[CRYPTO_MAX_ALG_NAME];
17502 + unsigned int blocksize;
17503 + u32 type;
17504 + union {
17505 + struct ablkcipher_alg ablkcipher;
17506 + } template_u;
17507 + u32 class1_alg_type;
17508 + u32 class2_alg_type;
17509 +};
17510 +
17511 +static struct caam_alg_template driver_algs[] = {
17512 + /* ablkcipher descriptor */
17513 + {
17514 + .name = "cbc(aes)",
17515 + .driver_name = "cbc-aes-caam-qi2",
17516 + .blocksize = AES_BLOCK_SIZE,
17517 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17518 + .template_ablkcipher = {
17519 + .setkey = ablkcipher_setkey,
17520 + .encrypt = ablkcipher_encrypt,
17521 + .decrypt = ablkcipher_decrypt,
17522 + .givencrypt = ablkcipher_givencrypt,
17523 + .geniv = "<built-in>",
17524 + .min_keysize = AES_MIN_KEY_SIZE,
17525 + .max_keysize = AES_MAX_KEY_SIZE,
17526 + .ivsize = AES_BLOCK_SIZE,
17527 + },
17528 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17529 + },
17530 + {
17531 + .name = "cbc(des3_ede)",
17532 + .driver_name = "cbc-3des-caam-qi2",
17533 + .blocksize = DES3_EDE_BLOCK_SIZE,
17534 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17535 + .template_ablkcipher = {
17536 + .setkey = ablkcipher_setkey,
17537 + .encrypt = ablkcipher_encrypt,
17538 + .decrypt = ablkcipher_decrypt,
17539 + .givencrypt = ablkcipher_givencrypt,
17540 + .geniv = "<built-in>",
17541 + .min_keysize = DES3_EDE_KEY_SIZE,
17542 + .max_keysize = DES3_EDE_KEY_SIZE,
17543 + .ivsize = DES3_EDE_BLOCK_SIZE,
17544 + },
17545 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17546 + },
17547 + {
17548 + .name = "cbc(des)",
17549 + .driver_name = "cbc-des-caam-qi2",
17550 + .blocksize = DES_BLOCK_SIZE,
17551 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17552 + .template_ablkcipher = {
17553 + .setkey = ablkcipher_setkey,
17554 + .encrypt = ablkcipher_encrypt,
17555 + .decrypt = ablkcipher_decrypt,
17556 + .givencrypt = ablkcipher_givencrypt,
17557 + .geniv = "<built-in>",
17558 + .min_keysize = DES_KEY_SIZE,
17559 + .max_keysize = DES_KEY_SIZE,
17560 + .ivsize = DES_BLOCK_SIZE,
17561 + },
17562 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17563 + },
17564 + {
17565 + .name = "ctr(aes)",
17566 + .driver_name = "ctr-aes-caam-qi2",
17567 + .blocksize = 1,
17568 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17569 + .template_ablkcipher = {
17570 + .setkey = ablkcipher_setkey,
17571 + .encrypt = ablkcipher_encrypt,
17572 + .decrypt = ablkcipher_decrypt,
17573 + .geniv = "chainiv",
17574 + .min_keysize = AES_MIN_KEY_SIZE,
17575 + .max_keysize = AES_MAX_KEY_SIZE,
17576 + .ivsize = AES_BLOCK_SIZE,
17577 + },
17578 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17579 + },
17580 + {
17581 + .name = "rfc3686(ctr(aes))",
17582 + .driver_name = "rfc3686-ctr-aes-caam-qi2",
17583 + .blocksize = 1,
17584 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17585 + .template_ablkcipher = {
17586 + .setkey = ablkcipher_setkey,
17587 + .encrypt = ablkcipher_encrypt,
17588 + .decrypt = ablkcipher_decrypt,
17589 + .givencrypt = ablkcipher_givencrypt,
17590 + .geniv = "<built-in>",
17591 + .min_keysize = AES_MIN_KEY_SIZE +
17592 + CTR_RFC3686_NONCE_SIZE,
17593 + .max_keysize = AES_MAX_KEY_SIZE +
17594 + CTR_RFC3686_NONCE_SIZE,
17595 + .ivsize = CTR_RFC3686_IV_SIZE,
17596 + },
17597 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17598 + },
17599 + {
17600 + .name = "xts(aes)",
17601 + .driver_name = "xts-aes-caam-qi2",
17602 + .blocksize = AES_BLOCK_SIZE,
17603 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17604 + .template_ablkcipher = {
17605 + .setkey = xts_ablkcipher_setkey,
17606 + .encrypt = ablkcipher_encrypt,
17607 + .decrypt = ablkcipher_decrypt,
17608 + .geniv = "eseqiv",
17609 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
17610 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
17611 + .ivsize = AES_BLOCK_SIZE,
17612 + },
17613 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17614 + }
17615 +};
17616 +
17617 +static struct caam_aead_alg driver_aeads[] = {
17618 + {
17619 + .aead = {
17620 + .base = {
17621 + .cra_name = "rfc4106(gcm(aes))",
17622 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17623 + .cra_blocksize = 1,
17624 + },
17625 + .setkey = rfc4106_setkey,
17626 + .setauthsize = rfc4106_setauthsize,
17627 + .encrypt = ipsec_gcm_encrypt,
17628 + .decrypt = ipsec_gcm_decrypt,
17629 + .ivsize = 8,
17630 + .maxauthsize = AES_BLOCK_SIZE,
17631 + },
17632 + .caam = {
17633 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17634 + },
17635 + },
17636 + {
17637 + .aead = {
17638 + .base = {
17639 + .cra_name = "rfc4543(gcm(aes))",
17640 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17641 + .cra_blocksize = 1,
17642 + },
17643 + .setkey = rfc4543_setkey,
17644 + .setauthsize = rfc4543_setauthsize,
17645 + .encrypt = ipsec_gcm_encrypt,
17646 + .decrypt = ipsec_gcm_decrypt,
17647 + .ivsize = 8,
17648 + .maxauthsize = AES_BLOCK_SIZE,
17649 + },
17650 + .caam = {
17651 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17652 + },
17653 + },
17654 + /* Galois Counter Mode */
17655 + {
17656 + .aead = {
17657 + .base = {
17658 + .cra_name = "gcm(aes)",
17659 + .cra_driver_name = "gcm-aes-caam-qi2",
17660 + .cra_blocksize = 1,
17661 + },
17662 + .setkey = gcm_setkey,
17663 + .setauthsize = gcm_setauthsize,
17664 + .encrypt = aead_encrypt,
17665 + .decrypt = aead_decrypt,
17666 + .ivsize = 12,
17667 + .maxauthsize = AES_BLOCK_SIZE,
17668 + },
17669 + .caam = {
17670 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17671 + }
17672 + },
17673 + /* single-pass ipsec_esp descriptor */
17674 + {
17675 + .aead = {
17676 + .base = {
17677 + .cra_name = "authenc(hmac(md5),cbc(aes))",
17678 + .cra_driver_name = "authenc-hmac-md5-"
17679 + "cbc-aes-caam-qi2",
17680 + .cra_blocksize = AES_BLOCK_SIZE,
17681 + },
17682 + .setkey = aead_setkey,
17683 + .setauthsize = aead_setauthsize,
17684 + .encrypt = aead_encrypt,
17685 + .decrypt = aead_decrypt,
17686 + .ivsize = AES_BLOCK_SIZE,
17687 + .maxauthsize = MD5_DIGEST_SIZE,
17688 + },
17689 + .caam = {
17690 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17691 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17692 + OP_ALG_AAI_HMAC_PRECOMP,
17693 + }
17694 + },
17695 + {
17696 + .aead = {
17697 + .base = {
17698 + .cra_name = "echainiv(authenc(hmac(md5),"
17699 + "cbc(aes)))",
17700 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17701 + "cbc-aes-caam-qi2",
17702 + .cra_blocksize = AES_BLOCK_SIZE,
17703 + },
17704 + .setkey = aead_setkey,
17705 + .setauthsize = aead_setauthsize,
17706 + .encrypt = aead_encrypt,
17707 + .decrypt = aead_decrypt,
17708 + .ivsize = AES_BLOCK_SIZE,
17709 + .maxauthsize = MD5_DIGEST_SIZE,
17710 + },
17711 + .caam = {
17712 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17713 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17714 + OP_ALG_AAI_HMAC_PRECOMP,
17715 + .geniv = true,
17716 + }
17717 + },
17718 + {
17719 + .aead = {
17720 + .base = {
17721 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
17722 + .cra_driver_name = "authenc-hmac-sha1-"
17723 + "cbc-aes-caam-qi2",
17724 + .cra_blocksize = AES_BLOCK_SIZE,
17725 + },
17726 + .setkey = aead_setkey,
17727 + .setauthsize = aead_setauthsize,
17728 + .encrypt = aead_encrypt,
17729 + .decrypt = aead_decrypt,
17730 + .ivsize = AES_BLOCK_SIZE,
17731 + .maxauthsize = SHA1_DIGEST_SIZE,
17732 + },
17733 + .caam = {
17734 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17735 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17736 + OP_ALG_AAI_HMAC_PRECOMP,
17737 + }
17738 + },
17739 + {
17740 + .aead = {
17741 + .base = {
17742 + .cra_name = "echainiv(authenc(hmac(sha1),"
17743 + "cbc(aes)))",
17744 + .cra_driver_name = "echainiv-authenc-"
17745 + "hmac-sha1-cbc-aes-caam-qi2",
17746 + .cra_blocksize = AES_BLOCK_SIZE,
17747 + },
17748 + .setkey = aead_setkey,
17749 + .setauthsize = aead_setauthsize,
17750 + .encrypt = aead_encrypt,
17751 + .decrypt = aead_decrypt,
17752 + .ivsize = AES_BLOCK_SIZE,
17753 + .maxauthsize = SHA1_DIGEST_SIZE,
17754 + },
17755 + .caam = {
17756 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17757 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17758 + OP_ALG_AAI_HMAC_PRECOMP,
17759 + .geniv = true,
17760 + },
17761 + },
17762 + {
17763 + .aead = {
17764 + .base = {
17765 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
17766 + .cra_driver_name = "authenc-hmac-sha224-"
17767 + "cbc-aes-caam-qi2",
17768 + .cra_blocksize = AES_BLOCK_SIZE,
17769 + },
17770 + .setkey = aead_setkey,
17771 + .setauthsize = aead_setauthsize,
17772 + .encrypt = aead_encrypt,
17773 + .decrypt = aead_decrypt,
17774 + .ivsize = AES_BLOCK_SIZE,
17775 + .maxauthsize = SHA224_DIGEST_SIZE,
17776 + },
17777 + .caam = {
17778 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17779 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17780 + OP_ALG_AAI_HMAC_PRECOMP,
17781 + }
17782 + },
17783 + {
17784 + .aead = {
17785 + .base = {
17786 + .cra_name = "echainiv(authenc(hmac(sha224),"
17787 + "cbc(aes)))",
17788 + .cra_driver_name = "echainiv-authenc-"
17789 + "hmac-sha224-cbc-aes-caam-qi2",
17790 + .cra_blocksize = AES_BLOCK_SIZE,
17791 + },
17792 + .setkey = aead_setkey,
17793 + .setauthsize = aead_setauthsize,
17794 + .encrypt = aead_encrypt,
17795 + .decrypt = aead_decrypt,
17796 + .ivsize = AES_BLOCK_SIZE,
17797 + .maxauthsize = SHA224_DIGEST_SIZE,
17798 + },
17799 + .caam = {
17800 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17801 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17802 + OP_ALG_AAI_HMAC_PRECOMP,
17803 + .geniv = true,
17804 + }
17805 + },
17806 + {
17807 + .aead = {
17808 + .base = {
17809 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
17810 + .cra_driver_name = "authenc-hmac-sha256-"
17811 + "cbc-aes-caam-qi2",
17812 + .cra_blocksize = AES_BLOCK_SIZE,
17813 + },
17814 + .setkey = aead_setkey,
17815 + .setauthsize = aead_setauthsize,
17816 + .encrypt = aead_encrypt,
17817 + .decrypt = aead_decrypt,
17818 + .ivsize = AES_BLOCK_SIZE,
17819 + .maxauthsize = SHA256_DIGEST_SIZE,
17820 + },
17821 + .caam = {
17822 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17823 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17824 + OP_ALG_AAI_HMAC_PRECOMP,
17825 + }
17826 + },
17827 + {
17828 + .aead = {
17829 + .base = {
17830 + .cra_name = "echainiv(authenc(hmac(sha256),"
17831 + "cbc(aes)))",
17832 + .cra_driver_name = "echainiv-authenc-"
17833 + "hmac-sha256-cbc-aes-"
17834 + "caam-qi2",
17835 + .cra_blocksize = AES_BLOCK_SIZE,
17836 + },
17837 + .setkey = aead_setkey,
17838 + .setauthsize = aead_setauthsize,
17839 + .encrypt = aead_encrypt,
17840 + .decrypt = aead_decrypt,
17841 + .ivsize = AES_BLOCK_SIZE,
17842 + .maxauthsize = SHA256_DIGEST_SIZE,
17843 + },
17844 + .caam = {
17845 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17846 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17847 + OP_ALG_AAI_HMAC_PRECOMP,
17848 + .geniv = true,
17849 + }
17850 + },
17851 + {
17852 + .aead = {
17853 + .base = {
17854 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
17855 + .cra_driver_name = "authenc-hmac-sha384-"
17856 + "cbc-aes-caam-qi2",
17857 + .cra_blocksize = AES_BLOCK_SIZE,
17858 + },
17859 + .setkey = aead_setkey,
17860 + .setauthsize = aead_setauthsize,
17861 + .encrypt = aead_encrypt,
17862 + .decrypt = aead_decrypt,
17863 + .ivsize = AES_BLOCK_SIZE,
17864 + .maxauthsize = SHA384_DIGEST_SIZE,
17865 + },
17866 + .caam = {
17867 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17868 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17869 + OP_ALG_AAI_HMAC_PRECOMP,
17870 + }
17871 + },
17872 + {
17873 + .aead = {
17874 + .base = {
17875 + .cra_name = "echainiv(authenc(hmac(sha384),"
17876 + "cbc(aes)))",
17877 + .cra_driver_name = "echainiv-authenc-"
17878 + "hmac-sha384-cbc-aes-"
17879 + "caam-qi2",
17880 + .cra_blocksize = AES_BLOCK_SIZE,
17881 + },
17882 + .setkey = aead_setkey,
17883 + .setauthsize = aead_setauthsize,
17884 + .encrypt = aead_encrypt,
17885 + .decrypt = aead_decrypt,
17886 + .ivsize = AES_BLOCK_SIZE,
17887 + .maxauthsize = SHA384_DIGEST_SIZE,
17888 + },
17889 + .caam = {
17890 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17891 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17892 + OP_ALG_AAI_HMAC_PRECOMP,
17893 + .geniv = true,
17894 + }
17895 + },
17896 + {
17897 + .aead = {
17898 + .base = {
17899 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
17900 + .cra_driver_name = "authenc-hmac-sha512-"
17901 + "cbc-aes-caam-qi2",
17902 + .cra_blocksize = AES_BLOCK_SIZE,
17903 + },
17904 + .setkey = aead_setkey,
17905 + .setauthsize = aead_setauthsize,
17906 + .encrypt = aead_encrypt,
17907 + .decrypt = aead_decrypt,
17908 + .ivsize = AES_BLOCK_SIZE,
17909 + .maxauthsize = SHA512_DIGEST_SIZE,
17910 + },
17911 + .caam = {
17912 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17913 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17914 + OP_ALG_AAI_HMAC_PRECOMP,
17915 + }
17916 + },
17917 + {
17918 + .aead = {
17919 + .base = {
17920 + .cra_name = "echainiv(authenc(hmac(sha512),"
17921 + "cbc(aes)))",
17922 + .cra_driver_name = "echainiv-authenc-"
17923 + "hmac-sha512-cbc-aes-"
17924 + "caam-qi2",
17925 + .cra_blocksize = AES_BLOCK_SIZE,
17926 + },
17927 + .setkey = aead_setkey,
17928 + .setauthsize = aead_setauthsize,
17929 + .encrypt = aead_encrypt,
17930 + .decrypt = aead_decrypt,
17931 + .ivsize = AES_BLOCK_SIZE,
17932 + .maxauthsize = SHA512_DIGEST_SIZE,
17933 + },
17934 + .caam = {
17935 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17936 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17937 + OP_ALG_AAI_HMAC_PRECOMP,
17938 + .geniv = true,
17939 + }
17940 + },
17941 + {
17942 + .aead = {
17943 + .base = {
17944 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17945 + .cra_driver_name = "authenc-hmac-md5-"
17946 + "cbc-des3_ede-caam-qi2",
17947 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17948 + },
17949 + .setkey = aead_setkey,
17950 + .setauthsize = aead_setauthsize,
17951 + .encrypt = aead_encrypt,
17952 + .decrypt = aead_decrypt,
17953 + .ivsize = DES3_EDE_BLOCK_SIZE,
17954 + .maxauthsize = MD5_DIGEST_SIZE,
17955 + },
17956 + .caam = {
17957 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17958 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17959 + OP_ALG_AAI_HMAC_PRECOMP,
17960 + }
17961 + },
17962 + {
17963 + .aead = {
17964 + .base = {
17965 + .cra_name = "echainiv(authenc(hmac(md5),"
17966 + "cbc(des3_ede)))",
17967 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17968 + "cbc-des3_ede-caam-qi2",
17969 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17970 + },
17971 + .setkey = aead_setkey,
17972 + .setauthsize = aead_setauthsize,
17973 + .encrypt = aead_encrypt,
17974 + .decrypt = aead_decrypt,
17975 + .ivsize = DES3_EDE_BLOCK_SIZE,
17976 + .maxauthsize = MD5_DIGEST_SIZE,
17977 + },
17978 + .caam = {
17979 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17980 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17981 + OP_ALG_AAI_HMAC_PRECOMP,
17982 + .geniv = true,
17983 + }
17984 + },
17985 + {
17986 + .aead = {
17987 + .base = {
17988 + .cra_name = "authenc(hmac(sha1),"
17989 + "cbc(des3_ede))",
17990 + .cra_driver_name = "authenc-hmac-sha1-"
17991 + "cbc-des3_ede-caam-qi2",
17992 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17993 + },
17994 + .setkey = aead_setkey,
17995 + .setauthsize = aead_setauthsize,
17996 + .encrypt = aead_encrypt,
17997 + .decrypt = aead_decrypt,
17998 + .ivsize = DES3_EDE_BLOCK_SIZE,
17999 + .maxauthsize = SHA1_DIGEST_SIZE,
18000 + },
18001 + .caam = {
18002 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18003 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18004 + OP_ALG_AAI_HMAC_PRECOMP,
18005 + },
18006 + },
18007 + {
18008 + .aead = {
18009 + .base = {
18010 + .cra_name = "echainiv(authenc(hmac(sha1),"
18011 + "cbc(des3_ede)))",
18012 + .cra_driver_name = "echainiv-authenc-"
18013 + "hmac-sha1-"
18014 + "cbc-des3_ede-caam-qi2",
18015 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18016 + },
18017 + .setkey = aead_setkey,
18018 + .setauthsize = aead_setauthsize,
18019 + .encrypt = aead_encrypt,
18020 + .decrypt = aead_decrypt,
18021 + .ivsize = DES3_EDE_BLOCK_SIZE,
18022 + .maxauthsize = SHA1_DIGEST_SIZE,
18023 + },
18024 + .caam = {
18025 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18026 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18027 + OP_ALG_AAI_HMAC_PRECOMP,
18028 + .geniv = true,
18029 + }
18030 + },
18031 + {
18032 + .aead = {
18033 + .base = {
18034 + .cra_name = "authenc(hmac(sha224),"
18035 + "cbc(des3_ede))",
18036 + .cra_driver_name = "authenc-hmac-sha224-"
18037 + "cbc-des3_ede-caam-qi2",
18038 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18039 + },
18040 + .setkey = aead_setkey,
18041 + .setauthsize = aead_setauthsize,
18042 + .encrypt = aead_encrypt,
18043 + .decrypt = aead_decrypt,
18044 + .ivsize = DES3_EDE_BLOCK_SIZE,
18045 + .maxauthsize = SHA224_DIGEST_SIZE,
18046 + },
18047 + .caam = {
18048 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18049 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18050 + OP_ALG_AAI_HMAC_PRECOMP,
18051 + },
18052 + },
18053 + {
18054 + .aead = {
18055 + .base = {
18056 + .cra_name = "echainiv(authenc(hmac(sha224),"
18057 + "cbc(des3_ede)))",
18058 + .cra_driver_name = "echainiv-authenc-"
18059 + "hmac-sha224-"
18060 + "cbc-des3_ede-caam-qi2",
18061 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18062 + },
18063 + .setkey = aead_setkey,
18064 + .setauthsize = aead_setauthsize,
18065 + .encrypt = aead_encrypt,
18066 + .decrypt = aead_decrypt,
18067 + .ivsize = DES3_EDE_BLOCK_SIZE,
18068 + .maxauthsize = SHA224_DIGEST_SIZE,
18069 + },
18070 + .caam = {
18071 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18072 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18073 + OP_ALG_AAI_HMAC_PRECOMP,
18074 + .geniv = true,
18075 + }
18076 + },
18077 + {
18078 + .aead = {
18079 + .base = {
18080 + .cra_name = "authenc(hmac(sha256),"
18081 + "cbc(des3_ede))",
18082 + .cra_driver_name = "authenc-hmac-sha256-"
18083 + "cbc-des3_ede-caam-qi2",
18084 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18085 + },
18086 + .setkey = aead_setkey,
18087 + .setauthsize = aead_setauthsize,
18088 + .encrypt = aead_encrypt,
18089 + .decrypt = aead_decrypt,
18090 + .ivsize = DES3_EDE_BLOCK_SIZE,
18091 + .maxauthsize = SHA256_DIGEST_SIZE,
18092 + },
18093 + .caam = {
18094 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18095 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18096 + OP_ALG_AAI_HMAC_PRECOMP,
18097 + },
18098 + },
18099 + {
18100 + .aead = {
18101 + .base = {
18102 + .cra_name = "echainiv(authenc(hmac(sha256),"
18103 + "cbc(des3_ede)))",
18104 + .cra_driver_name = "echainiv-authenc-"
18105 + "hmac-sha256-"
18106 + "cbc-des3_ede-caam-qi2",
18107 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18108 + },
18109 + .setkey = aead_setkey,
18110 + .setauthsize = aead_setauthsize,
18111 + .encrypt = aead_encrypt,
18112 + .decrypt = aead_decrypt,
18113 + .ivsize = DES3_EDE_BLOCK_SIZE,
18114 + .maxauthsize = SHA256_DIGEST_SIZE,
18115 + },
18116 + .caam = {
18117 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18118 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18119 + OP_ALG_AAI_HMAC_PRECOMP,
18120 + .geniv = true,
18121 + }
18122 + },
18123 + {
18124 + .aead = {
18125 + .base = {
18126 + .cra_name = "authenc(hmac(sha384),"
18127 + "cbc(des3_ede))",
18128 + .cra_driver_name = "authenc-hmac-sha384-"
18129 + "cbc-des3_ede-caam-qi2",
18130 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18131 + },
18132 + .setkey = aead_setkey,
18133 + .setauthsize = aead_setauthsize,
18134 + .encrypt = aead_encrypt,
18135 + .decrypt = aead_decrypt,
18136 + .ivsize = DES3_EDE_BLOCK_SIZE,
18137 + .maxauthsize = SHA384_DIGEST_SIZE,
18138 + },
18139 + .caam = {
18140 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18141 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18142 + OP_ALG_AAI_HMAC_PRECOMP,
18143 + },
18144 + },
18145 + {
18146 + .aead = {
18147 + .base = {
18148 + .cra_name = "echainiv(authenc(hmac(sha384),"
18149 + "cbc(des3_ede)))",
18150 + .cra_driver_name = "echainiv-authenc-"
18151 + "hmac-sha384-"
18152 + "cbc-des3_ede-caam-qi2",
18153 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18154 + },
18155 + .setkey = aead_setkey,
18156 + .setauthsize = aead_setauthsize,
18157 + .encrypt = aead_encrypt,
18158 + .decrypt = aead_decrypt,
18159 + .ivsize = DES3_EDE_BLOCK_SIZE,
18160 + .maxauthsize = SHA384_DIGEST_SIZE,
18161 + },
18162 + .caam = {
18163 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18164 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18165 + OP_ALG_AAI_HMAC_PRECOMP,
18166 + .geniv = true,
18167 + }
18168 + },
18169 + {
18170 + .aead = {
18171 + .base = {
18172 + .cra_name = "authenc(hmac(sha512),"
18173 + "cbc(des3_ede))",
18174 + .cra_driver_name = "authenc-hmac-sha512-"
18175 + "cbc-des3_ede-caam-qi2",
18176 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18177 + },
18178 + .setkey = aead_setkey,
18179 + .setauthsize = aead_setauthsize,
18180 + .encrypt = aead_encrypt,
18181 + .decrypt = aead_decrypt,
18182 + .ivsize = DES3_EDE_BLOCK_SIZE,
18183 + .maxauthsize = SHA512_DIGEST_SIZE,
18184 + },
18185 + .caam = {
18186 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18187 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18188 + OP_ALG_AAI_HMAC_PRECOMP,
18189 + },
18190 + },
18191 + {
18192 + .aead = {
18193 + .base = {
18194 + .cra_name = "echainiv(authenc(hmac(sha512),"
18195 + "cbc(des3_ede)))",
18196 + .cra_driver_name = "echainiv-authenc-"
18197 + "hmac-sha512-"
18198 + "cbc-des3_ede-caam-qi2",
18199 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18200 + },
18201 + .setkey = aead_setkey,
18202 + .setauthsize = aead_setauthsize,
18203 + .encrypt = aead_encrypt,
18204 + .decrypt = aead_decrypt,
18205 + .ivsize = DES3_EDE_BLOCK_SIZE,
18206 + .maxauthsize = SHA512_DIGEST_SIZE,
18207 + },
18208 + .caam = {
18209 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18210 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18211 + OP_ALG_AAI_HMAC_PRECOMP,
18212 + .geniv = true,
18213 + }
18214 + },
18215 + {
18216 + .aead = {
18217 + .base = {
18218 + .cra_name = "authenc(hmac(md5),cbc(des))",
18219 + .cra_driver_name = "authenc-hmac-md5-"
18220 + "cbc-des-caam-qi2",
18221 + .cra_blocksize = DES_BLOCK_SIZE,
18222 + },
18223 + .setkey = aead_setkey,
18224 + .setauthsize = aead_setauthsize,
18225 + .encrypt = aead_encrypt,
18226 + .decrypt = aead_decrypt,
18227 + .ivsize = DES_BLOCK_SIZE,
18228 + .maxauthsize = MD5_DIGEST_SIZE,
18229 + },
18230 + .caam = {
18231 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18232 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18233 + OP_ALG_AAI_HMAC_PRECOMP,
18234 + },
18235 + },
18236 + {
18237 + .aead = {
18238 + .base = {
18239 + .cra_name = "echainiv(authenc(hmac(md5),"
18240 + "cbc(des)))",
18241 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18242 + "cbc-des-caam-qi2",
18243 + .cra_blocksize = DES_BLOCK_SIZE,
18244 + },
18245 + .setkey = aead_setkey,
18246 + .setauthsize = aead_setauthsize,
18247 + .encrypt = aead_encrypt,
18248 + .decrypt = aead_decrypt,
18249 + .ivsize = DES_BLOCK_SIZE,
18250 + .maxauthsize = MD5_DIGEST_SIZE,
18251 + },
18252 + .caam = {
18253 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18254 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18255 + OP_ALG_AAI_HMAC_PRECOMP,
18256 + .geniv = true,
18257 + }
18258 + },
18259 + {
18260 + .aead = {
18261 + .base = {
18262 + .cra_name = "authenc(hmac(sha1),cbc(des))",
18263 + .cra_driver_name = "authenc-hmac-sha1-"
18264 + "cbc-des-caam-qi2",
18265 + .cra_blocksize = DES_BLOCK_SIZE,
18266 + },
18267 + .setkey = aead_setkey,
18268 + .setauthsize = aead_setauthsize,
18269 + .encrypt = aead_encrypt,
18270 + .decrypt = aead_decrypt,
18271 + .ivsize = DES_BLOCK_SIZE,
18272 + .maxauthsize = SHA1_DIGEST_SIZE,
18273 + },
18274 + .caam = {
18275 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18276 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18277 + OP_ALG_AAI_HMAC_PRECOMP,
18278 + },
18279 + },
18280 + {
18281 + .aead = {
18282 + .base = {
18283 + .cra_name = "echainiv(authenc(hmac(sha1),"
18284 + "cbc(des)))",
18285 + .cra_driver_name = "echainiv-authenc-"
18286 + "hmac-sha1-cbc-des-caam-qi2",
18287 + .cra_blocksize = DES_BLOCK_SIZE,
18288 + },
18289 + .setkey = aead_setkey,
18290 + .setauthsize = aead_setauthsize,
18291 + .encrypt = aead_encrypt,
18292 + .decrypt = aead_decrypt,
18293 + .ivsize = DES_BLOCK_SIZE,
18294 + .maxauthsize = SHA1_DIGEST_SIZE,
18295 + },
18296 + .caam = {
18297 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18298 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18299 + OP_ALG_AAI_HMAC_PRECOMP,
18300 + .geniv = true,
18301 + }
18302 + },
18303 + {
18304 + .aead = {
18305 + .base = {
18306 + .cra_name = "authenc(hmac(sha224),cbc(des))",
18307 + .cra_driver_name = "authenc-hmac-sha224-"
18308 + "cbc-des-caam-qi2",
18309 + .cra_blocksize = DES_BLOCK_SIZE,
18310 + },
18311 + .setkey = aead_setkey,
18312 + .setauthsize = aead_setauthsize,
18313 + .encrypt = aead_encrypt,
18314 + .decrypt = aead_decrypt,
18315 + .ivsize = DES_BLOCK_SIZE,
18316 + .maxauthsize = SHA224_DIGEST_SIZE,
18317 + },
18318 + .caam = {
18319 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18320 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18321 + OP_ALG_AAI_HMAC_PRECOMP,
18322 + },
18323 + },
18324 + {
18325 + .aead = {
18326 + .base = {
18327 + .cra_name = "echainiv(authenc(hmac(sha224),"
18328 + "cbc(des)))",
18329 + .cra_driver_name = "echainiv-authenc-"
18330 + "hmac-sha224-cbc-des-"
18331 + "caam-qi2",
18332 + .cra_blocksize = DES_BLOCK_SIZE,
18333 + },
18334 + .setkey = aead_setkey,
18335 + .setauthsize = aead_setauthsize,
18336 + .encrypt = aead_encrypt,
18337 + .decrypt = aead_decrypt,
18338 + .ivsize = DES_BLOCK_SIZE,
18339 + .maxauthsize = SHA224_DIGEST_SIZE,
18340 + },
18341 + .caam = {
18342 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18343 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18344 + OP_ALG_AAI_HMAC_PRECOMP,
18345 + .geniv = true,
18346 + }
18347 + },
18348 + {
18349 + .aead = {
18350 + .base = {
18351 + .cra_name = "authenc(hmac(sha256),cbc(des))",
18352 + .cra_driver_name = "authenc-hmac-sha256-"
18353 + "cbc-des-caam-qi2",
18354 + .cra_blocksize = DES_BLOCK_SIZE,
18355 + },
18356 + .setkey = aead_setkey,
18357 + .setauthsize = aead_setauthsize,
18358 + .encrypt = aead_encrypt,
18359 + .decrypt = aead_decrypt,
18360 + .ivsize = DES_BLOCK_SIZE,
18361 + .maxauthsize = SHA256_DIGEST_SIZE,
18362 + },
18363 + .caam = {
18364 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18365 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18366 + OP_ALG_AAI_HMAC_PRECOMP,
18367 + },
18368 + },
18369 + {
18370 + .aead = {
18371 + .base = {
18372 + .cra_name = "echainiv(authenc(hmac(sha256),"
18373 + "cbc(des)))",
18374 + .cra_driver_name = "echainiv-authenc-"
18375 + "hmac-sha256-cbc-desi-"
18376 + "caam-qi2",
18377 + .cra_blocksize = DES_BLOCK_SIZE,
18378 + },
18379 + .setkey = aead_setkey,
18380 + .setauthsize = aead_setauthsize,
18381 + .encrypt = aead_encrypt,
18382 + .decrypt = aead_decrypt,
18383 + .ivsize = DES_BLOCK_SIZE,
18384 + .maxauthsize = SHA256_DIGEST_SIZE,
18385 + },
18386 + .caam = {
18387 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18388 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18389 + OP_ALG_AAI_HMAC_PRECOMP,
18390 + .geniv = true,
18391 + },
18392 + },
18393 + {
18394 + .aead = {
18395 + .base = {
18396 + .cra_name = "authenc(hmac(sha384),cbc(des))",
18397 + .cra_driver_name = "authenc-hmac-sha384-"
18398 + "cbc-des-caam-qi2",
18399 + .cra_blocksize = DES_BLOCK_SIZE,
18400 + },
18401 + .setkey = aead_setkey,
18402 + .setauthsize = aead_setauthsize,
18403 + .encrypt = aead_encrypt,
18404 + .decrypt = aead_decrypt,
18405 + .ivsize = DES_BLOCK_SIZE,
18406 + .maxauthsize = SHA384_DIGEST_SIZE,
18407 + },
18408 + .caam = {
18409 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18410 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18411 + OP_ALG_AAI_HMAC_PRECOMP,
18412 + },
18413 + },
18414 + {
18415 + .aead = {
18416 + .base = {
18417 + .cra_name = "echainiv(authenc(hmac(sha384),"
18418 + "cbc(des)))",
18419 + .cra_driver_name = "echainiv-authenc-"
18420 + "hmac-sha384-cbc-des-"
18421 + "caam-qi2",
18422 + .cra_blocksize = DES_BLOCK_SIZE,
18423 + },
18424 + .setkey = aead_setkey,
18425 + .setauthsize = aead_setauthsize,
18426 + .encrypt = aead_encrypt,
18427 + .decrypt = aead_decrypt,
18428 + .ivsize = DES_BLOCK_SIZE,
18429 + .maxauthsize = SHA384_DIGEST_SIZE,
18430 + },
18431 + .caam = {
18432 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18433 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18434 + OP_ALG_AAI_HMAC_PRECOMP,
18435 + .geniv = true,
18436 + }
18437 + },
18438 + {
18439 + .aead = {
18440 + .base = {
18441 + .cra_name = "authenc(hmac(sha512),cbc(des))",
18442 + .cra_driver_name = "authenc-hmac-sha512-"
18443 + "cbc-des-caam-qi2",
18444 + .cra_blocksize = DES_BLOCK_SIZE,
18445 + },
18446 + .setkey = aead_setkey,
18447 + .setauthsize = aead_setauthsize,
18448 + .encrypt = aead_encrypt,
18449 + .decrypt = aead_decrypt,
18450 + .ivsize = DES_BLOCK_SIZE,
18451 + .maxauthsize = SHA512_DIGEST_SIZE,
18452 + },
18453 + .caam = {
18454 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18455 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18456 + OP_ALG_AAI_HMAC_PRECOMP,
18457 + }
18458 + },
18459 + {
18460 + .aead = {
18461 + .base = {
18462 + .cra_name = "echainiv(authenc(hmac(sha512),"
18463 + "cbc(des)))",
18464 + .cra_driver_name = "echainiv-authenc-"
18465 + "hmac-sha512-cbc-des-"
18466 + "caam-qi2",
18467 + .cra_blocksize = DES_BLOCK_SIZE,
18468 + },
18469 + .setkey = aead_setkey,
18470 + .setauthsize = aead_setauthsize,
18471 + .encrypt = aead_encrypt,
18472 + .decrypt = aead_decrypt,
18473 + .ivsize = DES_BLOCK_SIZE,
18474 + .maxauthsize = SHA512_DIGEST_SIZE,
18475 + },
18476 + .caam = {
18477 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18478 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18479 + OP_ALG_AAI_HMAC_PRECOMP,
18480 + .geniv = true,
18481 + }
18482 + },
18483 + {
18484 + .aead = {
18485 + .base = {
18486 + .cra_name = "authenc(hmac(md5),"
18487 + "rfc3686(ctr(aes)))",
18488 + .cra_driver_name = "authenc-hmac-md5-"
18489 + "rfc3686-ctr-aes-caam-qi2",
18490 + .cra_blocksize = 1,
18491 + },
18492 + .setkey = aead_setkey,
18493 + .setauthsize = aead_setauthsize,
18494 + .encrypt = aead_encrypt,
18495 + .decrypt = aead_decrypt,
18496 + .ivsize = CTR_RFC3686_IV_SIZE,
18497 + .maxauthsize = MD5_DIGEST_SIZE,
18498 + },
18499 + .caam = {
18500 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18501 + OP_ALG_AAI_CTR_MOD128,
18502 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18503 + OP_ALG_AAI_HMAC_PRECOMP,
18504 + .rfc3686 = true,
18505 + },
18506 + },
18507 + {
18508 + .aead = {
18509 + .base = {
18510 + .cra_name = "seqiv(authenc("
18511 + "hmac(md5),rfc3686(ctr(aes))))",
18512 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
18513 + "rfc3686-ctr-aes-caam-qi2",
18514 + .cra_blocksize = 1,
18515 + },
18516 + .setkey = aead_setkey,
18517 + .setauthsize = aead_setauthsize,
18518 + .encrypt = aead_encrypt,
18519 + .decrypt = aead_decrypt,
18520 + .ivsize = CTR_RFC3686_IV_SIZE,
18521 + .maxauthsize = MD5_DIGEST_SIZE,
18522 + },
18523 + .caam = {
18524 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18525 + OP_ALG_AAI_CTR_MOD128,
18526 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18527 + OP_ALG_AAI_HMAC_PRECOMP,
18528 + .rfc3686 = true,
18529 + .geniv = true,
18530 + },
18531 + },
18532 + {
18533 + .aead = {
18534 + .base = {
18535 + .cra_name = "authenc(hmac(sha1),"
18536 + "rfc3686(ctr(aes)))",
18537 + .cra_driver_name = "authenc-hmac-sha1-"
18538 + "rfc3686-ctr-aes-caam-qi2",
18539 + .cra_blocksize = 1,
18540 + },
18541 + .setkey = aead_setkey,
18542 + .setauthsize = aead_setauthsize,
18543 + .encrypt = aead_encrypt,
18544 + .decrypt = aead_decrypt,
18545 + .ivsize = CTR_RFC3686_IV_SIZE,
18546 + .maxauthsize = SHA1_DIGEST_SIZE,
18547 + },
18548 + .caam = {
18549 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18550 + OP_ALG_AAI_CTR_MOD128,
18551 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18552 + OP_ALG_AAI_HMAC_PRECOMP,
18553 + .rfc3686 = true,
18554 + },
18555 + },
18556 + {
18557 + .aead = {
18558 + .base = {
18559 + .cra_name = "seqiv(authenc("
18560 + "hmac(sha1),rfc3686(ctr(aes))))",
18561 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18562 + "rfc3686-ctr-aes-caam-qi2",
18563 + .cra_blocksize = 1,
18564 + },
18565 + .setkey = aead_setkey,
18566 + .setauthsize = aead_setauthsize,
18567 + .encrypt = aead_encrypt,
18568 + .decrypt = aead_decrypt,
18569 + .ivsize = CTR_RFC3686_IV_SIZE,
18570 + .maxauthsize = SHA1_DIGEST_SIZE,
18571 + },
18572 + .caam = {
18573 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18574 + OP_ALG_AAI_CTR_MOD128,
18575 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18576 + OP_ALG_AAI_HMAC_PRECOMP,
18577 + .rfc3686 = true,
18578 + .geniv = true,
18579 + },
18580 + },
18581 + {
18582 + .aead = {
18583 + .base = {
18584 + .cra_name = "authenc(hmac(sha224),"
18585 + "rfc3686(ctr(aes)))",
18586 + .cra_driver_name = "authenc-hmac-sha224-"
18587 + "rfc3686-ctr-aes-caam-qi2",
18588 + .cra_blocksize = 1,
18589 + },
18590 + .setkey = aead_setkey,
18591 + .setauthsize = aead_setauthsize,
18592 + .encrypt = aead_encrypt,
18593 + .decrypt = aead_decrypt,
18594 + .ivsize = CTR_RFC3686_IV_SIZE,
18595 + .maxauthsize = SHA224_DIGEST_SIZE,
18596 + },
18597 + .caam = {
18598 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18599 + OP_ALG_AAI_CTR_MOD128,
18600 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18601 + OP_ALG_AAI_HMAC_PRECOMP,
18602 + .rfc3686 = true,
18603 + },
18604 + },
18605 + {
18606 + .aead = {
18607 + .base = {
18608 + .cra_name = "seqiv(authenc("
18609 + "hmac(sha224),rfc3686(ctr(aes))))",
18610 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18611 + "rfc3686-ctr-aes-caam-qi2",
18612 + .cra_blocksize = 1,
18613 + },
18614 + .setkey = aead_setkey,
18615 + .setauthsize = aead_setauthsize,
18616 + .encrypt = aead_encrypt,
18617 + .decrypt = aead_decrypt,
18618 + .ivsize = CTR_RFC3686_IV_SIZE,
18619 + .maxauthsize = SHA224_DIGEST_SIZE,
18620 + },
18621 + .caam = {
18622 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18623 + OP_ALG_AAI_CTR_MOD128,
18624 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18625 + OP_ALG_AAI_HMAC_PRECOMP,
18626 + .rfc3686 = true,
18627 + .geniv = true,
18628 + },
18629 + },
18630 + {
18631 + .aead = {
18632 + .base = {
18633 + .cra_name = "authenc(hmac(sha256),"
18634 + "rfc3686(ctr(aes)))",
18635 + .cra_driver_name = "authenc-hmac-sha256-"
18636 + "rfc3686-ctr-aes-caam-qi2",
18637 + .cra_blocksize = 1,
18638 + },
18639 + .setkey = aead_setkey,
18640 + .setauthsize = aead_setauthsize,
18641 + .encrypt = aead_encrypt,
18642 + .decrypt = aead_decrypt,
18643 + .ivsize = CTR_RFC3686_IV_SIZE,
18644 + .maxauthsize = SHA256_DIGEST_SIZE,
18645 + },
18646 + .caam = {
18647 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18648 + OP_ALG_AAI_CTR_MOD128,
18649 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18650 + OP_ALG_AAI_HMAC_PRECOMP,
18651 + .rfc3686 = true,
18652 + },
18653 + },
18654 + {
18655 + .aead = {
18656 + .base = {
18657 + .cra_name = "seqiv(authenc(hmac(sha256),"
18658 + "rfc3686(ctr(aes))))",
18659 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18660 + "rfc3686-ctr-aes-caam-qi2",
18661 + .cra_blocksize = 1,
18662 + },
18663 + .setkey = aead_setkey,
18664 + .setauthsize = aead_setauthsize,
18665 + .encrypt = aead_encrypt,
18666 + .decrypt = aead_decrypt,
18667 + .ivsize = CTR_RFC3686_IV_SIZE,
18668 + .maxauthsize = SHA256_DIGEST_SIZE,
18669 + },
18670 + .caam = {
18671 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18672 + OP_ALG_AAI_CTR_MOD128,
18673 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18674 + OP_ALG_AAI_HMAC_PRECOMP,
18675 + .rfc3686 = true,
18676 + .geniv = true,
18677 + },
18678 + },
18679 + {
18680 + .aead = {
18681 + .base = {
18682 + .cra_name = "authenc(hmac(sha384),"
18683 + "rfc3686(ctr(aes)))",
18684 + .cra_driver_name = "authenc-hmac-sha384-"
18685 + "rfc3686-ctr-aes-caam-qi2",
18686 + .cra_blocksize = 1,
18687 + },
18688 + .setkey = aead_setkey,
18689 + .setauthsize = aead_setauthsize,
18690 + .encrypt = aead_encrypt,
18691 + .decrypt = aead_decrypt,
18692 + .ivsize = CTR_RFC3686_IV_SIZE,
18693 + .maxauthsize = SHA384_DIGEST_SIZE,
18694 + },
18695 + .caam = {
18696 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18697 + OP_ALG_AAI_CTR_MOD128,
18698 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18699 + OP_ALG_AAI_HMAC_PRECOMP,
18700 + .rfc3686 = true,
18701 + },
18702 + },
18703 + {
18704 + .aead = {
18705 + .base = {
18706 + .cra_name = "seqiv(authenc(hmac(sha384),"
18707 + "rfc3686(ctr(aes))))",
18708 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18709 + "rfc3686-ctr-aes-caam-qi2",
18710 + .cra_blocksize = 1,
18711 + },
18712 + .setkey = aead_setkey,
18713 + .setauthsize = aead_setauthsize,
18714 + .encrypt = aead_encrypt,
18715 + .decrypt = aead_decrypt,
18716 + .ivsize = CTR_RFC3686_IV_SIZE,
18717 + .maxauthsize = SHA384_DIGEST_SIZE,
18718 + },
18719 + .caam = {
18720 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18721 + OP_ALG_AAI_CTR_MOD128,
18722 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18723 + OP_ALG_AAI_HMAC_PRECOMP,
18724 + .rfc3686 = true,
18725 + .geniv = true,
18726 + },
18727 + },
18728 + {
18729 + .aead = {
18730 + .base = {
18731 + .cra_name = "authenc(hmac(sha512),"
18732 + "rfc3686(ctr(aes)))",
18733 + .cra_driver_name = "authenc-hmac-sha512-"
18734 + "rfc3686-ctr-aes-caam-qi2",
18735 + .cra_blocksize = 1,
18736 + },
18737 + .setkey = aead_setkey,
18738 + .setauthsize = aead_setauthsize,
18739 + .encrypt = aead_encrypt,
18740 + .decrypt = aead_decrypt,
18741 + .ivsize = CTR_RFC3686_IV_SIZE,
18742 + .maxauthsize = SHA512_DIGEST_SIZE,
18743 + },
18744 + .caam = {
18745 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18746 + OP_ALG_AAI_CTR_MOD128,
18747 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18748 + OP_ALG_AAI_HMAC_PRECOMP,
18749 + .rfc3686 = true,
18750 + },
18751 + },
18752 + {
18753 + .aead = {
18754 + .base = {
18755 + .cra_name = "seqiv(authenc(hmac(sha512),"
18756 + "rfc3686(ctr(aes))))",
18757 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18758 + "rfc3686-ctr-aes-caam-qi2",
18759 + .cra_blocksize = 1,
18760 + },
18761 + .setkey = aead_setkey,
18762 + .setauthsize = aead_setauthsize,
18763 + .encrypt = aead_encrypt,
18764 + .decrypt = aead_decrypt,
18765 + .ivsize = CTR_RFC3686_IV_SIZE,
18766 + .maxauthsize = SHA512_DIGEST_SIZE,
18767 + },
18768 + .caam = {
18769 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18770 + OP_ALG_AAI_CTR_MOD128,
18771 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18772 + OP_ALG_AAI_HMAC_PRECOMP,
18773 + .rfc3686 = true,
18774 + .geniv = true,
18775 + },
18776 + },
18777 + {
18778 + .aead = {
18779 + .base = {
18780 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
18781 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18782 + .cra_blocksize = AES_BLOCK_SIZE,
18783 + },
18784 + .setkey = tls_setkey,
18785 + .setauthsize = tls_setauthsize,
18786 + .encrypt = tls_encrypt,
18787 + .decrypt = tls_decrypt,
18788 + .ivsize = AES_BLOCK_SIZE,
18789 + .maxauthsize = SHA1_DIGEST_SIZE,
18790 + },
18791 + .caam = {
18792 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18793 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18794 + OP_ALG_AAI_HMAC_PRECOMP,
18795 + },
18796 + },
18797 +};
18798 +
18799 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18800 + *template)
18801 +{
18802 + struct caam_crypto_alg *t_alg;
18803 + struct crypto_alg *alg;
18804 +
18805 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18806 + if (!t_alg)
18807 + return ERR_PTR(-ENOMEM);
18808 +
18809 + alg = &t_alg->crypto_alg;
18810 +
18811 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18812 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18813 + template->driver_name);
18814 + alg->cra_module = THIS_MODULE;
18815 + alg->cra_exit = caam_cra_exit;
18816 + alg->cra_priority = CAAM_CRA_PRIORITY;
18817 + alg->cra_blocksize = template->blocksize;
18818 + alg->cra_alignmask = 0;
18819 + alg->cra_ctxsize = sizeof(struct caam_ctx);
18820 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18821 + template->type;
18822 + switch (template->type) {
18823 + case CRYPTO_ALG_TYPE_GIVCIPHER:
18824 + alg->cra_init = caam_cra_init_ablkcipher;
18825 + alg->cra_type = &crypto_givcipher_type;
18826 + alg->cra_ablkcipher = template->template_ablkcipher;
18827 + break;
18828 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
18829 + alg->cra_init = caam_cra_init_ablkcipher;
18830 + alg->cra_type = &crypto_ablkcipher_type;
18831 + alg->cra_ablkcipher = template->template_ablkcipher;
18832 + break;
18833 + }
18834 +
18835 + t_alg->caam.class1_alg_type = template->class1_alg_type;
18836 + t_alg->caam.class2_alg_type = template->class2_alg_type;
18837 +
18838 + return t_alg;
18839 +}
18840 +
18841 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18842 +{
18843 + struct aead_alg *alg = &t_alg->aead;
18844 +
18845 + alg->base.cra_module = THIS_MODULE;
18846 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
18847 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18848 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18849 +
18850 + alg->init = caam_cra_init_aead;
18851 + alg->exit = caam_cra_exit_aead;
18852 +}
18853 +
18854 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
18855 +{
18856 + struct dpaa2_caam_priv_per_cpu *ppriv;
18857 +
18858 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
18859 + napi_schedule_irqoff(&ppriv->napi);
18860 +}
18861 +
18862 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
18863 +{
18864 + struct device *dev = priv->dev;
18865 + struct dpaa2_io_notification_ctx *nctx;
18866 + struct dpaa2_caam_priv_per_cpu *ppriv;
18867 + int err, i = 0, cpu;
18868 +
18869 + for_each_online_cpu(cpu) {
18870 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18871 + ppriv->priv = priv;
18872 + nctx = &ppriv->nctx;
18873 + nctx->is_cdan = 0;
18874 + nctx->id = ppriv->rsp_fqid;
18875 + nctx->desired_cpu = cpu;
18876 + nctx->cb = dpaa2_caam_fqdan_cb;
18877 +
18878 + /* Register notification callbacks */
18879 + err = dpaa2_io_service_register(NULL, nctx);
18880 + if (unlikely(err)) {
18881 + dev_err(dev, "notification register failed\n");
18882 + nctx->cb = NULL;
18883 + goto err;
18884 + }
18885 +
18886 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
18887 + dev);
18888 + if (unlikely(!ppriv->store)) {
18889 + dev_err(dev, "dpaa2_io_store_create() failed\n");
18890 + goto err;
18891 + }
18892 +
18893 + if (++i == priv->num_pairs)
18894 + break;
18895 + }
18896 +
18897 + return 0;
18898 +
18899 +err:
18900 + for_each_online_cpu(cpu) {
18901 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18902 + if (!ppriv->nctx.cb)
18903 + break;
18904 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18905 + }
18906 +
18907 + for_each_online_cpu(cpu) {
18908 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18909 + if (!ppriv->store)
18910 + break;
18911 + dpaa2_io_store_destroy(ppriv->store);
18912 + }
18913 +
18914 + return err;
18915 +}
18916 +
18917 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
18918 +{
18919 + struct dpaa2_caam_priv_per_cpu *ppriv;
18920 + int i = 0, cpu;
18921 +
18922 + for_each_online_cpu(cpu) {
18923 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18924 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18925 + dpaa2_io_store_destroy(ppriv->store);
18926 +
18927 + if (++i == priv->num_pairs)
18928 + return;
18929 + }
18930 +}
18931 +
18932 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
18933 +{
18934 + struct dpseci_rx_queue_cfg rx_queue_cfg;
18935 + struct device *dev = priv->dev;
18936 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18937 + struct dpaa2_caam_priv_per_cpu *ppriv;
18938 + int err = 0, i = 0, cpu;
18939 +
18940 + /* Configure Rx queues */
18941 + for_each_online_cpu(cpu) {
18942 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18943 +
18944 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
18945 + DPSECI_QUEUE_OPT_USER_CTX;
18946 + rx_queue_cfg.order_preservation_en = 0;
18947 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
18948 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
18949 + /*
18950 + * Rx priority (WQ) doesn't really matter, since we use
18951 + * pull mode, i.e. volatile dequeues from specific FQs
18952 + */
18953 + rx_queue_cfg.dest_cfg.priority = 0;
18954 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
18955 +
18956 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
18957 + &rx_queue_cfg);
18958 + if (err) {
18959 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
18960 + err);
18961 + return err;
18962 + }
18963 +
18964 + if (++i == priv->num_pairs)
18965 + break;
18966 + }
18967 +
18968 + return err;
18969 +}
18970 +
18971 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
18972 +{
18973 + struct device *dev = priv->dev;
18974 +
18975 + if (!priv->cscn_mem)
18976 + return;
18977 +
18978 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
18979 + kfree(priv->cscn_mem);
18980 +}
18981 +
18982 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
18983 +{
18984 + struct device *dev = priv->dev;
18985 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18986 +
18987 + dpaa2_dpseci_congestion_free(priv);
18988 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
18989 +}
18990 +
18991 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
18992 + const struct dpaa2_fd *fd)
18993 +{
18994 + struct caam_request *req;
18995 + u32 fd_err;
18996 +
18997 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
18998 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
18999 + return;
19000 + }
19001 +
19002 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
19003 + if (unlikely(fd_err))
19004 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
19005 +
19006 + /*
19007 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
19008 + * in FD[ERR] or FD[FRC].
19009 + */
19010 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
19011 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
19012 + DMA_BIDIRECTIONAL);
19013 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
19014 +}
19015 +
19016 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
19017 +{
19018 + int err;
19019 +
19020 + /* Retry while portal is busy */
19021 + do {
19022 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
19023 + ppriv->store);
19024 + } while (err == -EBUSY);
19025 +
19026 + if (unlikely(err))
19027 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
19028 +
19029 + return err;
19030 +}
19031 +
19032 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
19033 +{
19034 + struct dpaa2_dq *dq;
19035 + int cleaned = 0, is_last;
19036 +
19037 + do {
19038 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
19039 + if (unlikely(!dq)) {
19040 + if (unlikely(!is_last)) {
19041 + dev_dbg(ppriv->priv->dev,
19042 + "FQ %d returned no valid frames\n",
19043 + ppriv->rsp_fqid);
19044 + /*
19045 + * MUST retry until we get some sort of
19046 + * valid response token (be it "empty dequeue"
19047 + * or a valid frame).
19048 + */
19049 + continue;
19050 + }
19051 + break;
19052 + }
19053 +
19054 + /* Process FD */
19055 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
19056 + cleaned++;
19057 + } while (!is_last);
19058 +
19059 + return cleaned;
19060 +}
19061 +
19062 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
19063 +{
19064 + struct dpaa2_caam_priv_per_cpu *ppriv;
19065 + struct dpaa2_caam_priv *priv;
19066 + int err, cleaned = 0, store_cleaned;
19067 +
19068 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
19069 + priv = ppriv->priv;
19070 +
19071 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
19072 + return 0;
19073 +
19074 + do {
19075 + store_cleaned = dpaa2_caam_store_consume(ppriv);
19076 + cleaned += store_cleaned;
19077 +
19078 + if (store_cleaned == 0 ||
19079 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
19080 + break;
19081 +
19082 + /* Try to dequeue some more */
19083 + err = dpaa2_caam_pull_fq(ppriv);
19084 + if (unlikely(err))
19085 + break;
19086 + } while (1);
19087 +
19088 + if (cleaned < budget) {
19089 + napi_complete_done(napi, cleaned);
19090 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
19091 + if (unlikely(err))
19092 + dev_err(priv->dev, "Notification rearm failed: %d\n",
19093 + err);
19094 + }
19095 +
19096 + return cleaned;
19097 +}
19098 +
19099 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
19100 + u16 token)
19101 +{
19102 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
19103 + struct device *dev = priv->dev;
19104 + int err;
19105 +
19106 + /*
19107 + * Congestion group feature supported starting with DPSECI API v5.1
19108 + * and only when object has been created with this capability.
19109 + */
19110 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
19111 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
19112 + return 0;
19113 +
19114 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
19115 + GFP_KERNEL | GFP_DMA);
19116 + if (!priv->cscn_mem)
19117 + return -ENOMEM;
19118 +
19119 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
19120 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
19121 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19122 + if (dma_mapping_error(dev, priv->cscn_dma)) {
19123 + dev_err(dev, "Error mapping CSCN memory area\n");
19124 + err = -ENOMEM;
19125 + goto err_dma_map;
19126 + }
19127 +
19128 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
19129 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
19130 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
19131 + cong_notif_cfg.message_ctx = (u64)priv;
19132 + cong_notif_cfg.message_iova = priv->cscn_dma;
19133 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
19134 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
19135 + DPSECI_CGN_MODE_COHERENT_WRITE;
19136 +
19137 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
19138 + &cong_notif_cfg);
19139 + if (err) {
19140 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
19141 + goto err_set_cong;
19142 + }
19143 +
19144 + return 0;
19145 +
19146 +err_set_cong:
19147 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19148 +err_dma_map:
19149 + kfree(priv->cscn_mem);
19150 +
19151 + return err;
19152 +}
19153 +
19154 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
19155 +{
19156 + struct device *dev = &ls_dev->dev;
19157 + struct dpaa2_caam_priv *priv;
19158 + struct dpaa2_caam_priv_per_cpu *ppriv;
19159 + int err, cpu;
19160 + u8 i;
19161 +
19162 + priv = dev_get_drvdata(dev);
19163 +
19164 + priv->dev = dev;
19165 + priv->dpsec_id = ls_dev->obj_desc.id;
19166 +
19167 + /* Get a handle for the DPSECI this interface is associated with */
19168 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
19169 + if (err) {
19170 + dev_err(dev, "dpsec_open() failed: %d\n", err);
19171 + goto err_open;
19172 + }
19173 +
19174 + dev_info(dev, "Opened dpseci object successfully\n");
19175 +
19176 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
19177 + &priv->minor_ver);
19178 + if (err) {
19179 + dev_err(dev, "dpseci_get_api_version() failed\n");
19180 + goto err_get_vers;
19181 + }
19182 +
19183 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
19184 + &priv->dpseci_attr);
19185 + if (err) {
19186 + dev_err(dev, "dpseci_get_attributes() failed\n");
19187 + goto err_get_vers;
19188 + }
19189 +
19190 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
19191 + &priv->sec_attr);
19192 + if (err) {
19193 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
19194 + goto err_get_vers;
19195 + }
19196 +
19197 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
19198 + if (err) {
19199 + dev_err(dev, "setup_congestion() failed\n");
19200 + goto err_get_vers;
19201 + }
19202 +
19203 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
19204 + priv->dpseci_attr.num_tx_queues);
19205 + if (priv->num_pairs > num_online_cpus()) {
19206 + dev_warn(dev, "%d queues won't be used\n",
19207 + priv->num_pairs - num_online_cpus());
19208 + priv->num_pairs = num_online_cpus();
19209 + }
19210 +
19211 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
19212 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19213 + &priv->rx_queue_attr[i]);
19214 + if (err) {
19215 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
19216 + goto err_get_rx_queue;
19217 + }
19218 + }
19219 +
19220 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
19221 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19222 + &priv->tx_queue_attr[i]);
19223 + if (err) {
19224 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
19225 + goto err_get_rx_queue;
19226 + }
19227 + }
19228 +
19229 + i = 0;
19230 + for_each_online_cpu(cpu) {
19231 + dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
19232 + priv->rx_queue_attr[i].fqid,
19233 + priv->tx_queue_attr[i].fqid);
19234 +
19235 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
19236 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
19237 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
19238 + ppriv->prio = i;
19239 +
19240 + ppriv->net_dev.dev = *dev;
19241 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
19242 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
19243 + DPAA2_CAAM_NAPI_WEIGHT);
19244 + if (++i == priv->num_pairs)
19245 + break;
19246 + }
19247 +
19248 + return 0;
19249 +
19250 +err_get_rx_queue:
19251 + dpaa2_dpseci_congestion_free(priv);
19252 +err_get_vers:
19253 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
19254 +err_open:
19255 + return err;
19256 +}
19257 +
19258 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
19259 +{
19260 + struct device *dev = priv->dev;
19261 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19262 + struct dpaa2_caam_priv_per_cpu *ppriv;
19263 + int err, i;
19264 +
19265 + for (i = 0; i < priv->num_pairs; i++) {
19266 + ppriv = per_cpu_ptr(priv->ppriv, i);
19267 + napi_enable(&ppriv->napi);
19268 + }
19269 +
19270 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
19271 + if (err) {
19272 + dev_err(dev, "dpseci_enable() failed\n");
19273 + return err;
19274 + }
19275 +
19276 + dev_info(dev, "DPSECI version %d.%d\n",
19277 + priv->major_ver,
19278 + priv->minor_ver);
19279 +
19280 + return 0;
19281 +}
19282 +
19283 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
19284 +{
19285 + struct device *dev = priv->dev;
19286 + struct dpaa2_caam_priv_per_cpu *ppriv;
19287 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19288 + int i, err = 0, enabled;
19289 +
19290 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
19291 + if (err) {
19292 + dev_err(dev, "dpseci_disable() failed\n");
19293 + return err;
19294 + }
19295 +
19296 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
19297 + if (err) {
19298 + dev_err(dev, "dpseci_is_enabled() failed\n");
19299 + return err;
19300 + }
19301 +
19302 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
19303 +
19304 + for (i = 0; i < priv->num_pairs; i++) {
19305 + ppriv = per_cpu_ptr(priv->ppriv, i);
19306 + napi_disable(&ppriv->napi);
19307 + netif_napi_del(&ppriv->napi);
19308 + }
19309 +
19310 + return 0;
19311 +}
19312 +
19313 +static struct list_head alg_list;
19314 +
19315 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
19316 +{
19317 + struct device *dev;
19318 + struct dpaa2_caam_priv *priv;
19319 + int i, err = 0;
19320 + bool registered = false;
19321 +
19322 + /*
19323 + * There is no way to get CAAM endianness - there is no direct register
19324 + * space access and MC f/w does not provide this attribute.
19325 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
19326 + * property.
19327 + */
19328 + caam_little_end = true;
19329 +
19330 + caam_imx = false;
19331 +
19332 + dev = &dpseci_dev->dev;
19333 +
19334 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
19335 + if (!priv)
19336 + return -ENOMEM;
19337 +
19338 + dev_set_drvdata(dev, priv);
19339 +
19340 + priv->domain = iommu_get_domain_for_dev(dev);
19341 +
19342 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
19343 + 0, SLAB_CACHE_DMA, NULL);
19344 + if (!qi_cache) {
19345 + dev_err(dev, "Can't allocate SEC cache\n");
19346 + err = -ENOMEM;
19347 + goto err_qicache;
19348 + }
19349 +
19350 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
19351 + if (err) {
19352 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
19353 + goto err_dma_mask;
19354 + }
19355 +
19356 + /* Obtain a MC portal */
19357 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
19358 + if (err) {
19359 + dev_err(dev, "MC portal allocation failed\n");
19360 + goto err_dma_mask;
19361 + }
19362 +
19363 + priv->ppriv = alloc_percpu(*priv->ppriv);
19364 + if (!priv->ppriv) {
19365 + dev_err(dev, "alloc_percpu() failed\n");
19366 + goto err_alloc_ppriv;
19367 + }
19368 +
19369 + /* DPSECI initialization */
19370 + err = dpaa2_dpseci_setup(dpseci_dev);
19371 + if (err < 0) {
19372 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
19373 + goto err_dpseci_setup;
19374 + }
19375 +
19376 + /* DPIO */
19377 + err = dpaa2_dpseci_dpio_setup(priv);
19378 + if (err) {
19379 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
19380 + goto err_dpio_setup;
19381 + }
19382 +
19383 + /* DPSECI binding to DPIO */
19384 + err = dpaa2_dpseci_bind(priv);
19385 + if (err) {
19386 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
19387 + goto err_bind;
19388 + }
19389 +
19390 + /* DPSECI enable */
19391 + err = dpaa2_dpseci_enable(priv);
19392 + if (err) {
19393 + dev_err(dev, "dpaa2_dpseci_enable() failed");
19394 + goto err_bind;
19395 + }
19396 +
19397 + /* register crypto algorithms the device supports */
19398 + INIT_LIST_HEAD(&alg_list);
19399 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
19400 + struct caam_crypto_alg *t_alg;
19401 + struct caam_alg_template *alg = driver_algs + i;
19402 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
19403 +
19404 + /* Skip DES algorithms if not supported by device */
19405 + if (!priv->sec_attr.des_acc_num &&
19406 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
19407 + (alg_sel == OP_ALG_ALGSEL_DES)))
19408 + continue;
19409 +
19410 + /* Skip AES algorithms if not supported by device */
19411 + if (!priv->sec_attr.aes_acc_num &&
19412 + (alg_sel == OP_ALG_ALGSEL_AES))
19413 + continue;
19414 +
19415 + t_alg = caam_alg_alloc(alg);
19416 + if (IS_ERR(t_alg)) {
19417 + err = PTR_ERR(t_alg);
19418 + dev_warn(dev, "%s alg allocation failed: %d\n",
19419 + alg->driver_name, err);
19420 + continue;
19421 + }
19422 + t_alg->caam.dev = dev;
19423 +
19424 + err = crypto_register_alg(&t_alg->crypto_alg);
19425 + if (err) {
19426 + dev_warn(dev, "%s alg registration failed: %d\n",
19427 + t_alg->crypto_alg.cra_driver_name, err);
19428 + kfree(t_alg);
19429 + continue;
19430 + }
19431 +
19432 + list_add_tail(&t_alg->entry, &alg_list);
19433 + registered = true;
19434 + }
19435 +
19436 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19437 + struct caam_aead_alg *t_alg = driver_aeads + i;
19438 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
19439 + OP_ALG_ALGSEL_MASK;
19440 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
19441 + OP_ALG_ALGSEL_MASK;
19442 +
19443 + /* Skip DES algorithms if not supported by device */
19444 + if (!priv->sec_attr.des_acc_num &&
19445 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
19446 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
19447 + continue;
19448 +
19449 + /* Skip AES algorithms if not supported by device */
19450 + if (!priv->sec_attr.aes_acc_num &&
19451 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
19452 + continue;
19453 +
19454 + /*
19455 + * Skip algorithms requiring message digests
19456 + * if MD not supported by device.
19457 + */
19458 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
19459 + continue;
19460 +
19461 + t_alg->caam.dev = dev;
19462 + caam_aead_alg_init(t_alg);
19463 +
19464 + err = crypto_register_aead(&t_alg->aead);
19465 + if (err) {
19466 + dev_warn(dev, "%s alg registration failed: %d\n",
19467 + t_alg->aead.base.cra_driver_name, err);
19468 + continue;
19469 + }
19470 +
19471 + t_alg->registered = true;
19472 + registered = true;
19473 + }
19474 + if (registered)
19475 + dev_info(dev, "algorithms registered in /proc/crypto\n");
19476 +
19477 + return err;
19478 +
19479 +err_bind:
19480 + dpaa2_dpseci_dpio_free(priv);
19481 +err_dpio_setup:
19482 + dpaa2_dpseci_free(priv);
19483 +err_dpseci_setup:
19484 + free_percpu(priv->ppriv);
19485 +err_alloc_ppriv:
19486 + fsl_mc_portal_free(priv->mc_io);
19487 +err_dma_mask:
19488 + kmem_cache_destroy(qi_cache);
19489 +err_qicache:
19490 + dev_set_drvdata(dev, NULL);
19491 +
19492 + return err;
19493 +}
19494 +
19495 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
19496 +{
19497 + struct device *dev;
19498 + struct dpaa2_caam_priv *priv;
19499 + int i;
19500 +
19501 + dev = &ls_dev->dev;
19502 + priv = dev_get_drvdata(dev);
19503 +
19504 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19505 + struct caam_aead_alg *t_alg = driver_aeads + i;
19506 +
19507 + if (t_alg->registered)
19508 + crypto_unregister_aead(&t_alg->aead);
19509 + }
19510 +
19511 + if (alg_list.next) {
19512 + struct caam_crypto_alg *t_alg, *n;
19513 +
19514 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
19515 + crypto_unregister_alg(&t_alg->crypto_alg);
19516 + list_del(&t_alg->entry);
19517 + kfree(t_alg);
19518 + }
19519 + }
19520 +
19521 + dpaa2_dpseci_disable(priv);
19522 + dpaa2_dpseci_dpio_free(priv);
19523 + dpaa2_dpseci_free(priv);
19524 + free_percpu(priv->ppriv);
19525 + fsl_mc_portal_free(priv->mc_io);
19526 + dev_set_drvdata(dev, NULL);
19527 + kmem_cache_destroy(qi_cache);
19528 +
19529 + return 0;
19530 +}
19531 +
19532 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
19533 +{
19534 + struct dpaa2_fd fd;
19535 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
19536 + int err = 0, i, id;
19537 +
19538 + if (IS_ERR(req))
19539 + return PTR_ERR(req);
19540 +
19541 + if (priv->cscn_mem) {
19542 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
19543 + DPAA2_CSCN_SIZE,
19544 + DMA_FROM_DEVICE);
19545 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
19546 + dev_dbg_ratelimited(dev, "Dropping request\n");
19547 + return -EBUSY;
19548 + }
19549 + }
19550 +
19551 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
19552 +
19553 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
19554 + DMA_BIDIRECTIONAL);
19555 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
19556 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
19557 + goto err_out;
19558 + }
19559 +
19560 + memset(&fd, 0, sizeof(fd));
19561 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
19562 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
19563 + dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
19564 + dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
19565 +
19566 + /*
19567 + * There is no guarantee that preemption is disabled here,
19568 + * thus take action.
19569 + */
19570 + preempt_disable();
19571 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
19572 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
19573 + err = dpaa2_io_service_enqueue_fq(NULL,
19574 + priv->tx_queue_attr[id].fqid,
19575 + &fd);
19576 + if (err != -EBUSY)
19577 + break;
19578 + }
19579 + preempt_enable();
19580 +
19581 + if (unlikely(err < 0)) {
19582 + dev_err(dev, "Error enqueuing frame: %d\n", err);
19583 + goto err_out;
19584 + }
19585 +
19586 + return -EINPROGRESS;
19587 +
19588 +err_out:
19589 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
19590 + DMA_BIDIRECTIONAL);
19591 + return -EIO;
19592 +}
19593 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
19594 +
19595 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
19596 + {
19597 + .vendor = FSL_MC_VENDOR_FREESCALE,
19598 + .obj_type = "dpseci",
19599 + },
19600 + { .vendor = 0x0 }
19601 +};
19602 +
19603 +static struct fsl_mc_driver dpaa2_caam_driver = {
19604 + .driver = {
19605 + .name = KBUILD_MODNAME,
19606 + .owner = THIS_MODULE,
19607 + },
19608 + .probe = dpaa2_caam_probe,
19609 + .remove = dpaa2_caam_remove,
19610 + .match_id_table = dpaa2_caam_match_id_table
19611 +};
19612 +
19613 +MODULE_LICENSE("Dual BSD/GPL");
19614 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
19615 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
19616 +
19617 +module_fsl_mc_driver(dpaa2_caam_driver);
19618 diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
19619 new file mode 100644
19620 index 00000000..2ba179db
19621 --- /dev/null
19622 +++ b/drivers/crypto/caam/caamalg_qi2.h
19623 @@ -0,0 +1,265 @@
19624 +/*
19625 + * Copyright 2015-2016 Freescale Semiconductor Inc.
19626 + * Copyright 2017 NXP
19627 + *
19628 + * Redistribution and use in source and binary forms, with or without
19629 + * modification, are permitted provided that the following conditions are met:
19630 + * * Redistributions of source code must retain the above copyright
19631 + * notice, this list of conditions and the following disclaimer.
19632 + * * Redistributions in binary form must reproduce the above copyright
19633 + * notice, this list of conditions and the following disclaimer in the
19634 + * documentation and/or other materials provided with the distribution.
19635 + * * Neither the names of the above-listed copyright holders nor the
19636 + * names of any contributors may be used to endorse or promote products
19637 + * derived from this software without specific prior written permission.
19638 + *
19639 + *
19640 + * ALTERNATIVELY, this software may be distributed under the terms of the
19641 + * GNU General Public License ("GPL") as published by the Free Software
19642 + * Foundation, either version 2 of that License or (at your option) any
19643 + * later version.
19644 + *
19645 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19646 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19647 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19648 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19649 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19650 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19651 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19652 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19653 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19654 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19655 + * POSSIBILITY OF SUCH DAMAGE.
19656 + */
19657 +
19658 +#ifndef _CAAMALG_QI2_H_
19659 +#define _CAAMALG_QI2_H_
19660 +
19661 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
19662 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
19663 +#include <linux/threads.h>
19664 +#include "dpseci.h"
19665 +#include "desc_constr.h"
19666 +
19667 +#define DPAA2_CAAM_STORE_SIZE 16
19668 +/* NAPI weight *must* be a multiple of the store size. */
19669 +#define DPAA2_CAAM_NAPI_WEIGHT 64
19670 +
19671 +/* The congestion entrance threshold was chosen so that on LS2088
19672 + * we support the maximum throughput for the available memory
19673 + */
19674 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
19675 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
19676 +
19677 +/**
19678 + * dpaa2_caam_priv - driver private data
19679 + * @dpsec_id: DPSECI object unique ID
19680 + * @major_ver: DPSECI major version
19681 + * @minor_ver: DPSECI minor version
19682 + * @dpseci_attr: DPSECI attributes
19683 + * @sec_attr: SEC engine attributes
19684 + * @rx_queue_attr: array of Rx queue attributes
19685 + * @tx_queue_attr: array of Tx queue attributes
19686 + * @cscn_mem: pointer to memory region containing the
19687 + * dpaa2_cscn struct; it's size is larger than
19688 + * sizeof(struct dpaa2_cscn) to accommodate alignment
19689 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
19690 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
19691 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
19692 + * @dev: device associated with the DPSECI object
19693 + * @mc_io: pointer to MC portal's I/O object
19694 + * @domain: IOMMU domain
19695 + * @ppriv: per CPU pointers to private data
19696 + */
19697 +struct dpaa2_caam_priv {
19698 + int dpsec_id;
19699 +
19700 + u16 major_ver;
19701 + u16 minor_ver;
19702 +
19703 + struct dpseci_attr dpseci_attr;
19704 + struct dpseci_sec_attr sec_attr;
19705 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
19706 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
19707 + int num_pairs;
19708 +
19709 + /* congestion */
19710 + void *cscn_mem;
19711 + void *cscn_mem_aligned;
19712 + dma_addr_t cscn_dma;
19713 +
19714 + struct device *dev;
19715 + struct fsl_mc_io *mc_io;
19716 + struct iommu_domain *domain;
19717 +
19718 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
19719 +};
19720 +
19721 +/**
19722 + * dpaa2_caam_priv_per_cpu - per CPU private data
19723 + * @napi: napi structure
19724 + * @net_dev: netdev used by napi
19725 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
19726 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
19727 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
19728 + * @nctx: notification context of response FQ
19729 + * @store: where dequeued frames are stored
19730 + * @priv: backpointer to dpaa2_caam_priv
19731 + */
19732 +struct dpaa2_caam_priv_per_cpu {
19733 + struct napi_struct napi;
19734 + struct net_device net_dev;
19735 + int req_fqid;
19736 + int rsp_fqid;
19737 + int prio;
19738 + struct dpaa2_io_notification_ctx nctx;
19739 + struct dpaa2_io_store *store;
19740 + struct dpaa2_caam_priv *priv;
19741 +};
19742 +
19743 +/*
19744 + * The CAAM QI hardware constructs a job descriptor which points
19745 + * to the shared descriptor (as pointed to by context_a of the FQ to CAAM).
19746 + * When the job descriptor is executed by deco, the whole job
19747 + * descriptor together with shared descriptor gets loaded in
19748 + * deco buffer which is 64 words long (each 32-bit).
19749 + *
19750 + * The job descriptor constructed by QI hardware has layout:
19751 + *
19752 + * HEADER (1 word)
19753 + * Shdesc ptr (1 or 2 words)
19754 + * SEQ_OUT_PTR (1 word)
19755 + * Out ptr (1 or 2 words)
19756 + * Out length (1 word)
19757 + * SEQ_IN_PTR (1 word)
19758 + * In ptr (1 or 2 words)
19759 + * In length (1 word)
19760 + *
19761 + * The shdesc ptr is used to fetch shared descriptor contents
19762 + * into deco buffer.
19763 + *
19764 + * Apart from shdesc contents, the total number of words that
19765 + * get loaded in deco buffer are '8' or '11'. The remaining words
19766 + * in deco buffer can be used for storing shared descriptor.
19767 + */
19768 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
19769 +
19770 +/* Length of a single buffer in the QI driver memory cache */
19771 +#define CAAM_QI_MEMCACHE_SIZE 512
19772 +
19773 +/*
19774 + * aead_edesc - s/w-extended aead descriptor
19775 + * @src_nents: number of segments in input scatterlist
19776 + * @dst_nents: number of segments in output scatterlist
19777 + * @iv_dma: dma address of iv for checking continuity and link table
19778 + * @qm_sg_bytes: length of dma mapped h/w link table
19779 + * @qm_sg_dma: bus physical mapped address of h/w link table
19780 + * @assoclen_dma: bus physical mapped address of req->assoclen
19781 + * @sgt: the h/w link table
19782 + */
19783 +struct aead_edesc {
19784 + int src_nents;
19785 + int dst_nents;
19786 + dma_addr_t iv_dma;
19787 + int qm_sg_bytes;
19788 + dma_addr_t qm_sg_dma;
19789 + dma_addr_t assoclen_dma;
19790 +#define CAAM_QI_MAX_AEAD_SG \
19791 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
19792 + sizeof(struct dpaa2_sg_entry))
19793 + struct dpaa2_sg_entry sgt[0];
19794 +};
19795 +
19796 +/*
19797 + * tls_edesc - s/w-extended tls descriptor
19798 + * @src_nents: number of segments in input scatterlist
19799 + * @dst_nents: number of segments in output scatterlist
19800 + * @iv_dma: dma address of iv for checking continuity and link table
19801 + * @qm_sg_bytes: length of dma mapped h/w link table
19802 + * @qm_sg_dma: bus physical mapped address of h/w link table
19803 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
19804 + * @dst: pointer to output scatterlist, useful for unmapping
19805 + * @sgt: the h/w link table
19806 + */
19807 +struct tls_edesc {
19808 + int src_nents;
19809 + int dst_nents;
19810 + dma_addr_t iv_dma;
19811 + int qm_sg_bytes;
19812 + dma_addr_t qm_sg_dma;
19813 + struct scatterlist tmp[2];
19814 + struct scatterlist *dst;
19815 + struct dpaa2_sg_entry sgt[0];
19816 +};
19817 +
19818 +/*
19819 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
19820 + * @src_nents: number of segments in input scatterlist
19821 + * @dst_nents: number of segments in output scatterlist
19822 + * @iv_dma: dma address of iv for checking continuity and link table
19823 + * @qm_sg_bytes: length of dma mapped qm_sg space
19824 + * @qm_sg_dma: I/O virtual address of h/w link table
19825 + * @sgt: the h/w link table
19826 + */
19827 +struct ablkcipher_edesc {
19828 + int src_nents;
19829 + int dst_nents;
19830 + dma_addr_t iv_dma;
19831 + int qm_sg_bytes;
19832 + dma_addr_t qm_sg_dma;
19833 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
19834 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
19835 + sizeof(struct dpaa2_sg_entry))
19836 + struct dpaa2_sg_entry sgt[0];
19837 +};
19838 +
19839 +/**
19840 + * caam_flc - Flow Context (FLC)
19841 + * @flc: Flow Context options
19842 + * @sh_desc: Shared Descriptor
19843 + * @flc_dma: DMA address of the Flow Context
19844 + */
19845 +struct caam_flc {
19846 + u32 flc[16];
19847 + u32 sh_desc[MAX_SDLEN];
19848 + dma_addr_t flc_dma;
19849 +} ____cacheline_aligned;
19850 +
19851 +enum optype {
19852 + ENCRYPT = 0,
19853 + DECRYPT,
19854 + GIVENCRYPT,
19855 + NUM_OP
19856 +};
19857 +
19858 +/**
19859 + * caam_request - the request structure the driver application should fill while
19860 + * submitting a job to the driver.
19861 + * @fd_flt: Frame list table defining input and output
19862 + * fd_flt[0] - FLE pointing to output buffer
19863 + * fd_flt[1] - FLE pointing to input buffer
19864 + * @fd_flt_dma: DMA address for the frame list table
19865 + * @flc: Flow Context
19866 + * @op_type: operation type
19867 + * @cbk: Callback function to invoke when job is completed
19868 + * @ctx: arbitrary context attached to the request by the application
19869 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
19870 + */
19871 +struct caam_request {
19872 + struct dpaa2_fl_entry fd_flt[2];
19873 + dma_addr_t fd_flt_dma;
19874 + struct caam_flc *flc;
19875 + enum optype op_type;
19876 + void (*cbk)(void *ctx, u32 err);
19877 + void *ctx;
19878 + void *edesc;
19879 +};
19880 +
19881 +/**
19882 + * dpaa2_caam_enqueue() - enqueue a crypto request
19883 + * @dev: device associated with the DPSECI object
19884 + * @req: pointer to caam_request
19885 + */
19886 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
19887 +
19888 +#endif /* _CAAMALG_QI2_H_ */
19889 diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
19890 index 631337c2..698580b6 100644
19891 --- a/drivers/crypto/caam/caamhash.c
19892 +++ b/drivers/crypto/caam/caamhash.c
19893 @@ -72,7 +72,7 @@
19894 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
19895
19896 /* length of descriptors text */
19897 -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
19898 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
19899 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
19900 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
19901 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
19902 @@ -103,20 +103,14 @@ struct caam_hash_ctx {
19903 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19904 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19905 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19906 - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19907 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
19908 dma_addr_t sh_desc_update_first_dma;
19909 dma_addr_t sh_desc_fin_dma;
19910 dma_addr_t sh_desc_digest_dma;
19911 - dma_addr_t sh_desc_finup_dma;
19912 struct device *jrdev;
19913 - u32 alg_type;
19914 - u32 alg_op;
19915 u8 key[CAAM_MAX_HASH_KEY_SIZE];
19916 - dma_addr_t key_dma;
19917 int ctx_len;
19918 - unsigned int split_key_len;
19919 - unsigned int split_key_pad_len;
19920 + struct alginfo adata;
19921 };
19922
19923 /* ahash state */
19924 @@ -143,6 +137,31 @@ struct caam_export_state {
19925 int (*finup)(struct ahash_request *req);
19926 };
19927
19928 +static inline void switch_buf(struct caam_hash_state *state)
19929 +{
19930 + state->current_buf ^= 1;
19931 +}
19932 +
19933 +static inline u8 *current_buf(struct caam_hash_state *state)
19934 +{
19935 + return state->current_buf ? state->buf_1 : state->buf_0;
19936 +}
19937 +
19938 +static inline u8 *alt_buf(struct caam_hash_state *state)
19939 +{
19940 + return state->current_buf ? state->buf_0 : state->buf_1;
19941 +}
19942 +
19943 +static inline int *current_buflen(struct caam_hash_state *state)
19944 +{
19945 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
19946 +}
19947 +
19948 +static inline int *alt_buflen(struct caam_hash_state *state)
19949 +{
19950 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
19951 +}
19952 +
19953 /* Common job descriptor seq in/out ptr routines */
19954
19955 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
19956 @@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
19957 return dst_dma;
19958 }
19959
19960 -/* Map current buffer in state and put it in link table */
19961 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
19962 - struct sec4_sg_entry *sec4_sg,
19963 - u8 *buf, int buflen)
19964 +/* Map current buffer in state (if length > 0) and put it in link table */
19965 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
19966 + struct sec4_sg_entry *sec4_sg,
19967 + struct caam_hash_state *state)
19968 {
19969 - dma_addr_t buf_dma;
19970 + int buflen = *current_buflen(state);
19971
19972 - buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
19973 - dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
19974 + if (!buflen)
19975 + return 0;
19976
19977 - return buf_dma;
19978 -}
19979 + state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
19980 + DMA_TO_DEVICE);
19981 + if (dma_mapping_error(jrdev, state->buf_dma)) {
19982 + dev_err(jrdev, "unable to map buf\n");
19983 + state->buf_dma = 0;
19984 + return -ENOMEM;
19985 + }
19986
19987 -/*
19988 - * Only put buffer in link table if it contains data, which is possible,
19989 - * since a buffer has previously been used, and needs to be unmapped,
19990 - */
19991 -static inline dma_addr_t
19992 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
19993 - u8 *buf, dma_addr_t buf_dma, int buflen,
19994 - int last_buflen)
19995 -{
19996 - if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
19997 - dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
19998 - if (buflen)
19999 - buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
20000 - else
20001 - buf_dma = 0;
20002 -
20003 - return buf_dma;
20004 + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
20005 +
20006 + return 0;
20007 }
20008
20009 /* Map state->caam_ctx, and add it to link table */
20010 @@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
20011 return 0;
20012 }
20013
20014 -/* Common shared descriptor commands */
20015 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
20016 -{
20017 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
20018 - ctx->split_key_len, CLASS_2 |
20019 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
20020 -}
20021 -
20022 -/* Append key if it has been set */
20023 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
20024 -{
20025 - u32 *key_jump_cmd;
20026 -
20027 - init_sh_desc(desc, HDR_SHARE_SERIAL);
20028 -
20029 - if (ctx->split_key_len) {
20030 - /* Skip if already shared */
20031 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20032 - JUMP_COND_SHRD);
20033 -
20034 - append_key_ahash(desc, ctx);
20035 -
20036 - set_jump_tgt_here(desc, key_jump_cmd);
20037 - }
20038 -
20039 - /* Propagate errors from shared to job descriptor */
20040 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
20041 -}
20042 -
20043 /*
20044 - * For ahash read data from seqin following state->caam_ctx,
20045 - * and write resulting class2 context to seqout, which may be state->caam_ctx
20046 - * or req->result
20047 + * For ahash update, final and finup (import_ctx = true)
20048 + * import context, read and write to seqout
20049 + * For ahash firsts and digest (import_ctx = false)
20050 + * read and write to seqout
20051 */
20052 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
20053 +static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
20054 + struct caam_hash_ctx *ctx, bool import_ctx)
20055 {
20056 - /* Calculate remaining bytes to read */
20057 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20058 -
20059 - /* Read remaining bytes */
20060 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20061 - FIFOLD_TYPE_MSG | KEY_VLF);
20062 + u32 op = ctx->adata.algtype;
20063 + u32 *skip_key_load;
20064
20065 - /* Store class2 context bytes */
20066 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20067 - LDST_SRCDST_BYTE_CONTEXT);
20068 -}
20069 + init_sh_desc(desc, HDR_SHARE_SERIAL);
20070
20071 -/*
20072 - * For ahash update, final and finup, import context, read and write to seqout
20073 - */
20074 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
20075 - int digestsize,
20076 - struct caam_hash_ctx *ctx)
20077 -{
20078 - init_sh_desc_key_ahash(desc, ctx);
20079 + /* Append key if it has been set; ahash update excluded */
20080 + if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
20081 + /* Skip key loading if already shared */
20082 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20083 + JUMP_COND_SHRD);
20084
20085 - /* Import context from software */
20086 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20087 - LDST_CLASS_2_CCB | ctx->ctx_len);
20088 + append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
20089 + ctx->adata.keylen, CLASS_2 |
20090 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
20091
20092 - /* Class 2 operation */
20093 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
20094 + set_jump_tgt_here(desc, skip_key_load);
20095
20096 - /*
20097 - * Load from buf and/or src and write to req->result or state->context
20098 - */
20099 - ahash_append_load_str(desc, digestsize);
20100 -}
20101 + op |= OP_ALG_AAI_HMAC_PRECOMP;
20102 + }
20103
20104 -/* For ahash firsts and digest, read and write to seqout */
20105 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
20106 - int digestsize, struct caam_hash_ctx *ctx)
20107 -{
20108 - init_sh_desc_key_ahash(desc, ctx);
20109 + /* If needed, import context from software */
20110 + if (import_ctx)
20111 + append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
20112 + LDST_SRCDST_BYTE_CONTEXT);
20113
20114 /* Class 2 operation */
20115 append_operation(desc, op | state | OP_ALG_ENCRYPT);
20116
20117 /*
20118 * Load from buf and/or src and write to req->result or state->context
20119 + * Calculate remaining bytes to read
20120 */
20121 - ahash_append_load_str(desc, digestsize);
20122 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20123 + /* Read remaining bytes */
20124 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20125 + FIFOLD_TYPE_MSG | KEY_VLF);
20126 + /* Store class2 context bytes */
20127 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20128 + LDST_SRCDST_BYTE_CONTEXT);
20129 }
20130
20131 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20132 @@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20133 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20134 int digestsize = crypto_ahash_digestsize(ahash);
20135 struct device *jrdev = ctx->jrdev;
20136 - u32 have_key = 0;
20137 u32 *desc;
20138
20139 - if (ctx->split_key_len)
20140 - have_key = OP_ALG_AAI_HMAC_PRECOMP;
20141 -
20142 /* ahash_update shared descriptor */
20143 desc = ctx->sh_desc_update;
20144 -
20145 - init_sh_desc(desc, HDR_SHARE_SERIAL);
20146 -
20147 - /* Import context from software */
20148 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20149 - LDST_CLASS_2_CCB | ctx->ctx_len);
20150 -
20151 - /* Class 2 operation */
20152 - append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
20153 - OP_ALG_ENCRYPT);
20154 -
20155 - /* Load data and write to result or context */
20156 - ahash_append_load_str(desc, ctx->ctx_len);
20157 -
20158 - ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20159 - DMA_TO_DEVICE);
20160 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
20161 - dev_err(jrdev, "unable to map shared descriptor\n");
20162 - return -ENOMEM;
20163 - }
20164 + ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
20165 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
20166 + desc_bytes(desc), DMA_TO_DEVICE);
20167 #ifdef DEBUG
20168 print_hex_dump(KERN_ERR,
20169 "ahash update shdesc@"__stringify(__LINE__)": ",
20170 @@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20171
20172 /* ahash_update_first shared descriptor */
20173 desc = ctx->sh_desc_update_first;
20174 -
20175 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
20176 - ctx->ctx_len, ctx);
20177 -
20178 - ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
20179 - desc_bytes(desc),
20180 - DMA_TO_DEVICE);
20181 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
20182 - dev_err(jrdev, "unable to map shared descriptor\n");
20183 - return -ENOMEM;
20184 - }
20185 + ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
20186 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
20187 + desc_bytes(desc), DMA_TO_DEVICE);
20188 #ifdef DEBUG
20189 print_hex_dump(KERN_ERR,
20190 "ahash update first shdesc@"__stringify(__LINE__)": ",
20191 @@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20192
20193 /* ahash_final shared descriptor */
20194 desc = ctx->sh_desc_fin;
20195 -
20196 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20197 - OP_ALG_AS_FINALIZE, digestsize, ctx);
20198 -
20199 - ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20200 - DMA_TO_DEVICE);
20201 - if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
20202 - dev_err(jrdev, "unable to map shared descriptor\n");
20203 - return -ENOMEM;
20204 - }
20205 + ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
20206 + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
20207 + desc_bytes(desc), DMA_TO_DEVICE);
20208 #ifdef DEBUG
20209 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
20210 DUMP_PREFIX_ADDRESS, 16, 4, desc,
20211 desc_bytes(desc), 1);
20212 #endif
20213
20214 - /* ahash_finup shared descriptor */
20215 - desc = ctx->sh_desc_finup;
20216 -
20217 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20218 - OP_ALG_AS_FINALIZE, digestsize, ctx);
20219 -
20220 - ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20221 - DMA_TO_DEVICE);
20222 - if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
20223 - dev_err(jrdev, "unable to map shared descriptor\n");
20224 - return -ENOMEM;
20225 - }
20226 -#ifdef DEBUG
20227 - print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
20228 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
20229 - desc_bytes(desc), 1);
20230 -#endif
20231 -
20232 /* ahash_digest shared descriptor */
20233 desc = ctx->sh_desc_digest;
20234 -
20235 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
20236 - digestsize, ctx);
20237 -
20238 - ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
20239 - desc_bytes(desc),
20240 - DMA_TO_DEVICE);
20241 - if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
20242 - dev_err(jrdev, "unable to map shared descriptor\n");
20243 - return -ENOMEM;
20244 - }
20245 + ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
20246 + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
20247 + desc_bytes(desc), DMA_TO_DEVICE);
20248 #ifdef DEBUG
20249 print_hex_dump(KERN_ERR,
20250 "ahash digest shdesc@"__stringify(__LINE__)": ",
20251 @@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20252 return 0;
20253 }
20254
20255 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20256 - u32 keylen)
20257 -{
20258 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
20259 - ctx->split_key_pad_len, key_in, keylen,
20260 - ctx->alg_op);
20261 -}
20262 -
20263 /* Digest hash size if it is too large */
20264 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20265 u32 *keylen, u8 *key_out, u32 digestsize)
20266 @@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20267 }
20268
20269 /* Job descriptor to perform unkeyed hash on key_in */
20270 - append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
20271 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
20272 OP_ALG_AS_INITFINAL);
20273 append_seq_in_ptr(desc, src_dma, *keylen, 0);
20274 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
20275 @@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20276 static int ahash_setkey(struct crypto_ahash *ahash,
20277 const u8 *key, unsigned int keylen)
20278 {
20279 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
20280 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
20281 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20282 - struct device *jrdev = ctx->jrdev;
20283 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
20284 int digestsize = crypto_ahash_digestsize(ahash);
20285 int ret;
20286 @@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ahash *ahash,
20287 key = hashed_key;
20288 }
20289
20290 - /* Pick class 2 key length from algorithm submask */
20291 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20292 - OP_ALG_ALGSEL_SHIFT] * 2;
20293 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
20294 -
20295 -#ifdef DEBUG
20296 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
20297 - ctx->split_key_len, ctx->split_key_pad_len);
20298 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
20299 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
20300 -#endif
20301 -
20302 - ret = gen_split_hash_key(ctx, key, keylen);
20303 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
20304 + CAAM_MAX_HASH_KEY_SIZE);
20305 if (ret)
20306 goto bad_free_key;
20307
20308 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
20309 - DMA_TO_DEVICE);
20310 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
20311 - dev_err(jrdev, "unable to map key i/o memory\n");
20312 - ret = -ENOMEM;
20313 - goto error_free_key;
20314 - }
20315 #ifdef DEBUG
20316 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
20317 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
20318 - ctx->split_key_pad_len, 1);
20319 + ctx->adata.keylen_pad, 1);
20320 #endif
20321
20322 - ret = ahash_set_sh_desc(ahash);
20323 - if (ret) {
20324 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
20325 - DMA_TO_DEVICE);
20326 - }
20327 - error_free_key:
20328 kfree(hashed_key);
20329 - return ret;
20330 + return ahash_set_sh_desc(ahash);
20331 bad_free_key:
20332 kfree(hashed_key);
20333 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
20334 @@ -604,6 +482,8 @@ static inline void ahash_unmap(struct device *dev,
20335 struct ahash_edesc *edesc,
20336 struct ahash_request *req, int dst_len)
20337 {
20338 + struct caam_hash_state *state = ahash_request_ctx(req);
20339 +
20340 if (edesc->src_nents)
20341 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
20342 if (edesc->dst_dma)
20343 @@ -612,6 +492,12 @@ static inline void ahash_unmap(struct device *dev,
20344 if (edesc->sec4_sg_bytes)
20345 dma_unmap_single(dev, edesc->sec4_sg_dma,
20346 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
20347 +
20348 + if (state->buf_dma) {
20349 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
20350 + DMA_TO_DEVICE);
20351 + state->buf_dma = 0;
20352 + }
20353 }
20354
20355 static inline void ahash_unmap_ctx(struct device *dev,
20356 @@ -643,8 +529,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
20357 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20358 #endif
20359
20360 - edesc = (struct ahash_edesc *)((char *)desc -
20361 - offsetof(struct ahash_edesc, hw_desc));
20362 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20363 if (err)
20364 caam_jr_strstatus(jrdev, err);
20365
20366 @@ -671,19 +556,19 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
20367 struct ahash_edesc *edesc;
20368 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20369 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20370 -#ifdef DEBUG
20371 struct caam_hash_state *state = ahash_request_ctx(req);
20372 +#ifdef DEBUG
20373 int digestsize = crypto_ahash_digestsize(ahash);
20374
20375 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20376 #endif
20377
20378 - edesc = (struct ahash_edesc *)((char *)desc -
20379 - offsetof(struct ahash_edesc, hw_desc));
20380 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20381 if (err)
20382 caam_jr_strstatus(jrdev, err);
20383
20384 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
20385 + switch_buf(state);
20386 kfree(edesc);
20387
20388 #ifdef DEBUG
20389 @@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
20390 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20391 #endif
20392
20393 - edesc = (struct ahash_edesc *)((char *)desc -
20394 - offsetof(struct ahash_edesc, hw_desc));
20395 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20396 if (err)
20397 caam_jr_strstatus(jrdev, err);
20398
20399 @@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
20400 struct ahash_edesc *edesc;
20401 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20402 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20403 -#ifdef DEBUG
20404 struct caam_hash_state *state = ahash_request_ctx(req);
20405 +#ifdef DEBUG
20406 int digestsize = crypto_ahash_digestsize(ahash);
20407
20408 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20409 #endif
20410
20411 - edesc = (struct ahash_edesc *)((char *)desc -
20412 - offsetof(struct ahash_edesc, hw_desc));
20413 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20414 if (err)
20415 caam_jr_strstatus(jrdev, err);
20416
20417 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
20418 + switch_buf(state);
20419 kfree(edesc);
20420
20421 #ifdef DEBUG
20422 @@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash_request *req)
20423 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20424 struct caam_hash_state *state = ahash_request_ctx(req);
20425 struct device *jrdev = ctx->jrdev;
20426 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20427 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20428 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20429 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20430 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20431 - int *next_buflen = state->current_buf ? &state->buflen_0 :
20432 - &state->buflen_1, last_buflen;
20433 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20434 + GFP_KERNEL : GFP_ATOMIC;
20435 + u8 *buf = current_buf(state);
20436 + int *buflen = current_buflen(state);
20437 + u8 *next_buf = alt_buf(state);
20438 + int *next_buflen = alt_buflen(state), last_buflen;
20439 int in_len = *buflen + req->nbytes, to_hash;
20440 u32 *desc;
20441 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
20442 @@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash_request *req)
20443 if (ret)
20444 goto unmap_ctx;
20445
20446 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
20447 - edesc->sec4_sg + 1,
20448 - buf, state->buf_dma,
20449 - *buflen, last_buflen);
20450 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20451 + if (ret)
20452 + goto unmap_ctx;
20453
20454 if (mapped_nents) {
20455 sg_to_sec4_sg_last(req->src, mapped_nents,
20456 @@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash_request *req)
20457 to_hash - *buflen,
20458 *next_buflen, 0);
20459 } else {
20460 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20461 - cpu_to_caam32(SEC4_SG_LEN_FIN);
20462 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
20463 + 1);
20464 }
20465
20466 - state->current_buf = !state->current_buf;
20467 -
20468 desc = edesc->hw_desc;
20469
20470 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20471 @@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_request *req)
20472 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20473 struct caam_hash_state *state = ahash_request_ctx(req);
20474 struct device *jrdev = ctx->jrdev;
20475 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20476 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20477 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20478 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20479 - int last_buflen = state->current_buf ? state->buflen_0 :
20480 - state->buflen_1;
20481 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20482 + GFP_KERNEL : GFP_ATOMIC;
20483 + int buflen = *current_buflen(state);
20484 u32 *desc;
20485 int sec4_sg_bytes, sec4_sg_src_index;
20486 int digestsize = crypto_ahash_digestsize(ahash);
20487 @@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_request *req)
20488 if (ret)
20489 goto unmap_ctx;
20490
20491 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20492 - buf, state->buf_dma, buflen,
20493 - last_buflen);
20494 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20495 - cpu_to_caam32(SEC4_SG_LEN_FIN);
20496 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20497 + if (ret)
20498 + goto unmap_ctx;
20499 +
20500 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
20501
20502 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20503 sec4_sg_bytes, DMA_TO_DEVICE);
20504 @@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
20505 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20506 struct caam_hash_state *state = ahash_request_ctx(req);
20507 struct device *jrdev = ctx->jrdev;
20508 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20509 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20510 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20511 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20512 - int last_buflen = state->current_buf ? state->buflen_0 :
20513 - state->buflen_1;
20514 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20515 + GFP_KERNEL : GFP_ATOMIC;
20516 + int buflen = *current_buflen(state);
20517 u32 *desc;
20518 int sec4_sg_src_index;
20519 int src_nents, mapped_nents;
20520 @@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
20521
20522 /* allocate space for base edesc and hw desc commands, link tables */
20523 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
20524 - ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
20525 + ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
20526 flags);
20527 if (!edesc) {
20528 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
20529 @@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
20530 if (ret)
20531 goto unmap_ctx;
20532
20533 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20534 - buf, state->buf_dma, buflen,
20535 - last_buflen);
20536 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20537 + if (ret)
20538 + goto unmap_ctx;
20539
20540 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
20541 sec4_sg_src_index, ctx->ctx_len + buflen,
20542 @@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_request *req)
20543 {
20544 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20545 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20546 + struct caam_hash_state *state = ahash_request_ctx(req);
20547 struct device *jrdev = ctx->jrdev;
20548 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20549 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20550 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20551 + GFP_KERNEL : GFP_ATOMIC;
20552 u32 *desc;
20553 int digestsize = crypto_ahash_digestsize(ahash);
20554 int src_nents, mapped_nents;
20555 struct ahash_edesc *edesc;
20556 int ret;
20557
20558 + state->buf_dma = 0;
20559 +
20560 src_nents = sg_nents_for_len(req->src, req->nbytes);
20561 if (src_nents < 0) {
20562 dev_err(jrdev, "Invalid number of src SG.\n");
20563 @@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct ahash_request *req)
20564 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20565 struct caam_hash_state *state = ahash_request_ctx(req);
20566 struct device *jrdev = ctx->jrdev;
20567 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20568 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20569 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20570 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20571 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20572 + GFP_KERNEL : GFP_ATOMIC;
20573 + u8 *buf = current_buf(state);
20574 + int buflen = *current_buflen(state);
20575 u32 *desc;
20576 int digestsize = crypto_ahash_digestsize(ahash);
20577 struct ahash_edesc *edesc;
20578 @@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
20579 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20580 struct caam_hash_state *state = ahash_request_ctx(req);
20581 struct device *jrdev = ctx->jrdev;
20582 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20583 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20584 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20585 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20586 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20587 - int *next_buflen = state->current_buf ? &state->buflen_0 :
20588 - &state->buflen_1;
20589 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20590 + GFP_KERNEL : GFP_ATOMIC;
20591 + u8 *buf = current_buf(state);
20592 + int *buflen = current_buflen(state);
20593 + u8 *next_buf = alt_buf(state);
20594 + int *next_buflen = alt_buflen(state);
20595 int in_len = *buflen + req->nbytes, to_hash;
20596 int sec4_sg_bytes, src_nents, mapped_nents;
20597 struct ahash_edesc *edesc;
20598 @@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
20599 edesc->sec4_sg_bytes = sec4_sg_bytes;
20600 edesc->dst_dma = 0;
20601
20602 - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
20603 - buf, *buflen);
20604 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20605 + if (ret)
20606 + goto unmap_ctx;
20607 +
20608 sg_to_sec4_sg_last(req->src, mapped_nents,
20609 edesc->sec4_sg + 1, 0);
20610
20611 @@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
20612 *next_buflen, 0);
20613 }
20614
20615 - state->current_buf = !state->current_buf;
20616 -
20617 desc = edesc->hw_desc;
20618
20619 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20620 @@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
20621 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20622 struct caam_hash_state *state = ahash_request_ctx(req);
20623 struct device *jrdev = ctx->jrdev;
20624 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20625 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20626 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20627 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20628 - int last_buflen = state->current_buf ? state->buflen_0 :
20629 - state->buflen_1;
20630 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20631 + GFP_KERNEL : GFP_ATOMIC;
20632 + int buflen = *current_buflen(state);
20633 u32 *desc;
20634 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
20635 int digestsize = crypto_ahash_digestsize(ahash);
20636 @@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
20637 edesc->src_nents = src_nents;
20638 edesc->sec4_sg_bytes = sec4_sg_bytes;
20639
20640 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
20641 - state->buf_dma, buflen,
20642 - last_buflen);
20643 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20644 + if (ret)
20645 + goto unmap;
20646
20647 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
20648 req->nbytes);
20649 @@ -1496,11 +1369,10 @@ static int ahash_update_first(struct ahash_request *req)
20650 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20651 struct caam_hash_state *state = ahash_request_ctx(req);
20652 struct device *jrdev = ctx->jrdev;
20653 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20654 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20655 - u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
20656 - int *next_buflen = state->current_buf ?
20657 - &state->buflen_1 : &state->buflen_0;
20658 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20659 + GFP_KERNEL : GFP_ATOMIC;
20660 + u8 *next_buf = alt_buf(state);
20661 + int *next_buflen = alt_buflen(state);
20662 int to_hash;
20663 u32 *desc;
20664 int src_nents, mapped_nents;
20665 @@ -1582,6 +1454,7 @@ static int ahash_update_first(struct ahash_request *req)
20666 state->final = ahash_final_no_ctx;
20667 scatterwalk_map_and_copy(next_buf, req->src, 0,
20668 req->nbytes, 0);
20669 + switch_buf(state);
20670 }
20671 #ifdef DEBUG
20672 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
20673 @@ -1688,7 +1561,6 @@ struct caam_hash_template {
20674 unsigned int blocksize;
20675 struct ahash_alg template_ahash;
20676 u32 alg_type;
20677 - u32 alg_op;
20678 };
20679
20680 /* ahash descriptors */
20681 @@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_hash[] = {
20682 },
20683 },
20684 .alg_type = OP_ALG_ALGSEL_SHA1,
20685 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
20686 }, {
20687 .name = "sha224",
20688 .driver_name = "sha224-caam",
20689 @@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_hash[] = {
20690 },
20691 },
20692 .alg_type = OP_ALG_ALGSEL_SHA224,
20693 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
20694 }, {
20695 .name = "sha256",
20696 .driver_name = "sha256-caam",
20697 @@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_hash[] = {
20698 },
20699 },
20700 .alg_type = OP_ALG_ALGSEL_SHA256,
20701 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
20702 }, {
20703 .name = "sha384",
20704 .driver_name = "sha384-caam",
20705 @@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_hash[] = {
20706 },
20707 },
20708 .alg_type = OP_ALG_ALGSEL_SHA384,
20709 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
20710 }, {
20711 .name = "sha512",
20712 .driver_name = "sha512-caam",
20713 @@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_hash[] = {
20714 },
20715 },
20716 .alg_type = OP_ALG_ALGSEL_SHA512,
20717 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
20718 }, {
20719 .name = "md5",
20720 .driver_name = "md5-caam",
20721 @@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_hash[] = {
20722 },
20723 },
20724 .alg_type = OP_ALG_ALGSEL_MD5,
20725 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
20726 },
20727 };
20728
20729 struct caam_hash_alg {
20730 struct list_head entry;
20731 int alg_type;
20732 - int alg_op;
20733 struct ahash_alg ahash_alg;
20734 };
20735
20736 @@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
20737 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20738 HASH_MSG_LEN + 64,
20739 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20740 + dma_addr_t dma_addr;
20741
20742 /*
20743 * Get a Job ring from Job Ring driver to ensure in-order
20744 @@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
20745 pr_err("Job Ring Device allocation for transform failed\n");
20746 return PTR_ERR(ctx->jrdev);
20747 }
20748 +
20749 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
20750 + offsetof(struct caam_hash_ctx,
20751 + sh_desc_update_dma),
20752 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20753 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
20754 + dev_err(ctx->jrdev, "unable to map shared descriptors\n");
20755 + caam_jr_free(ctx->jrdev);
20756 + return -ENOMEM;
20757 + }
20758 +
20759 + ctx->sh_desc_update_dma = dma_addr;
20760 + ctx->sh_desc_update_first_dma = dma_addr +
20761 + offsetof(struct caam_hash_ctx,
20762 + sh_desc_update_first);
20763 + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
20764 + sh_desc_fin);
20765 + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
20766 + sh_desc_digest);
20767 +
20768 /* copy descriptor header template value */
20769 - ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20770 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
20771 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20772
20773 - ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20774 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
20775 + OP_ALG_ALGSEL_SUBMASK) >>
20776 OP_ALG_ALGSEL_SHIFT];
20777
20778 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20779 @@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
20780 {
20781 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20782
20783 - if (ctx->sh_desc_update_dma &&
20784 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
20785 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
20786 - desc_bytes(ctx->sh_desc_update),
20787 - DMA_TO_DEVICE);
20788 - if (ctx->sh_desc_update_first_dma &&
20789 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
20790 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
20791 - desc_bytes(ctx->sh_desc_update_first),
20792 - DMA_TO_DEVICE);
20793 - if (ctx->sh_desc_fin_dma &&
20794 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
20795 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
20796 - desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
20797 - if (ctx->sh_desc_digest_dma &&
20798 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
20799 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
20800 - desc_bytes(ctx->sh_desc_digest),
20801 - DMA_TO_DEVICE);
20802 - if (ctx->sh_desc_finup_dma &&
20803 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
20804 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
20805 - desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
20806 -
20807 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
20808 + offsetof(struct caam_hash_ctx,
20809 + sh_desc_update_dma),
20810 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20811 caam_jr_free(ctx->jrdev);
20812 }
20813
20814 @@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_template *template,
20815 alg->cra_type = &crypto_ahash_type;
20816
20817 t_alg->alg_type = template->alg_type;
20818 - t_alg->alg_op = template->alg_op;
20819
20820 return t_alg;
20821 }
20822 diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
20823 index 354a16ab..4fcb378e 100644
20824 --- a/drivers/crypto/caam/caampkc.c
20825 +++ b/drivers/crypto/caam/caampkc.c
20826 @@ -18,6 +18,10 @@
20827 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
20828 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
20829 sizeof(struct rsa_priv_f1_pdb))
20830 +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
20831 + sizeof(struct rsa_priv_f2_pdb))
20832 +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
20833 + sizeof(struct rsa_priv_f3_pdb))
20834
20835 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
20836 struct akcipher_request *req)
20837 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
20838 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20839 }
20840
20841 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
20842 + struct akcipher_request *req)
20843 +{
20844 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20845 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20846 + struct caam_rsa_key *key = &ctx->key;
20847 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20848 + size_t p_sz = key->p_sz;
20849 + size_t q_sz = key->p_sz;
20850 +
20851 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20852 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20853 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20854 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20855 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20856 +}
20857 +
20858 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
20859 + struct akcipher_request *req)
20860 +{
20861 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20862 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20863 + struct caam_rsa_key *key = &ctx->key;
20864 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20865 + size_t p_sz = key->p_sz;
20866 + size_t q_sz = key->p_sz;
20867 +
20868 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20869 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20870 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
20871 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
20872 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
20873 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20874 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20875 +}
20876 +
20877 /* RSA Job Completion handler */
20878 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
20879 {
20880 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
20881 akcipher_request_complete(req, err);
20882 }
20883
20884 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
20885 + void *context)
20886 +{
20887 + struct akcipher_request *req = context;
20888 + struct rsa_edesc *edesc;
20889 +
20890 + if (err)
20891 + caam_jr_strstatus(dev, err);
20892 +
20893 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20894 +
20895 + rsa_priv_f2_unmap(dev, edesc, req);
20896 + rsa_io_unmap(dev, edesc, req);
20897 + kfree(edesc);
20898 +
20899 + akcipher_request_complete(req, err);
20900 +}
20901 +
20902 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
20903 + void *context)
20904 +{
20905 + struct akcipher_request *req = context;
20906 + struct rsa_edesc *edesc;
20907 +
20908 + if (err)
20909 + caam_jr_strstatus(dev, err);
20910 +
20911 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20912 +
20913 + rsa_priv_f3_unmap(dev, edesc, req);
20914 + rsa_io_unmap(dev, edesc, req);
20915 + kfree(edesc);
20916 +
20917 + akcipher_request_complete(req, err);
20918 +}
20919 +
20920 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20921 size_t desclen)
20922 {
20923 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20924 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20925 struct device *dev = ctx->dev;
20926 struct rsa_edesc *edesc;
20927 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20928 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20929 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20930 + GFP_KERNEL : GFP_ATOMIC;
20931 int sgc;
20932 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
20933 int src_nents, dst_nents;
20934 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
20935 return 0;
20936 }
20937
20938 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
20939 + struct rsa_edesc *edesc)
20940 +{
20941 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20942 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20943 + struct caam_rsa_key *key = &ctx->key;
20944 + struct device *dev = ctx->dev;
20945 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20946 + int sec4_sg_index = 0;
20947 + size_t p_sz = key->p_sz;
20948 + size_t q_sz = key->p_sz;
20949 +
20950 + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
20951 + if (dma_mapping_error(dev, pdb->d_dma)) {
20952 + dev_err(dev, "Unable to map RSA private exponent memory\n");
20953 + return -ENOMEM;
20954 + }
20955 +
20956 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20957 + if (dma_mapping_error(dev, pdb->p_dma)) {
20958 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
20959 + goto unmap_d;
20960 + }
20961 +
20962 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20963 + if (dma_mapping_error(dev, pdb->q_dma)) {
20964 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
20965 + goto unmap_p;
20966 + }
20967 +
20968 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
20969 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
20970 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
20971 + goto unmap_q;
20972 + }
20973 +
20974 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
20975 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
20976 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
20977 + goto unmap_tmp1;
20978 + }
20979 +
20980 + if (edesc->src_nents > 1) {
20981 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
20982 + pdb->g_dma = edesc->sec4_sg_dma;
20983 + sec4_sg_index += edesc->src_nents;
20984 + } else {
20985 + pdb->g_dma = sg_dma_address(req->src);
20986 + }
20987 +
20988 + if (edesc->dst_nents > 1) {
20989 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
20990 + pdb->f_dma = edesc->sec4_sg_dma +
20991 + sec4_sg_index * sizeof(struct sec4_sg_entry);
20992 + } else {
20993 + pdb->f_dma = sg_dma_address(req->dst);
20994 + }
20995 +
20996 + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
20997 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
20998 +
20999 + return 0;
21000 +
21001 +unmap_tmp1:
21002 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21003 +unmap_q:
21004 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21005 +unmap_p:
21006 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21007 +unmap_d:
21008 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
21009 +
21010 + return -ENOMEM;
21011 +}
21012 +
21013 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
21014 + struct rsa_edesc *edesc)
21015 +{
21016 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21017 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21018 + struct caam_rsa_key *key = &ctx->key;
21019 + struct device *dev = ctx->dev;
21020 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
21021 + int sec4_sg_index = 0;
21022 + size_t p_sz = key->p_sz;
21023 + size_t q_sz = key->p_sz;
21024 +
21025 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
21026 + if (dma_mapping_error(dev, pdb->p_dma)) {
21027 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
21028 + return -ENOMEM;
21029 + }
21030 +
21031 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
21032 + if (dma_mapping_error(dev, pdb->q_dma)) {
21033 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
21034 + goto unmap_p;
21035 + }
21036 +
21037 + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
21038 + if (dma_mapping_error(dev, pdb->dp_dma)) {
21039 + dev_err(dev, "Unable to map RSA exponent dp memory\n");
21040 + goto unmap_q;
21041 + }
21042 +
21043 + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
21044 + if (dma_mapping_error(dev, pdb->dq_dma)) {
21045 + dev_err(dev, "Unable to map RSA exponent dq memory\n");
21046 + goto unmap_dp;
21047 + }
21048 +
21049 + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
21050 + if (dma_mapping_error(dev, pdb->c_dma)) {
21051 + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
21052 + goto unmap_dq;
21053 + }
21054 +
21055 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
21056 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
21057 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
21058 + goto unmap_qinv;
21059 + }
21060 +
21061 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
21062 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
21063 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
21064 + goto unmap_tmp1;
21065 + }
21066 +
21067 + if (edesc->src_nents > 1) {
21068 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
21069 + pdb->g_dma = edesc->sec4_sg_dma;
21070 + sec4_sg_index += edesc->src_nents;
21071 + } else {
21072 + pdb->g_dma = sg_dma_address(req->src);
21073 + }
21074 +
21075 + if (edesc->dst_nents > 1) {
21076 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
21077 + pdb->f_dma = edesc->sec4_sg_dma +
21078 + sec4_sg_index * sizeof(struct sec4_sg_entry);
21079 + } else {
21080 + pdb->f_dma = sg_dma_address(req->dst);
21081 + }
21082 +
21083 + pdb->sgf |= key->n_sz;
21084 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
21085 +
21086 + return 0;
21087 +
21088 +unmap_tmp1:
21089 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21090 +unmap_qinv:
21091 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
21092 +unmap_dq:
21093 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
21094 +unmap_dp:
21095 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
21096 +unmap_q:
21097 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21098 +unmap_p:
21099 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21100 +
21101 + return -ENOMEM;
21102 +}
21103 +
21104 static int caam_rsa_enc(struct akcipher_request *req)
21105 {
21106 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21107 @@ -301,24 +543,14 @@ static int caam_rsa_enc(struct akcipher_request *req)
21108 return ret;
21109 }
21110
21111 -static int caam_rsa_dec(struct akcipher_request *req)
21112 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
21113 {
21114 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21115 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21116 - struct caam_rsa_key *key = &ctx->key;
21117 struct device *jrdev = ctx->dev;
21118 struct rsa_edesc *edesc;
21119 int ret;
21120
21121 - if (unlikely(!key->n || !key->d))
21122 - return -EINVAL;
21123 -
21124 - if (req->dst_len < key->n_sz) {
21125 - req->dst_len = key->n_sz;
21126 - dev_err(jrdev, "Output buffer length less than parameter n\n");
21127 - return -EOVERFLOW;
21128 - }
21129 -
21130 /* Allocate extended descriptor */
21131 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
21132 if (IS_ERR(edesc))
21133 @@ -344,17 +576,147 @@ static int caam_rsa_dec(struct akcipher_request *req)
21134 return ret;
21135 }
21136
21137 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
21138 +{
21139 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21140 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21141 + struct device *jrdev = ctx->dev;
21142 + struct rsa_edesc *edesc;
21143 + int ret;
21144 +
21145 + /* Allocate extended descriptor */
21146 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
21147 + if (IS_ERR(edesc))
21148 + return PTR_ERR(edesc);
21149 +
21150 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
21151 + ret = set_rsa_priv_f2_pdb(req, edesc);
21152 + if (ret)
21153 + goto init_fail;
21154 +
21155 + /* Initialize Job Descriptor */
21156 + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
21157 +
21158 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
21159 + if (!ret)
21160 + return -EINPROGRESS;
21161 +
21162 + rsa_priv_f2_unmap(jrdev, edesc, req);
21163 +
21164 +init_fail:
21165 + rsa_io_unmap(jrdev, edesc, req);
21166 + kfree(edesc);
21167 + return ret;
21168 +}
21169 +
21170 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
21171 +{
21172 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21173 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21174 + struct device *jrdev = ctx->dev;
21175 + struct rsa_edesc *edesc;
21176 + int ret;
21177 +
21178 + /* Allocate extended descriptor */
21179 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
21180 + if (IS_ERR(edesc))
21181 + return PTR_ERR(edesc);
21182 +
21183 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
21184 + ret = set_rsa_priv_f3_pdb(req, edesc);
21185 + if (ret)
21186 + goto init_fail;
21187 +
21188 + /* Initialize Job Descriptor */
21189 + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
21190 +
21191 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
21192 + if (!ret)
21193 + return -EINPROGRESS;
21194 +
21195 + rsa_priv_f3_unmap(jrdev, edesc, req);
21196 +
21197 +init_fail:
21198 + rsa_io_unmap(jrdev, edesc, req);
21199 + kfree(edesc);
21200 + return ret;
21201 +}
21202 +
21203 +static int caam_rsa_dec(struct akcipher_request *req)
21204 +{
21205 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21206 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21207 + struct caam_rsa_key *key = &ctx->key;
21208 + int ret;
21209 +
21210 + if (unlikely(!key->n || !key->d))
21211 + return -EINVAL;
21212 +
21213 + if (req->dst_len < key->n_sz) {
21214 + req->dst_len = key->n_sz;
21215 + dev_err(ctx->dev, "Output buffer length less than parameter n\n");
21216 + return -EOVERFLOW;
21217 + }
21218 +
21219 + if (key->priv_form == FORM3)
21220 + ret = caam_rsa_dec_priv_f3(req);
21221 + else if (key->priv_form == FORM2)
21222 + ret = caam_rsa_dec_priv_f2(req);
21223 + else
21224 + ret = caam_rsa_dec_priv_f1(req);
21225 +
21226 + return ret;
21227 +}
21228 +
21229 static void caam_rsa_free_key(struct caam_rsa_key *key)
21230 {
21231 kzfree(key->d);
21232 + kzfree(key->p);
21233 + kzfree(key->q);
21234 + kzfree(key->dp);
21235 + kzfree(key->dq);
21236 + kzfree(key->qinv);
21237 + kzfree(key->tmp1);
21238 + kzfree(key->tmp2);
21239 kfree(key->e);
21240 kfree(key->n);
21241 - key->d = NULL;
21242 - key->e = NULL;
21243 - key->n = NULL;
21244 - key->d_sz = 0;
21245 - key->e_sz = 0;
21246 - key->n_sz = 0;
21247 + memset(key, 0, sizeof(*key));
21248 +}
21249 +
21250 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
21251 +{
21252 + while (!**ptr && *nbytes) {
21253 + (*ptr)++;
21254 + (*nbytes)--;
21255 + }
21256 +}
21257 +
21258 +/**
21259 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
21260 + * dP, dQ and qInv could decode to less than the corresponding p, q length, as
21261 + * the BER-encoding requires that the minimum number of bytes be used to encode
21262 + * the integer. dP, dQ, qInv decoded values have to be zero-padded to the
21263 + * appropriate length.
21264 + *
21265 + * @ptr : pointer to {dP, dQ, qInv} CRT member
21266 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
21267 + * @dstlen: length in bytes of corresponding p or q prime factor
21268 + */
21269 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
21270 +{
21271 + u8 *dst;
21272 +
21273 + caam_rsa_drop_leading_zeros(&ptr, &nbytes);
21274 + if (!nbytes)
21275 + return NULL;
21276 +
21277 + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
21278 + if (!dst)
21279 + return NULL;
21280 +
21281 + memcpy(dst + (dstlen - nbytes), ptr, nbytes);
21282 +
21283 + return dst;
21284 }
21285
21286 /**
21287 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
21288 {
21289 u8 *val;
21290
21291 - while (!*buf && *nbytes) {
21292 - buf++;
21293 - (*nbytes)--;
21294 - }
21295 + caam_rsa_drop_leading_zeros(&buf, nbytes);
21296 + if (!*nbytes)
21297 + return NULL;
21298
21299 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
21300 if (!val)
21301 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
21302 unsigned int keylen)
21303 {
21304 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21305 - struct rsa_key raw_key = {0};
21306 + struct rsa_key raw_key = {NULL};
21307 struct caam_rsa_key *rsa_key = &ctx->key;
21308 int ret;
21309
21310 @@ -437,11 +798,69 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
21311 return -ENOMEM;
21312 }
21313
21314 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
21315 + struct rsa_key *raw_key)
21316 +{
21317 + struct caam_rsa_key *rsa_key = &ctx->key;
21318 + size_t p_sz = raw_key->p_sz;
21319 + size_t q_sz = raw_key->q_sz;
21320 +
21321 + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
21322 + if (!rsa_key->p)
21323 + return;
21324 + rsa_key->p_sz = p_sz;
21325 +
21326 + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
21327 + if (!rsa_key->q)
21328 + goto free_p;
21329 + rsa_key->q_sz = q_sz;
21330 +
21331 + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
21332 + if (!rsa_key->tmp1)
21333 + goto free_q;
21334 +
21335 + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
21336 + if (!rsa_key->tmp2)
21337 + goto free_tmp1;
21338 +
21339 + rsa_key->priv_form = FORM2;
21340 +
21341 + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
21342 + if (!rsa_key->dp)
21343 + goto free_tmp2;
21344 +
21345 + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
21346 + if (!rsa_key->dq)
21347 + goto free_dp;
21348 +
21349 + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
21350 + q_sz);
21351 + if (!rsa_key->qinv)
21352 + goto free_dq;
21353 +
21354 + rsa_key->priv_form = FORM3;
21355 +
21356 + return;
21357 +
21358 +free_dq:
21359 + kzfree(rsa_key->dq);
21360 +free_dp:
21361 + kzfree(rsa_key->dp);
21362 +free_tmp2:
21363 + kzfree(rsa_key->tmp2);
21364 +free_tmp1:
21365 + kzfree(rsa_key->tmp1);
21366 +free_q:
21367 + kzfree(rsa_key->q);
21368 +free_p:
21369 + kzfree(rsa_key->p);
21370 +}
21371 +
21372 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21373 unsigned int keylen)
21374 {
21375 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21376 - struct rsa_key raw_key = {0};
21377 + struct rsa_key raw_key = {NULL};
21378 struct caam_rsa_key *rsa_key = &ctx->key;
21379 int ret;
21380
21381 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21382 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
21383 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
21384
21385 + caam_rsa_set_priv_key_form(ctx, &raw_key);
21386 +
21387 return 0;
21388
21389 err:
21390 diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
21391 index f595d159..87ab75e9 100644
21392 --- a/drivers/crypto/caam/caampkc.h
21393 +++ b/drivers/crypto/caam/caampkc.h
21394 @@ -12,22 +12,76 @@
21395 #include "compat.h"
21396 #include "pdb.h"
21397
21398 +/**
21399 + * caam_priv_key_form - CAAM RSA private key representation
21400 + * CAAM RSA private key may have either of three forms.
21401 + *
21402 + * 1. The first representation consists of the pair (n, d), where the
21403 + * components have the following meanings:
21404 + * n the RSA modulus
21405 + * d the RSA private exponent
21406 + *
21407 + * 2. The second representation consists of the triplet (p, q, d), where the
21408 + * components have the following meanings:
21409 + * p the first prime factor of the RSA modulus n
21410 + * q the second prime factor of the RSA modulus n
21411 + * d the RSA private exponent
21412 + *
21413 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
21414 + * where the components have the following meanings:
21415 + * p the first prime factor of the RSA modulus n
21416 + * q the second prime factor of the RSA modulus n
21417 + * dP the first factor's CRT exponent
21418 + * dQ the second factor's CRT exponent
21419 + * qInv the (first) CRT coefficient
21420 + *
21421 + * The benefit of using the third or the second key form is lower computational
21422 + * cost for the decryption and signature operations.
21423 + */
21424 +enum caam_priv_key_form {
21425 + FORM1,
21426 + FORM2,
21427 + FORM3
21428 +};
21429 +
21430 /**
21431 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
21432 * @n : RSA modulus raw byte stream
21433 * @e : RSA public exponent raw byte stream
21434 * @d : RSA private exponent raw byte stream
21435 + * @p : RSA prime factor p of RSA modulus n
21436 + * @q : RSA prime factor q of RSA modulus n
21437 + * @dp : RSA CRT exponent of p
21438 + * @dq : RSA CRT exponent of q
21439 + * @qinv : RSA CRT coefficient
21440 + * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
21441 + * It is assumed to be as long as p.
21442 + * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
21443 + * It is assumed to be as long as q.
21444 * @n_sz : length in bytes of RSA modulus n
21445 * @e_sz : length in bytes of RSA public exponent
21446 * @d_sz : length in bytes of RSA private exponent
21447 + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
21448 + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
21449 + * @priv_form : CAAM RSA private key representation
21450 */
21451 struct caam_rsa_key {
21452 u8 *n;
21453 u8 *e;
21454 u8 *d;
21455 + u8 *p;
21456 + u8 *q;
21457 + u8 *dp;
21458 + u8 *dq;
21459 + u8 *qinv;
21460 + u8 *tmp1;
21461 + u8 *tmp2;
21462 size_t n_sz;
21463 size_t e_sz;
21464 size_t d_sz;
21465 + size_t p_sz;
21466 + size_t q_sz;
21467 + enum caam_priv_key_form priv_form;
21468 };
21469
21470 /**
21471 @@ -59,6 +113,8 @@ struct rsa_edesc {
21472 union {
21473 struct rsa_pub_pdb pub;
21474 struct rsa_priv_f1_pdb priv_f1;
21475 + struct rsa_priv_f2_pdb priv_f2;
21476 + struct rsa_priv_f3_pdb priv_f3;
21477 } pdb;
21478 u32 hw_desc[];
21479 };
21480 @@ -66,5 +122,7 @@ struct rsa_edesc {
21481 /* Descriptor construction primitives. */
21482 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
21483 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
21484 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
21485 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
21486
21487 #endif
21488 diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
21489 index 9b92af2c..fde07d4f 100644
21490 --- a/drivers/crypto/caam/caamrng.c
21491 +++ b/drivers/crypto/caam/caamrng.c
21492 @@ -52,7 +52,7 @@
21493
21494 /* length of descriptors */
21495 #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
21496 -#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
21497 +#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
21498
21499 /* Buffer, its dma address and lock */
21500 struct buf_data {
21501 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
21502 {
21503 struct buf_data *bd;
21504
21505 - bd = (struct buf_data *)((char *)desc -
21506 - offsetof(struct buf_data, hw_desc));
21507 + bd = container_of(desc, struct buf_data, hw_desc[0]);
21508
21509 if (err)
21510 caam_jr_strstatus(jrdev, err);
21511 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
21512
21513 init_sh_desc(desc, HDR_SHARE_SERIAL);
21514
21515 - /* Propagate errors from shared to job descriptor */
21516 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21517 -
21518 /* Generate random bytes */
21519 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
21520
21521 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
21522 if (err)
21523 return err;
21524
21525 - err = caam_init_buf(ctx, 1);
21526 - if (err)
21527 - return err;
21528 -
21529 - return 0;
21530 + return caam_init_buf(ctx, 1);
21531 }
21532
21533 static struct hwrng caam_rng = {
21534 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
21535 pr_err("Job Ring Device allocation for transform failed\n");
21536 return PTR_ERR(dev);
21537 }
21538 - rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
21539 + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
21540 if (!rng_ctx) {
21541 err = -ENOMEM;
21542 goto free_caam_alloc;
21543 diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
21544 index 7149cd24..4e084f51 100644
21545 --- a/drivers/crypto/caam/compat.h
21546 +++ b/drivers/crypto/caam/compat.h
21547 @@ -16,6 +16,7 @@
21548 #include <linux/of_platform.h>
21549 #include <linux/dma-mapping.h>
21550 #include <linux/io.h>
21551 +#include <linux/iommu.h>
21552 #include <linux/spinlock.h>
21553 #include <linux/rtnetlink.h>
21554 #include <linux/in.h>
21555 diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
21556 index 98468b96..8f9642c6 100644
21557 --- a/drivers/crypto/caam/ctrl.c
21558 +++ b/drivers/crypto/caam/ctrl.c
21559 @@ -2,40 +2,41 @@
21560 * Controller-level driver, kernel property detection, initialization
21561 *
21562 * Copyright 2008-2012 Freescale Semiconductor, Inc.
21563 + * Copyright 2017 NXP
21564 */
21565
21566 #include <linux/device.h>
21567 #include <linux/of_address.h>
21568 #include <linux/of_irq.h>
21569 +#include <linux/sys_soc.h>
21570
21571 #include "compat.h"
21572 #include "regs.h"
21573 #include "intern.h"
21574 #include "jr.h"
21575 #include "desc_constr.h"
21576 -#include "error.h"
21577 #include "ctrl.h"
21578
21579 bool caam_little_end;
21580 EXPORT_SYMBOL(caam_little_end);
21581 +bool caam_imx;
21582 +EXPORT_SYMBOL(caam_imx);
21583 +bool caam_dpaa2;
21584 +EXPORT_SYMBOL(caam_dpaa2);
21585 +
21586 +#ifdef CONFIG_CAAM_QI
21587 +#include "qi.h"
21588 +#endif
21589
21590 /*
21591 * i.MX targets tend to have clock control subsystems that can
21592 * enable/disable clocking to our device.
21593 */
21594 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
21595 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
21596 - char *clk_name)
21597 -{
21598 - return devm_clk_get(dev, clk_name);
21599 -}
21600 -#else
21601 static inline struct clk *caam_drv_identify_clk(struct device *dev,
21602 char *clk_name)
21603 {
21604 - return NULL;
21605 + return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
21606 }
21607 -#endif
21608
21609 /*
21610 * Descriptor to instantiate RNG State Handle 0 in normal mode and
21611 @@ -270,7 +271,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
21612 /*
21613 * If the corresponding bit is set, then it means the state
21614 * handle was initialized by us, and thus it needs to be
21615 - * deintialized as well
21616 + * deinitialized as well
21617 */
21618 if ((1 << sh_idx) & state_handle_mask) {
21619 /*
21620 @@ -303,20 +304,24 @@ static int caam_remove(struct platform_device *pdev)
21621 struct device *ctrldev;
21622 struct caam_drv_private *ctrlpriv;
21623 struct caam_ctrl __iomem *ctrl;
21624 - int ring;
21625
21626 ctrldev = &pdev->dev;
21627 ctrlpriv = dev_get_drvdata(ctrldev);
21628 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
21629
21630 - /* Remove platform devices for JobRs */
21631 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
21632 - if (ctrlpriv->jrpdev[ring])
21633 - of_device_unregister(ctrlpriv->jrpdev[ring]);
21634 - }
21635 + /* Remove platform devices under the crypto node */
21636 + of_platform_depopulate(ctrldev);
21637 +
21638 +#ifdef CONFIG_CAAM_QI
21639 + if (ctrlpriv->qidev)
21640 + caam_qi_shutdown(ctrlpriv->qidev);
21641 +#endif
21642
21643 - /* De-initialize RNG state handles initialized by this driver. */
21644 - if (ctrlpriv->rng4_sh_init)
21645 + /*
21646 + * De-initialize RNG state handles initialized by this driver.
21647 + * In case of DPAA 2.x, RNG is managed by MC firmware.
21648 + */
21649 + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
21650 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
21651
21652 /* Shut down debug views */
21653 @@ -331,8 +336,8 @@ static int caam_remove(struct platform_device *pdev)
21654 clk_disable_unprepare(ctrlpriv->caam_ipg);
21655 clk_disable_unprepare(ctrlpriv->caam_mem);
21656 clk_disable_unprepare(ctrlpriv->caam_aclk);
21657 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21658 -
21659 + if (ctrlpriv->caam_emi_slow)
21660 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21661 return 0;
21662 }
21663
21664 @@ -366,11 +371,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
21665 */
21666 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
21667 >> RTSDCTL_ENT_DLY_SHIFT;
21668 - if (ent_delay <= val) {
21669 - /* put RNG4 into run mode */
21670 - clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
21671 - return;
21672 - }
21673 + if (ent_delay <= val)
21674 + goto start_rng;
21675
21676 val = rd_reg32(&r4tst->rtsdctl);
21677 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
21678 @@ -382,15 +384,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
21679 wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
21680 /* read the control register */
21681 val = rd_reg32(&r4tst->rtmctl);
21682 +start_rng:
21683 /*
21684 * select raw sampling in both entropy shifter
21685 - * and statistical checker
21686 + * and statistical checker; put RNG4 into run mode
21687 */
21688 - clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
21689 - /* put RNG4 into run mode */
21690 - clrsetbits_32(&val, RTMCTL_PRGM, 0);
21691 - /* write back the control register */
21692 - wr_reg32(&r4tst->rtmctl, val);
21693 + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
21694 }
21695
21696 /**
21697 @@ -411,28 +410,26 @@ int caam_get_era(void)
21698 }
21699 EXPORT_SYMBOL(caam_get_era);
21700
21701 -#ifdef CONFIG_DEBUG_FS
21702 -static int caam_debugfs_u64_get(void *data, u64 *val)
21703 -{
21704 - *val = caam64_to_cpu(*(u64 *)data);
21705 - return 0;
21706 -}
21707 -
21708 -static int caam_debugfs_u32_get(void *data, u64 *val)
21709 -{
21710 - *val = caam32_to_cpu(*(u32 *)data);
21711 - return 0;
21712 -}
21713 -
21714 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
21715 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
21716 -#endif
21717 +static const struct of_device_id caam_match[] = {
21718 + {
21719 + .compatible = "fsl,sec-v4.0",
21720 + },
21721 + {
21722 + .compatible = "fsl,sec4.0",
21723 + },
21724 + {},
21725 +};
21726 +MODULE_DEVICE_TABLE(of, caam_match);
21727
21728 /* Probe routine for CAAM top (controller) level */
21729 static int caam_probe(struct platform_device *pdev)
21730 {
21731 - int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21732 + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21733 u64 caam_id;
21734 + static const struct soc_device_attribute imx_soc[] = {
21735 + {.family = "Freescale i.MX"},
21736 + {},
21737 + };
21738 struct device *dev;
21739 struct device_node *nprop, *np;
21740 struct caam_ctrl __iomem *ctrl;
21741 @@ -452,9 +449,10 @@ static int caam_probe(struct platform_device *pdev)
21742
21743 dev = &pdev->dev;
21744 dev_set_drvdata(dev, ctrlpriv);
21745 - ctrlpriv->pdev = pdev;
21746 nprop = pdev->dev.of_node;
21747
21748 + caam_imx = (bool)soc_device_match(imx_soc);
21749 +
21750 /* Enable clocking */
21751 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
21752 if (IS_ERR(clk)) {
21753 @@ -483,14 +481,16 @@ static int caam_probe(struct platform_device *pdev)
21754 }
21755 ctrlpriv->caam_aclk = clk;
21756
21757 - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21758 - if (IS_ERR(clk)) {
21759 - ret = PTR_ERR(clk);
21760 - dev_err(&pdev->dev,
21761 - "can't identify CAAM emi_slow clk: %d\n", ret);
21762 - return ret;
21763 + if (!of_machine_is_compatible("fsl,imx6ul")) {
21764 + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21765 + if (IS_ERR(clk)) {
21766 + ret = PTR_ERR(clk);
21767 + dev_err(&pdev->dev,
21768 + "can't identify CAAM emi_slow clk: %d\n", ret);
21769 + return ret;
21770 + }
21771 + ctrlpriv->caam_emi_slow = clk;
21772 }
21773 - ctrlpriv->caam_emi_slow = clk;
21774
21775 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
21776 if (ret < 0) {
21777 @@ -511,11 +511,13 @@ static int caam_probe(struct platform_device *pdev)
21778 goto disable_caam_mem;
21779 }
21780
21781 - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21782 - if (ret < 0) {
21783 - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21784 - ret);
21785 - goto disable_caam_aclk;
21786 + if (ctrlpriv->caam_emi_slow) {
21787 + ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21788 + if (ret < 0) {
21789 + dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21790 + ret);
21791 + goto disable_caam_aclk;
21792 + }
21793 }
21794
21795 /* Get configuration properties from device tree */
21796 @@ -542,13 +544,13 @@ static int caam_probe(struct platform_device *pdev)
21797 else
21798 BLOCK_OFFSET = PG_SIZE_64K;
21799
21800 - ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
21801 - ctrlpriv->assure = (struct caam_assurance __force *)
21802 - ((uint8_t *)ctrl +
21803 + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
21804 + ctrlpriv->assure = (struct caam_assurance __iomem __force *)
21805 + ((__force uint8_t *)ctrl +
21806 BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
21807 );
21808 - ctrlpriv->deco = (struct caam_deco __force *)
21809 - ((uint8_t *)ctrl +
21810 + ctrlpriv->deco = (struct caam_deco __iomem __force *)
21811 + ((__force uint8_t *)ctrl +
21812 BLOCK_OFFSET * DECO_BLOCK_NUMBER
21813 );
21814
21815 @@ -557,12 +559,17 @@ static int caam_probe(struct platform_device *pdev)
21816
21817 /*
21818 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
21819 - * long pointers in master configuration register
21820 + * long pointers in master configuration register.
21821 + * In case of DPAA 2.x, Management Complex firmware performs
21822 + * the configuration.
21823 */
21824 - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21825 - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21826 - MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21827 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
21828 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
21829 + if (!caam_dpaa2)
21830 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21831 + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21832 + MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21833 + (sizeof(dma_addr_t) == sizeof(u64) ?
21834 + MCFGR_LONG_PTR : 0));
21835
21836 /*
21837 * Read the Compile Time paramters and SCFGR to determine
21838 @@ -590,64 +597,67 @@ static int caam_probe(struct platform_device *pdev)
21839 JRSTART_JR1_START | JRSTART_JR2_START |
21840 JRSTART_JR3_START);
21841
21842 - if (sizeof(dma_addr_t) == sizeof(u64))
21843 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21844 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21845 + if (sizeof(dma_addr_t) == sizeof(u64)) {
21846 + if (caam_dpaa2)
21847 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
21848 + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21849 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21850 else
21851 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21852 - else
21853 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21854 -
21855 - /*
21856 - * Detect and enable JobRs
21857 - * First, find out how many ring spec'ed, allocate references
21858 - * for all, then go probe each one.
21859 - */
21860 - rspec = 0;
21861 - for_each_available_child_of_node(nprop, np)
21862 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21863 - of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
21864 - rspec++;
21865 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21866 + } else {
21867 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21868 + }
21869 + if (ret) {
21870 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
21871 + goto iounmap_ctrl;
21872 + }
21873
21874 - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
21875 - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
21876 - if (ctrlpriv->jrpdev == NULL) {
21877 - ret = -ENOMEM;
21878 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
21879 + if (ret) {
21880 + dev_err(dev, "JR platform devices creation error\n");
21881 goto iounmap_ctrl;
21882 }
21883
21884 +#ifdef CONFIG_DEBUG_FS
21885 + /*
21886 + * FIXME: needs better naming distinction, as some amalgamation of
21887 + * "caam" and nprop->full_name. The OF name isn't distinctive,
21888 + * but does separate instances
21889 + */
21890 + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21891 +
21892 + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21893 + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21894 +#endif
21895 ring = 0;
21896 - ctrlpriv->total_jobrs = 0;
21897 for_each_available_child_of_node(nprop, np)
21898 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21899 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
21900 - ctrlpriv->jrpdev[ring] =
21901 - of_platform_device_create(np, NULL, dev);
21902 - if (!ctrlpriv->jrpdev[ring]) {
21903 - pr_warn("JR%d Platform device creation error\n",
21904 - ring);
21905 - continue;
21906 - }
21907 - ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
21908 - ((uint8_t *)ctrl +
21909 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
21910 + ((__force uint8_t *)ctrl +
21911 (ring + JR_BLOCK_NUMBER) *
21912 BLOCK_OFFSET
21913 );
21914 ctrlpriv->total_jobrs++;
21915 ring++;
21916 - }
21917 + }
21918
21919 - /* Check to see if QI present. If so, enable */
21920 - ctrlpriv->qi_present =
21921 - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
21922 - CTPR_MS_QI_MASK);
21923 - if (ctrlpriv->qi_present) {
21924 - ctrlpriv->qi = (struct caam_queue_if __force *)
21925 - ((uint8_t *)ctrl +
21926 + /* Check to see if (DPAA 1.x) QI present. If so, enable */
21927 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
21928 + if (ctrlpriv->qi_present && !caam_dpaa2) {
21929 + ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
21930 + ((__force uint8_t *)ctrl +
21931 BLOCK_OFFSET * QI_BLOCK_NUMBER
21932 );
21933 /* This is all that's required to physically enable QI */
21934 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
21935 +
21936 + /* If QMAN driver is present, init CAAM-QI backend */
21937 +#ifdef CONFIG_CAAM_QI
21938 + ret = caam_qi_init(pdev);
21939 + if (ret)
21940 + dev_err(dev, "caam qi i/f init failed: %d\n", ret);
21941 +#endif
21942 }
21943
21944 /* If no QI and no rings specified, quit and go home */
21945 @@ -662,8 +672,10 @@ static int caam_probe(struct platform_device *pdev)
21946 /*
21947 * If SEC has RNG version >= 4 and RNG state handle has not been
21948 * already instantiated, do RNG instantiation
21949 + * In case of DPAA 2.x, RNG is managed by MC firmware.
21950 */
21951 - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21952 + if (!caam_dpaa2 &&
21953 + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21954 ctrlpriv->rng4_sh_init =
21955 rd_reg32(&ctrl->r4tst[0].rdsta);
21956 /*
21957 @@ -731,77 +743,46 @@ static int caam_probe(struct platform_device *pdev)
21958 /* Report "alive" for developer to see */
21959 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
21960 caam_get_era());
21961 - dev_info(dev, "job rings = %d, qi = %d\n",
21962 - ctrlpriv->total_jobrs, ctrlpriv->qi_present);
21963 + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
21964 + ctrlpriv->total_jobrs, ctrlpriv->qi_present,
21965 + caam_dpaa2 ? "yes" : "no");
21966
21967 #ifdef CONFIG_DEBUG_FS
21968 - /*
21969 - * FIXME: needs better naming distinction, as some amalgamation of
21970 - * "caam" and nprop->full_name. The OF name isn't distinctive,
21971 - * but does separate instances
21972 - */
21973 - perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21974 -
21975 - ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21976 - ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21977 -
21978 - /* Controller-level - performance monitor counters */
21979 -
21980 - ctrlpriv->ctl_rq_dequeued =
21981 - debugfs_create_file("rq_dequeued",
21982 - S_IRUSR | S_IRGRP | S_IROTH,
21983 - ctrlpriv->ctl, &perfmon->req_dequeued,
21984 - &caam_fops_u64_ro);
21985 - ctrlpriv->ctl_ob_enc_req =
21986 - debugfs_create_file("ob_rq_encrypted",
21987 - S_IRUSR | S_IRGRP | S_IROTH,
21988 - ctrlpriv->ctl, &perfmon->ob_enc_req,
21989 - &caam_fops_u64_ro);
21990 - ctrlpriv->ctl_ib_dec_req =
21991 - debugfs_create_file("ib_rq_decrypted",
21992 - S_IRUSR | S_IRGRP | S_IROTH,
21993 - ctrlpriv->ctl, &perfmon->ib_dec_req,
21994 - &caam_fops_u64_ro);
21995 - ctrlpriv->ctl_ob_enc_bytes =
21996 - debugfs_create_file("ob_bytes_encrypted",
21997 - S_IRUSR | S_IRGRP | S_IROTH,
21998 - ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21999 - &caam_fops_u64_ro);
22000 - ctrlpriv->ctl_ob_prot_bytes =
22001 - debugfs_create_file("ob_bytes_protected",
22002 - S_IRUSR | S_IRGRP | S_IROTH,
22003 - ctrlpriv->ctl, &perfmon->ob_prot_bytes,
22004 - &caam_fops_u64_ro);
22005 - ctrlpriv->ctl_ib_dec_bytes =
22006 - debugfs_create_file("ib_bytes_decrypted",
22007 - S_IRUSR | S_IRGRP | S_IROTH,
22008 - ctrlpriv->ctl, &perfmon->ib_dec_bytes,
22009 - &caam_fops_u64_ro);
22010 - ctrlpriv->ctl_ib_valid_bytes =
22011 - debugfs_create_file("ib_bytes_validated",
22012 - S_IRUSR | S_IRGRP | S_IROTH,
22013 - ctrlpriv->ctl, &perfmon->ib_valid_bytes,
22014 - &caam_fops_u64_ro);
22015 + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
22016 + ctrlpriv->ctl, &perfmon->req_dequeued,
22017 + &caam_fops_u64_ro);
22018 + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
22019 + ctrlpriv->ctl, &perfmon->ob_enc_req,
22020 + &caam_fops_u64_ro);
22021 + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
22022 + ctrlpriv->ctl, &perfmon->ib_dec_req,
22023 + &caam_fops_u64_ro);
22024 + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
22025 + ctrlpriv->ctl, &perfmon->ob_enc_bytes,
22026 + &caam_fops_u64_ro);
22027 + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
22028 + ctrlpriv->ctl, &perfmon->ob_prot_bytes,
22029 + &caam_fops_u64_ro);
22030 + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
22031 + ctrlpriv->ctl, &perfmon->ib_dec_bytes,
22032 + &caam_fops_u64_ro);
22033 + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
22034 + ctrlpriv->ctl, &perfmon->ib_valid_bytes,
22035 + &caam_fops_u64_ro);
22036
22037 /* Controller level - global status values */
22038 - ctrlpriv->ctl_faultaddr =
22039 - debugfs_create_file("fault_addr",
22040 - S_IRUSR | S_IRGRP | S_IROTH,
22041 - ctrlpriv->ctl, &perfmon->faultaddr,
22042 - &caam_fops_u32_ro);
22043 - ctrlpriv->ctl_faultdetail =
22044 - debugfs_create_file("fault_detail",
22045 - S_IRUSR | S_IRGRP | S_IROTH,
22046 - ctrlpriv->ctl, &perfmon->faultdetail,
22047 - &caam_fops_u32_ro);
22048 - ctrlpriv->ctl_faultstatus =
22049 - debugfs_create_file("fault_status",
22050 - S_IRUSR | S_IRGRP | S_IROTH,
22051 - ctrlpriv->ctl, &perfmon->status,
22052 - &caam_fops_u32_ro);
22053 + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
22054 + ctrlpriv->ctl, &perfmon->faultaddr,
22055 + &caam_fops_u32_ro);
22056 + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
22057 + ctrlpriv->ctl, &perfmon->faultdetail,
22058 + &caam_fops_u32_ro);
22059 + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
22060 + ctrlpriv->ctl, &perfmon->status,
22061 + &caam_fops_u32_ro);
22062
22063 /* Internal covering keys (useful in non-secure mode only) */
22064 - ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
22065 + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
22066 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22067 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
22068 S_IRUSR |
22069 @@ -809,7 +790,7 @@ static int caam_probe(struct platform_device *pdev)
22070 ctrlpriv->ctl,
22071 &ctrlpriv->ctl_kek_wrap);
22072
22073 - ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
22074 + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
22075 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22076 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
22077 S_IRUSR |
22078 @@ -817,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
22079 ctrlpriv->ctl,
22080 &ctrlpriv->ctl_tkek_wrap);
22081
22082 - ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
22083 + ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
22084 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22085 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
22086 S_IRUSR |
22087 @@ -828,13 +809,17 @@ static int caam_probe(struct platform_device *pdev)
22088 return 0;
22089
22090 caam_remove:
22091 +#ifdef CONFIG_DEBUG_FS
22092 + debugfs_remove_recursive(ctrlpriv->dfs_root);
22093 +#endif
22094 caam_remove(pdev);
22095 return ret;
22096
22097 iounmap_ctrl:
22098 iounmap(ctrl);
22099 disable_caam_emi_slow:
22100 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22101 + if (ctrlpriv->caam_emi_slow)
22102 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22103 disable_caam_aclk:
22104 clk_disable_unprepare(ctrlpriv->caam_aclk);
22105 disable_caam_mem:
22106 @@ -844,17 +829,6 @@ static int caam_probe(struct platform_device *pdev)
22107 return ret;
22108 }
22109
22110 -static struct of_device_id caam_match[] = {
22111 - {
22112 - .compatible = "fsl,sec-v4.0",
22113 - },
22114 - {
22115 - .compatible = "fsl,sec4.0",
22116 - },
22117 - {},
22118 -};
22119 -MODULE_DEVICE_TABLE(of, caam_match);
22120 -
22121 static struct platform_driver caam_driver = {
22122 .driver = {
22123 .name = "caam",
22124 diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
22125 index cac5402a..7e7bf68c 100644
22126 --- a/drivers/crypto/caam/ctrl.h
22127 +++ b/drivers/crypto/caam/ctrl.h
22128 @@ -10,4 +10,6 @@
22129 /* Prototypes for backend-level services exposed to APIs */
22130 int caam_get_era(void);
22131
22132 +extern bool caam_dpaa2;
22133 +
22134 #endif /* CTRL_H */
22135 diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
22136 index 513b6646..6ec6f8c3 100644
22137 --- a/drivers/crypto/caam/desc.h
22138 +++ b/drivers/crypto/caam/desc.h
22139 @@ -22,12 +22,6 @@
22140 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
22141 #define SEC4_SG_OFFSET_MASK 0x00001fff
22142
22143 -struct sec4_sg_entry {
22144 - u64 ptr;
22145 - u32 len;
22146 - u32 bpid_offset;
22147 -};
22148 -
22149 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
22150 #define MAX_CAAM_DESCSIZE 64
22151
22152 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
22153 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
22154 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
22155 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
22156 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
22157 #define CMD_STORE (0x0a << CMD_SHIFT)
22158 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
22159 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
22160 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
22161 #define HDR_ZRO 0x00008000
22162
22163 /* Start Index or SharedDesc Length */
22164 -#define HDR_START_IDX_MASK 0x3f
22165 #define HDR_START_IDX_SHIFT 16
22166 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
22167
22168 /* If shared descriptor header, 6-bit length */
22169 #define HDR_DESCLEN_SHR_MASK 0x3f
22170 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
22171 #define HDR_PROP_DNR 0x00000800
22172
22173 /* JobDesc/SharedDesc share property */
22174 -#define HDR_SD_SHARE_MASK 0x03
22175 #define HDR_SD_SHARE_SHIFT 8
22176 -#define HDR_JD_SHARE_MASK 0x07
22177 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
22178 #define HDR_JD_SHARE_SHIFT 8
22179 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
22180
22181 #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
22182 #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
22183 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
22184 #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
22185 #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
22186 #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
22187 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
22188 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
22189 #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
22190 #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
22191 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
22192 @@ -400,7 +395,7 @@ struct sec4_sg_entry {
22193 #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
22194 #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
22195 #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
22196 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
22197 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
22198 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
22199 #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
22200 #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
22201 @@ -1107,8 +1102,8 @@ struct sec4_sg_entry {
22202 /* For non-protocol/alg-only op commands */
22203 #define OP_ALG_TYPE_SHIFT 24
22204 #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
22205 -#define OP_ALG_TYPE_CLASS1 2
22206 -#define OP_ALG_TYPE_CLASS2 4
22207 +#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
22208 +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
22209
22210 #define OP_ALG_ALGSEL_SHIFT 16
22211 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
22212 @@ -1249,7 +1244,7 @@ struct sec4_sg_entry {
22213 #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
22214
22215 /* PKHA mode copy-memory functions */
22216 -#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
22217 +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
22218 #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
22219 #define OP_ALG_PKMODE_DST_REG_SHIFT 10
22220 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
22221 @@ -1445,7 +1440,7 @@ struct sec4_sg_entry {
22222 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
22223 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
22224 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
22225 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
22226 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
22227 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
22228 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
22229 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
22230 @@ -1629,4 +1624,31 @@ struct sec4_sg_entry {
22231 /* Frame Descriptor Command for Replacement Job Descriptor */
22232 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
22233
22234 +/* CHA Control Register bits */
22235 +#define CCTRL_RESET_CHA_ALL 0x1
22236 +#define CCTRL_RESET_CHA_AESA 0x2
22237 +#define CCTRL_RESET_CHA_DESA 0x4
22238 +#define CCTRL_RESET_CHA_AFHA 0x8
22239 +#define CCTRL_RESET_CHA_KFHA 0x10
22240 +#define CCTRL_RESET_CHA_SF8A 0x20
22241 +#define CCTRL_RESET_CHA_PKHA 0x40
22242 +#define CCTRL_RESET_CHA_MDHA 0x80
22243 +#define CCTRL_RESET_CHA_CRCA 0x100
22244 +#define CCTRL_RESET_CHA_RNG 0x200
22245 +#define CCTRL_RESET_CHA_SF9A 0x400
22246 +#define CCTRL_RESET_CHA_ZUCE 0x800
22247 +#define CCTRL_RESET_CHA_ZUCA 0x1000
22248 +#define CCTRL_UNLOAD_PK_A0 0x10000
22249 +#define CCTRL_UNLOAD_PK_A1 0x20000
22250 +#define CCTRL_UNLOAD_PK_A2 0x40000
22251 +#define CCTRL_UNLOAD_PK_A3 0x80000
22252 +#define CCTRL_UNLOAD_PK_B0 0x100000
22253 +#define CCTRL_UNLOAD_PK_B1 0x200000
22254 +#define CCTRL_UNLOAD_PK_B2 0x400000
22255 +#define CCTRL_UNLOAD_PK_B3 0x800000
22256 +#define CCTRL_UNLOAD_PK_N 0x1000000
22257 +#define CCTRL_UNLOAD_PK_A 0x4000000
22258 +#define CCTRL_UNLOAD_PK_B 0x8000000
22259 +#define CCTRL_UNLOAD_SBOX 0x10000000
22260 +
22261 #endif /* DESC_H */
22262 diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
22263 index a8cd8a78..2d9dbeca 100644
22264 --- a/drivers/crypto/caam/desc_constr.h
22265 +++ b/drivers/crypto/caam/desc_constr.h
22266 @@ -4,6 +4,9 @@
22267 * Copyright 2008-2012 Freescale Semiconductor, Inc.
22268 */
22269
22270 +#ifndef DESC_CONSTR_H
22271 +#define DESC_CONSTR_H
22272 +
22273 #include "desc.h"
22274 #include "regs.h"
22275
22276 @@ -33,38 +36,39 @@
22277
22278 extern bool caam_little_end;
22279
22280 -static inline int desc_len(u32 *desc)
22281 +static inline int desc_len(u32 * const desc)
22282 {
22283 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
22284 }
22285
22286 -static inline int desc_bytes(void *desc)
22287 +static inline int desc_bytes(void * const desc)
22288 {
22289 return desc_len(desc) * CAAM_CMD_SZ;
22290 }
22291
22292 -static inline u32 *desc_end(u32 *desc)
22293 +static inline u32 *desc_end(u32 * const desc)
22294 {
22295 return desc + desc_len(desc);
22296 }
22297
22298 -static inline void *sh_desc_pdb(u32 *desc)
22299 +static inline void *sh_desc_pdb(u32 * const desc)
22300 {
22301 return desc + 1;
22302 }
22303
22304 -static inline void init_desc(u32 *desc, u32 options)
22305 +static inline void init_desc(u32 * const desc, u32 options)
22306 {
22307 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
22308 }
22309
22310 -static inline void init_sh_desc(u32 *desc, u32 options)
22311 +static inline void init_sh_desc(u32 * const desc, u32 options)
22312 {
22313 PRINT_POS;
22314 init_desc(desc, CMD_SHARED_DESC_HDR | options);
22315 }
22316
22317 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22318 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
22319 + size_t pdb_bytes)
22320 {
22321 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22322
22323 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22324 options);
22325 }
22326
22327 -static inline void init_job_desc(u32 *desc, u32 options)
22328 +static inline void init_job_desc(u32 * const desc, u32 options)
22329 {
22330 init_desc(desc, CMD_DESC_HDR | options);
22331 }
22332
22333 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22334 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
22335 + size_t pdb_bytes)
22336 {
22337 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22338
22339 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
22340 }
22341
22342 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22343 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
22344 {
22345 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
22346
22347 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22348 CAAM_PTR_SZ / CAAM_CMD_SZ);
22349 }
22350
22351 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22352 - u32 options)
22353 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
22354 + int len, u32 options)
22355 {
22356 PRINT_POS;
22357 init_job_desc(desc, HDR_SHARED | options |
22358 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22359 append_ptr(desc, ptr);
22360 }
22361
22362 -static inline void append_data(u32 *desc, void *data, int len)
22363 +static inline void append_data(u32 * const desc, void *data, int len)
22364 {
22365 u32 *offset = desc_end(desc);
22366
22367 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc, void *data, int len)
22368 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
22369 }
22370
22371 -static inline void append_cmd(u32 *desc, u32 command)
22372 +static inline void append_cmd(u32 * const desc, u32 command)
22373 {
22374 u32 *cmd = desc_end(desc);
22375
22376 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc, u32 command)
22377
22378 #define append_u32 append_cmd
22379
22380 -static inline void append_u64(u32 *desc, u64 data)
22381 +static inline void append_u64(u32 * const desc, u64 data)
22382 {
22383 u32 *offset = desc_end(desc);
22384
22385 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc, u64 data)
22386 }
22387
22388 /* Write command without affecting header, and return pointer to next word */
22389 -static inline u32 *write_cmd(u32 *desc, u32 command)
22390 +static inline u32 *write_cmd(u32 * const desc, u32 command)
22391 {
22392 *desc = cpu_to_caam32(command);
22393
22394 return desc + 1;
22395 }
22396
22397 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22398 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
22399 u32 command)
22400 {
22401 append_cmd(desc, command | len);
22402 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22403 }
22404
22405 /* Write length after pointer, rather than inside command */
22406 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22407 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
22408 unsigned int len, u32 command)
22409 {
22410 append_cmd(desc, command);
22411 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22412 append_cmd(desc, len);
22413 }
22414
22415 -static inline void append_cmd_data(u32 *desc, void *data, int len,
22416 +static inline void append_cmd_data(u32 * const desc, void *data, int len,
22417 u32 command)
22418 {
22419 append_cmd(desc, command | IMMEDIATE | len);
22420 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
22421 }
22422
22423 #define APPEND_CMD_RET(cmd, op) \
22424 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
22425 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
22426 { \
22427 u32 *cmd = desc_end(desc); \
22428 PRINT_POS; \
22429 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
22430 }
22431 APPEND_CMD_RET(jump, JUMP)
22432 APPEND_CMD_RET(move, MOVE)
22433 +APPEND_CMD_RET(moveb, MOVEB)
22434
22435 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
22436 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
22437 {
22438 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
22439 (desc_len(desc) - (jump_cmd - desc)));
22440 }
22441
22442 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22443 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
22444 {
22445 u32 val = caam32_to_cpu(*move_cmd);
22446
22447 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22448 }
22449
22450 #define APPEND_CMD(cmd, op) \
22451 -static inline void append_##cmd(u32 *desc, u32 options) \
22452 +static inline void append_##cmd(u32 * const desc, u32 options) \
22453 { \
22454 PRINT_POS; \
22455 append_cmd(desc, CMD_##op | options); \
22456 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
22457 APPEND_CMD(operation, OPERATION)
22458
22459 #define APPEND_CMD_LEN(cmd, op) \
22460 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
22461 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
22462 + u32 options) \
22463 { \
22464 PRINT_POS; \
22465 append_cmd(desc, CMD_##op | len | options); \
22466 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
22467 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
22468
22469 #define APPEND_CMD_PTR(cmd, op) \
22470 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
22471 - u32 options) \
22472 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22473 + unsigned int len, u32 options) \
22474 { \
22475 PRINT_POS; \
22476 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
22477 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
22478 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
22479 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
22480
22481 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22482 - u32 options)
22483 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
22484 + unsigned int len, u32 options)
22485 {
22486 u32 cmd_src;
22487
22488 @@ -249,7 +256,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22489 }
22490
22491 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
22492 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
22493 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
22494 + dma_addr_t ptr, \
22495 unsigned int len, \
22496 u32 options) \
22497 { \
22498 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
22499 APPEND_SEQ_PTR_INTLEN(out, OUT)
22500
22501 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
22502 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22503 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22504 unsigned int len, u32 options) \
22505 { \
22506 PRINT_POS; \
22507 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
22508 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
22509
22510 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
22511 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
22512 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
22513 unsigned int len, u32 options) \
22514 { \
22515 PRINT_POS; \
22516 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
22517 * the size of its type
22518 */
22519 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
22520 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
22521 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22522 type len, u32 options) \
22523 { \
22524 PRINT_POS; \
22525 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
22526 * from length of immediate data provided, e.g., split keys
22527 */
22528 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
22529 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22530 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22531 unsigned int data_len, \
22532 unsigned int len, u32 options) \
22533 { \
22534 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22535 APPEND_CMD_PTR_TO_IMM2(key, KEY);
22536
22537 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
22538 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
22539 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
22540 u32 options) \
22541 { \
22542 PRINT_POS; \
22543 @@ -426,3 +434,66 @@ do { \
22544 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
22545 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
22546 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
22547 +
22548 +/**
22549 + * struct alginfo - Container for algorithm details
22550 + * @algtype: algorithm selector; for valid values, see documentation of the
22551 + * functions where it is used.
22552 + * @keylen: length of the provided algorithm key, in bytes
22553 + * @keylen_pad: padded length of the provided algorithm key, in bytes
22554 + * @key: address where algorithm key resides; virtual address if key_inline
22555 + * is true, dma (bus) address if key_inline is false.
22556 + * @key_inline: true - key can be inlined in the descriptor; false - key is
22557 + * referenced by the descriptor
22558 + */
22559 +struct alginfo {
22560 + u32 algtype;
22561 + unsigned int keylen;
22562 + unsigned int keylen_pad;
22563 + union {
22564 + dma_addr_t key_dma;
22565 + void *key_virt;
22566 + };
22567 + bool key_inline;
22568 +};
22569 +
22570 +/**
22571 + * desc_inline_query() - Provide indications on which data items can be inlined
22572 + * and which shall be referenced in a shared descriptor.
22573 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
22574 + * excluding the data items to be inlined (or corresponding
22575 + * pointer if an item is not inlined). Each cnstr_* function that
22576 + * generates descriptors should have a define mentioning
22577 + * corresponding length.
22578 + * @jd_len: Maximum length of the job descriptor(s) that will be used
22579 + * together with the shared descriptor.
22580 + * @data_len: Array of lengths of the data items trying to be inlined
22581 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
22582 + * otherwise.
22583 + * @count: Number of data items (size of @data_len array); must be <= 32
22584 + *
22585 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
22586 + * check @inl_mask for details.
22587 + */
22588 +static inline int desc_inline_query(unsigned int sd_base_len,
22589 + unsigned int jd_len, unsigned int *data_len,
22590 + u32 *inl_mask, unsigned int count)
22591 +{
22592 + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
22593 + unsigned int i;
22594 +
22595 + *inl_mask = 0;
22596 + for (i = 0; (i < count) && (rem_bytes > 0); i++) {
22597 + if (rem_bytes - (int)(data_len[i] +
22598 + (count - i - 1) * CAAM_PTR_SZ) >= 0) {
22599 + rem_bytes -= data_len[i];
22600 + *inl_mask |= (1 << i);
22601 + } else {
22602 + rem_bytes -= CAAM_PTR_SZ;
22603 + }
22604 + }
22605 +
22606 + return (rem_bytes >= 0) ? 0 : -1;
22607 +}
22608 +
22609 +#endif /* DESC_CONSTR_H */
22610 diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
22611 new file mode 100644
22612 index 00000000..410cd790
22613 --- /dev/null
22614 +++ b/drivers/crypto/caam/dpseci.c
22615 @@ -0,0 +1,859 @@
22616 +/*
22617 + * Copyright 2013-2016 Freescale Semiconductor Inc.
22618 + * Copyright 2017 NXP
22619 + *
22620 + * Redistribution and use in source and binary forms, with or without
22621 + * modification, are permitted provided that the following conditions are met:
22622 + * * Redistributions of source code must retain the above copyright
22623 + * notice, this list of conditions and the following disclaimer.
22624 + * * Redistributions in binary form must reproduce the above copyright
22625 + * notice, this list of conditions and the following disclaimer in the
22626 + * documentation and/or other materials provided with the distribution.
22627 + * * Neither the names of the above-listed copyright holders nor the
22628 + * names of any contributors may be used to endorse or promote products
22629 + * derived from this software without specific prior written permission.
22630 + *
22631 + *
22632 + * ALTERNATIVELY, this software may be distributed under the terms of the
22633 + * GNU General Public License ("GPL") as published by the Free Software
22634 + * Foundation, either version 2 of that License or (at your option) any
22635 + * later version.
22636 + *
22637 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22638 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22639 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22640 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22641 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22642 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22643 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22644 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22645 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22646 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22647 + * POSSIBILITY OF SUCH DAMAGE.
22648 + */
22649 +
22650 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
22651 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
22652 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
22653 +#include "dpseci.h"
22654 +#include "dpseci_cmd.h"
22655 +
22656 +/**
22657 + * dpseci_open() - Open a control session for the specified object
22658 + * @mc_io: Pointer to MC portal's I/O object
22659 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22660 + * @dpseci_id: DPSECI unique ID
22661 + * @token: Returned token; use in subsequent API calls
22662 + *
22663 + * This function can be used to open a control session for an already created
22664 + * object; an object may have been declared in the DPL or by calling the
22665 + * dpseci_create() function.
22666 + * This function returns a unique authentication token, associated with the
22667 + * specific object ID and the specific MC portal; this token must be used in all
22668 + * subsequent commands for this specific object.
22669 + *
22670 + * Return: '0' on success, error code otherwise
22671 + */
22672 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
22673 + u16 *token)
22674 +{
22675 + struct mc_command cmd = { 0 };
22676 + struct dpseci_cmd_open *cmd_params;
22677 + int err;
22678 +
22679 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
22680 + cmd_flags,
22681 + 0);
22682 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
22683 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
22684 + err = mc_send_command(mc_io, &cmd);
22685 + if (err)
22686 + return err;
22687 +
22688 + *token = mc_cmd_hdr_read_token(&cmd);
22689 +
22690 + return 0;
22691 +}
22692 +
22693 +/**
22694 + * dpseci_close() - Close the control session of the object
22695 + * @mc_io: Pointer to MC portal's I/O object
22696 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22697 + * @token: Token of DPSECI object
22698 + *
22699 + * After this function is called, no further operations are allowed on the
22700 + * object without opening a new control session.
22701 + *
22702 + * Return: '0' on success, error code otherwise
22703 + */
22704 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22705 +{
22706 + struct mc_command cmd = { 0 };
22707 +
22708 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
22709 + cmd_flags,
22710 + token);
22711 + return mc_send_command(mc_io, &cmd);
22712 +}
22713 +
22714 +/**
22715 + * dpseci_create() - Create the DPSECI object
22716 + * @mc_io: Pointer to MC portal's I/O object
22717 + * @dprc_token: Parent container token; '0' for default container
22718 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22719 + * @cfg: Configuration structure
22720 + * @obj_id: returned object id
22721 + *
22722 + * Create the DPSECI object, allocate required resources and perform required
22723 + * initialization.
22724 + *
22725 + * The object can be created either by declaring it in the DPL file, or by
22726 + * calling this function.
22727 + *
22728 + * The function accepts an authentication token of a parent container that this
22729 + * object should be assigned to. The token can be '0' so the object will be
22730 + * assigned to the default container.
22731 + * The newly created object can be opened with the returned object id and using
22732 + * the container's associated tokens and MC portals.
22733 + *
22734 + * Return: '0' on success, error code otherwise
22735 + */
22736 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22737 + const struct dpseci_cfg *cfg, u32 *obj_id)
22738 +{
22739 + struct mc_command cmd = { 0 };
22740 + struct dpseci_cmd_create *cmd_params;
22741 + int i, err;
22742 +
22743 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
22744 + cmd_flags,
22745 + dprc_token);
22746 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
22747 + for (i = 0; i < 8; i++)
22748 + cmd_params->priorities[i] = cfg->priorities[i];
22749 + cmd_params->num_tx_queues = cfg->num_tx_queues;
22750 + cmd_params->num_rx_queues = cfg->num_rx_queues;
22751 + cmd_params->options = cpu_to_le32(cfg->options);
22752 + err = mc_send_command(mc_io, &cmd);
22753 + if (err)
22754 + return err;
22755 +
22756 + *obj_id = mc_cmd_read_object_id(&cmd);
22757 +
22758 + return 0;
22759 +}
22760 +
22761 +/**
22762 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
22763 + * @mc_io: Pointer to MC portal's I/O object
22764 + * @dprc_token: Parent container token; '0' for default container
22765 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22766 + * @object_id: The object id; it must be a valid id within the container that
22767 + * created this object
22768 + *
22769 + * The function accepts the authentication token of the parent container that
22770 + * created the object (not the one that currently owns the object). The object
22771 + * is searched within parent using the provided 'object_id'.
22772 + * All tokens to the object must be closed before calling destroy.
22773 + *
22774 + * Return: '0' on success, error code otherwise
22775 + */
22776 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22777 + u32 object_id)
22778 +{
22779 + struct mc_command cmd = { 0 };
22780 + struct dpseci_cmd_destroy *cmd_params;
22781 +
22782 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
22783 + cmd_flags,
22784 + dprc_token);
22785 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
22786 + cmd_params->object_id = cpu_to_le32(object_id);
22787 +
22788 + return mc_send_command(mc_io, &cmd);
22789 +}
22790 +
22791 +/**
22792 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
22793 + * @mc_io: Pointer to MC portal's I/O object
22794 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22795 + * @token: Token of DPSECI object
22796 + *
22797 + * Return: '0' on success, error code otherwise
22798 + */
22799 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22800 +{
22801 + struct mc_command cmd = { 0 };
22802 +
22803 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
22804 + cmd_flags,
22805 + token);
22806 + return mc_send_command(mc_io, &cmd);
22807 +}
22808 +
22809 +/**
22810 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
22811 + * @mc_io: Pointer to MC portal's I/O object
22812 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22813 + * @token: Token of DPSECI object
22814 + *
22815 + * Return: '0' on success, error code otherwise
22816 + */
22817 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22818 +{
22819 + struct mc_command cmd = { 0 };
22820 +
22821 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
22822 + cmd_flags,
22823 + token);
22824 +
22825 + return mc_send_command(mc_io, &cmd);
22826 +}
22827 +
22828 +/**
22829 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
22830 + * @mc_io: Pointer to MC portal's I/O object
22831 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22832 + * @token: Token of DPSECI object
22833 + * @en: Returns '1' if object is enabled; '0' otherwise
22834 + *
22835 + * Return: '0' on success, error code otherwise
22836 + */
22837 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22838 + int *en)
22839 +{
22840 + struct mc_command cmd = { 0 };
22841 + struct dpseci_rsp_is_enabled *rsp_params;
22842 + int err;
22843 +
22844 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
22845 + cmd_flags,
22846 + token);
22847 + err = mc_send_command(mc_io, &cmd);
22848 + if (err)
22849 + return err;
22850 +
22851 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
22852 + *en = le32_to_cpu(rsp_params->is_enabled);
22853 +
22854 + return 0;
22855 +}
22856 +
22857 +/**
22858 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
22859 + * @mc_io: Pointer to MC portal's I/O object
22860 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22861 + * @token: Token of DPSECI object
22862 + *
22863 + * Return: '0' on success, error code otherwise
22864 + */
22865 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22866 +{
22867 + struct mc_command cmd = { 0 };
22868 +
22869 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
22870 + cmd_flags,
22871 + token);
22872 +
22873 + return mc_send_command(mc_io, &cmd);
22874 +}
22875 +
22876 +/**
22877 + * dpseci_get_irq_enable() - Get overall interrupt state
22878 + * @mc_io: Pointer to MC portal's I/O object
22879 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22880 + * @token: Token of DPSECI object
22881 + * @irq_index: The interrupt index to configure
22882 + * @en: Returned Interrupt state - enable = 1, disable = 0
22883 + *
22884 + * Return: '0' on success, error code otherwise
22885 + */
22886 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22887 + u8 irq_index, u8 *en)
22888 +{
22889 + struct mc_command cmd = { 0 };
22890 + struct dpseci_cmd_irq_enable *cmd_params;
22891 + struct dpseci_rsp_get_irq_enable *rsp_params;
22892 + int err;
22893 +
22894 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
22895 + cmd_flags,
22896 + token);
22897 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22898 + cmd_params->irq_index = irq_index;
22899 + err = mc_send_command(mc_io, &cmd);
22900 + if (err)
22901 + return err;
22902 +
22903 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
22904 + *en = rsp_params->enable_state;
22905 +
22906 + return 0;
22907 +}
22908 +
22909 +/**
22910 + * dpseci_set_irq_enable() - Set overall interrupt state.
22911 + * @mc_io: Pointer to MC portal's I/O object
22912 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22913 + * @token: Token of DPSECI object
22914 + * @irq_index: The interrupt index to configure
22915 + * @en: Interrupt state - enable = 1, disable = 0
22916 + *
22917 + * Allows GPP software to control when interrupts are generated.
22918 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22919 + * overall interrupt state: if the interrupt is disabled, none of the causes
22920 + * can trigger an interrupt.
22921 + *
22922 + * Return: '0' on success, error code otherwise
22923 + */
22924 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22925 + u8 irq_index, u8 en)
22926 +{
22927 + struct mc_command cmd = { 0 };
22928 + struct dpseci_cmd_irq_enable *cmd_params;
22929 +
22930 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
22931 + cmd_flags,
22932 + token);
22933 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22934 + cmd_params->irq_index = irq_index;
22935 + cmd_params->enable_state = en;
22936 +
22937 + return mc_send_command(mc_io, &cmd);
22938 +}
22939 +
22940 +/**
22941 + * dpseci_get_irq_mask() - Get interrupt mask.
22942 + * @mc_io: Pointer to MC portal's I/O object
22943 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22944 + * @token: Token of DPSECI object
22945 + * @irq_index: The interrupt index to configure
22946 + * @mask: Returned event mask to trigger interrupt
22947 + *
22948 + * Every interrupt can have up to 32 causes and the interrupt model supports
22949 + * masking/unmasking each cause independently.
22950 + *
22951 + * Return: '0' on success, error code otherwise
22952 + */
22953 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22954 + u8 irq_index, u32 *mask)
22955 +{
22956 + struct mc_command cmd = { 0 };
22957 + struct dpseci_cmd_irq_mask *cmd_params;
22958 + int err;
22959 +
22960 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
22961 + cmd_flags,
22962 + token);
22963 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22964 + cmd_params->irq_index = irq_index;
22965 + err = mc_send_command(mc_io, &cmd);
22966 + if (err)
22967 + return err;
22968 +
22969 + *mask = le32_to_cpu(cmd_params->mask);
22970 +
22971 + return 0;
22972 +}
22973 +
22974 +/**
22975 + * dpseci_set_irq_mask() - Set interrupt mask.
22976 + * @mc_io: Pointer to MC portal's I/O object
22977 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22978 + * @token: Token of DPSECI object
22979 + * @irq_index: The interrupt index to configure
22980 + * @mask: event mask to trigger interrupt;
22981 + * each bit:
22982 + * 0 = ignore event
22983 + * 1 = consider event for asserting IRQ
22984 + *
22985 + * Every interrupt can have up to 32 causes and the interrupt model supports
22986 + * masking/unmasking each cause independently
22987 + *
22988 + * Return: '0' on success, error code otherwise
22989 + */
22990 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22991 + u8 irq_index, u32 mask)
22992 +{
22993 + struct mc_command cmd = { 0 };
22994 + struct dpseci_cmd_irq_mask *cmd_params;
22995 +
22996 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
22997 + cmd_flags,
22998 + token);
22999 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
23000 + cmd_params->mask = cpu_to_le32(mask);
23001 + cmd_params->irq_index = irq_index;
23002 +
23003 + return mc_send_command(mc_io, &cmd);
23004 +}
23005 +
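Together with dpseci_set_irq_enable() above, a driver would typically unmask the causes it cares about and only then turn the interrupt on. A minimal sketch, illustrative only, assuming interrupt index 0:

static int example_dpseci_irq_on(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	/* consider all 32 causes for IRQ assertion (bit = 1), then enable */
	err = dpseci_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
	if (err)
		return err;

	return dpseci_set_irq_enable(mc_io, 0, token, 0, 1);
}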
23006 +/**
23007 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
23008 + * @mc_io: Pointer to MC portal's I/O object
23009 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23010 + * @token: Token of DPSECI object
23011 + * @irq_index: The interrupt index to configure
23012 + * @status: Returned interrupts status - one bit per cause:
23013 + * 0 = no interrupt pending
23014 + * 1 = interrupt pending
23015 + *
23016 + * Return: '0' on success, error code otherwise
23017 + */
23018 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23019 + u8 irq_index, u32 *status)
23020 +{
23021 + struct mc_command cmd = { 0 };
23022 + struct dpseci_cmd_irq_status *cmd_params;
23023 + int err;
23024 +
23025 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
23026 + cmd_flags,
23027 + token);
23028 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
23029 + cmd_params->status = cpu_to_le32(*status);
23030 + cmd_params->irq_index = irq_index;
23031 + err = mc_send_command(mc_io, &cmd);
23032 + if (err)
23033 + return err;
23034 +
23035 + *status = le32_to_cpu(cmd_params->status);
23036 +
23037 + return 0;
23038 +}
23039 +
23040 +/**
23041 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
23042 + * @mc_io: Pointer to MC portal's I/O object
23043 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23044 + * @token: Token of DPSECI object
23045 + * @irq_index: The interrupt index to configure
23046 + * @status: bits to clear (W1C) - one bit per cause:
23047 + * 0 = don't change
23048 + * 1 = clear status bit
23049 + *
23050 + * Return: '0' on success, error code otherwise
23051 + */
23052 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23053 + u8 irq_index, u32 status)
23054 +{
23055 + struct mc_command cmd = { 0 };
23056 + struct dpseci_cmd_irq_status *cmd_params;
23057 +
23058 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
23059 + cmd_flags,
23060 + token);
23061 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
23062 + cmd_params->status = cpu_to_le32(status);
23063 + cmd_params->irq_index = irq_index;
23064 +
23065 + return mc_send_command(mc_io, &cmd);
23066 +}
23067 +
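The two calls above pair up in an interrupt service path: read the pending causes, act on them, then write-1-to-clear whatever was handled. Illustrative sketch only, again assuming interrupt index 0:

static void example_dpseci_irq_service(struct fsl_mc_io *mc_io, u16 token)
{
	u32 status = 0;

	if (dpseci_get_irq_status(mc_io, 0, token, 0, &status) || !status)
		return;

	/* ... react to the causes reported in 'status' ... */

	/* clear only the causes that were just handled (W1C) */
	dpseci_clear_irq_status(mc_io, 0, token, 0, status);
}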
23068 +/**
23069 + * dpseci_get_attributes() - Retrieve DPSECI attributes
23070 + * @mc_io: Pointer to MC portal's I/O object
23071 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23072 + * @token: Token of DPSECI object
23073 + * @attr: Returned object's attributes
23074 + *
23075 + * Return: '0' on success, error code otherwise
23076 + */
23077 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23078 + struct dpseci_attr *attr)
23079 +{
23080 + struct mc_command cmd = { 0 };
23081 + struct dpseci_rsp_get_attributes *rsp_params;
23082 + int err;
23083 +
23084 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
23085 + cmd_flags,
23086 + token);
23087 + err = mc_send_command(mc_io, &cmd);
23088 + if (err)
23089 + return err;
23090 +
23091 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
23092 + attr->id = le32_to_cpu(rsp_params->id);
23093 + attr->num_tx_queues = rsp_params->num_tx_queues;
23094 + attr->num_rx_queues = rsp_params->num_rx_queues;
23095 + attr->options = le32_to_cpu(rsp_params->options);
23096 +
23097 + return 0;
23098 +}
23099 +
23100 +/**
23101 + * dpseci_set_rx_queue() - Set Rx queue configuration
23102 + * @mc_io: Pointer to MC portal's I/O object
23103 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23104 + * @token: Token of DPSECI object
23105 + * @queue: Select the queue relative to number of priorities configured at
23106 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
23107 + * Rx queues identically.
23108 + * @cfg: Rx queue configuration
23109 + *
23110 + * Return: '0' on success, error code otherwise
23111 + */
23112 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23113 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
23114 +{
23115 + struct mc_command cmd = { 0 };
23116 + struct dpseci_cmd_queue *cmd_params;
23117 +
23118 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
23119 + cmd_flags,
23120 + token);
23121 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23122 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23123 + cmd_params->priority = cfg->dest_cfg.priority;
23124 + cmd_params->queue = queue;
23125 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
23126 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
23127 + cmd_params->options = cpu_to_le32(cfg->options);
23128 + cmd_params->order_preservation_en =
23129 + cpu_to_le32(cfg->order_preservation_en);
23130 +
23131 + return mc_send_command(mc_io, &cmd);
23132 +}
23133 +
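To illustrate how the 'options' flags select which fields take effect, a caller might point every Rx queue at a DPIO channel and attach a per-device context. Sketch only; 'dpio_id' and 'priv' are placeholders for driver-specific values, and the DPSECI_QUEUE_OPT_* / DPSECI_DEST_* / DPSECI_ALL_QUEUES constants are the ones declared in dpseci.h below.

static int example_dpseci_rx_to_dpio(struct fsl_mc_io *mc_io, u16 token,
				     int dpio_id, void *priv)
{
	struct dpseci_rx_queue_cfg rx_cfg = { 0 };

	rx_cfg.options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST;
	rx_cfg.user_ctx = (u64)(uintptr_t)priv;	/* echoed back with each dequeued frame */
	rx_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
	rx_cfg.dest_cfg.dest_id = dpio_id;
	rx_cfg.dest_cfg.priority = 0;

	/* DPSECI_ALL_QUEUES applies the same settings to every Rx queue */
	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);
}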
23134 +/**
23135 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
23136 + * @mc_io: Pointer to MC portal's I/O object
23137 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23138 + * @token: Token of DPSECI object
23139 + * @queue: Select the queue relative to number of priorities configured at
23140 + * DPSECI creation
23141 + * @attr: Returned Rx queue attributes
23142 + *
23143 + * Return: '0' on success, error code otherwise
23144 + */
23145 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23146 + u8 queue, struct dpseci_rx_queue_attr *attr)
23147 +{
23148 + struct mc_command cmd = { 0 };
23149 + struct dpseci_cmd_queue *cmd_params;
23150 + int err;
23151 +
23152 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
23153 + cmd_flags,
23154 + token);
23155 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23156 + cmd_params->queue = queue;
23157 + err = mc_send_command(mc_io, &cmd);
23158 + if (err)
23159 + return err;
23160 +
23161 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
23162 + attr->dest_cfg.priority = cmd_params->priority;
23163 + attr->dest_cfg.dest_type = cmd_params->dest_type;
23164 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
23165 + attr->fqid = le32_to_cpu(cmd_params->fqid);
23166 + attr->order_preservation_en =
23167 + le32_to_cpu(cmd_params->order_preservation_en);
23168 +
23169 + return 0;
23170 +}
23171 +
23172 +/**
23173 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
23174 + * @mc_io: Pointer to MC portal's I/O object
23175 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23176 + * @token: Token of DPSECI object
23177 + * @queue: Select the queue relative to number of priorities configured at
23178 + * DPSECI creation
23179 + * @attr: Returned Tx queue attributes
23180 + *
23181 + * Return: '0' on success, error code otherwise
23182 + */
23183 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23184 + u8 queue, struct dpseci_tx_queue_attr *attr)
23185 +{
23186 + struct mc_command cmd = { 0 };
23187 + struct dpseci_cmd_queue *cmd_params;
23188 + struct dpseci_rsp_get_tx_queue *rsp_params;
23189 + int err;
23190 +
23191 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
23192 + cmd_flags,
23193 + token);
23194 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23195 + cmd_params->queue = queue;
23196 + err = mc_send_command(mc_io, &cmd);
23197 + if (err)
23198 + return err;
23199 +
23200 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
23201 + attr->fqid = le32_to_cpu(rsp_params->fqid);
23202 + attr->priority = rsp_params->priority;
23203 +
23204 + return 0;
23205 +}
23206 +
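The FQID reported here is the enqueue target for frames pushed towards the SEC at a given priority. A small illustrative sketch of caching one FQID per Tx queue; 'num_tx_queues' would normally come from dpseci_get_attributes() above, and the snippet is assumed to live inside a probe-style function:

	struct dpseci_tx_queue_attr tx_attr;
	u32 tx_fqid[DPSECI_PRIO_NUM];
	int i, err;

	for (i = 0; i < num_tx_queues; i++) {
		err = dpseci_get_tx_queue(mc_io, 0, token, i, &tx_attr);
		if (err)
			return err;
		tx_fqid[i] = tx_attr.fqid;	/* enqueue target for this priority */
	}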
23207 +/**
23208 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
23209 + * @mc_io: Pointer to MC portal's I/O object
23210 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23211 + * @token: Token of DPSECI object
23212 + * @attr: Returned SEC attributes
23213 + *
23214 + * Return: '0' on success, error code otherwise
23215 + */
23216 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23217 + struct dpseci_sec_attr *attr)
23218 +{
23219 + struct mc_command cmd = { 0 };
23220 + struct dpseci_rsp_get_sec_attr *rsp_params;
23221 + int err;
23222 +
23223 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
23224 + cmd_flags,
23225 + token);
23226 + err = mc_send_command(mc_io, &cmd);
23227 + if (err)
23228 + return err;
23229 +
23230 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
23231 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
23232 + attr->major_rev = rsp_params->major_rev;
23233 + attr->minor_rev = rsp_params->minor_rev;
23234 + attr->era = rsp_params->era;
23235 + attr->deco_num = rsp_params->deco_num;
23236 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
23237 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
23238 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
23239 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
23240 + attr->crc_acc_num = rsp_params->crc_acc_num;
23241 + attr->pk_acc_num = rsp_params->pk_acc_num;
23242 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
23243 + attr->rng_acc_num = rsp_params->rng_acc_num;
23244 + attr->md_acc_num = rsp_params->md_acc_num;
23245 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
23246 + attr->des_acc_num = rsp_params->des_acc_num;
23247 + attr->aes_acc_num = rsp_params->aes_acc_num;
23248 +
23249 + return 0;
23250 +}
23251 +
23252 +/**
23253 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
23254 + * @mc_io: Pointer to MC portal's I/O object
23255 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23256 + * @token: Token of DPSECI object
23257 + * @counters: Returned SEC counters
23258 + *
23259 + * Return: '0' on success, error code otherwise
23260 + */
23261 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23262 + struct dpseci_sec_counters *counters)
23263 +{
23264 + struct mc_command cmd = { 0 };
23265 + struct dpseci_rsp_get_sec_counters *rsp_params;
23266 + int err;
23267 +
23268 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
23269 + cmd_flags,
23270 + token);
23271 + err = mc_send_command(mc_io, &cmd);
23272 + if (err)
23273 + return err;
23274 +
23275 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
23276 + counters->dequeued_requests =
23277 + le64_to_cpu(rsp_params->dequeued_requests);
23278 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
23279 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
23280 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
23281 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
23282 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
23283 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
23284 +
23285 + return 0;
23286 +}
23287 +
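These counters are global to the SEC block rather than per-DPSECI (see the struct dpseci_sec_counters description in dpseci.h below), so they are mainly useful for diagnostics. Illustrative sketch of dumping a few of them:

static void example_dpseci_dump_counters(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpseci_sec_counters cnt;

	if (dpseci_get_sec_counters(mc_io, 0, token, &cnt))
		return;

	pr_info("SEC: %llu requests dequeued, %llu bytes encrypted, %llu bytes decrypted\n",
		(unsigned long long)cnt.dequeued_requests,
		(unsigned long long)cnt.ob_enc_bytes,
		(unsigned long long)cnt.ib_dec_bytes);
}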
23288 +/**
23289 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
23290 + * @mc_io: Pointer to MC portal's I/O object
23291 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23292 + * @major_ver: Major version of data path sec API
23293 + * @minor_ver: Minor version of data path sec API
23294 + *
23295 + * Return: '0' on success, error code otherwise
23296 + */
23297 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23298 + u16 *major_ver, u16 *minor_ver)
23299 +{
23300 + struct mc_command cmd = { 0 };
23301 + struct dpseci_rsp_get_api_version *rsp_params;
23302 + int err;
23303 +
23304 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
23305 + cmd_flags, 0);
23306 + err = mc_send_command(mc_io, &cmd);
23307 + if (err)
23308 + return err;
23309 +
23310 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
23311 + *major_ver = le16_to_cpu(rsp_params->major);
23312 + *minor_ver = le16_to_cpu(rsp_params->minor);
23313 +
23314 + return 0;
23315 +}
23316 +
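Callers typically use this as a compatibility gate before issuing any other DPSECI command. Illustrative sketch; comparing against DPSECI_VER_MAJOR/MINOR from dpseci_cmd.h is an assumption about where a driver would keep its minimum supported version:

static int example_dpseci_check_api(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	/* refuse MC firmware older than the version this code was written for */
	if (major < DPSECI_VER_MAJOR ||
	    (major == DPSECI_VER_MAJOR && minor < DPSECI_VER_MINOR))
		return -ENOTSUPP;

	return 0;
}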
23317 +/**
23318 + * dpseci_set_opr() - Set Order Restoration configuration
23319 + * @mc_io: Pointer to MC portal's I/O object
23320 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23321 + * @token: Token of DPSECI object
23322 + * @index: The queue index
23323 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
23324 + * OPR_OPT_RETIRE
23325 + * @cfg: Configuration options for the OPR
23326 + *
23327 + * Return: '0' on success, error code otherwise
23328 + */
23329 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23330 + u8 options, struct opr_cfg *cfg)
23331 +{
23332 + struct mc_command cmd = { 0 };
23333 + struct dpseci_cmd_opr *cmd_params;
23334 +
23335 + cmd.header = mc_encode_cmd_header(
23336 + DPSECI_CMDID_SET_OPR,
23337 + cmd_flags,
23338 + token);
23339 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23340 + cmd_params->index = index;
23341 + cmd_params->options = options;
23342 + cmd_params->oloe = cfg->oloe;
23343 + cmd_params->oeane = cfg->oeane;
23344 + cmd_params->olws = cfg->olws;
23345 + cmd_params->oa = cfg->oa;
23346 + cmd_params->oprrws = cfg->oprrws;
23347 +
23348 + return mc_send_command(mc_io, &cmd);
23349 +}
23350 +
23351 +/**
23352 + * dpseci_get_opr() - Retrieve Order Restoration config and query
23353 + * @mc_io: Pointer to MC portal's I/O object
23354 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23355 + * @token: Token of DPSECI object
23356 + * @index: The queue index
23357 + * @cfg: Returned OPR configuration
23358 + * @qry: Returned OPR query
23359 + *
23360 + * Return: '0' on success, error code otherwise
23361 + */
23362 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23363 + struct opr_cfg *cfg, struct opr_qry *qry)
23364 +{
23365 + struct mc_command cmd = { 0 };
23366 + struct dpseci_cmd_opr *cmd_params;
23367 + struct dpseci_rsp_get_opr *rsp_params;
23368 + int err;
23369 +
23370 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
23371 + cmd_flags,
23372 + token);
23373 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23374 + cmd_params->index = index;
23375 + err = mc_send_command(mc_io, &cmd);
23376 + if (err)
23377 + return err;
23378 +
23379 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
23380 + qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
23381 + qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
23382 + cfg->oloe = rsp_params->oloe;
23383 + cfg->oeane = rsp_params->oeane;
23384 + cfg->olws = rsp_params->olws;
23385 + cfg->oa = rsp_params->oa;
23386 + cfg->oprrws = rsp_params->oprrws;
23387 + qry->nesn = le16_to_cpu(rsp_params->nesn);
23388 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
23389 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
23390 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
23391 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
23392 + qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
23393 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
23394 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
23395 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
23396 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
23397 +
23398 + return 0;
23399 +}
23400 +
23401 +/**
23402 + * dpseci_set_congestion_notification() - Set congestion group
23403 + * notification configuration
23404 + * @mc_io: Pointer to MC portal's I/O object
23405 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23406 + * @token: Token of DPSECI object
23407 + * @cfg: congestion notification configuration
23408 + *
23409 + * Return: '0' on success, error code otherwise
23410 + */
23411 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23412 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
23413 +{
23414 + struct mc_command cmd = { 0 };
23415 + struct dpseci_cmd_congestion_notification *cmd_params;
23416 +
23417 + cmd.header = mc_encode_cmd_header(
23418 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
23419 + cmd_flags,
23420 + token);
23421 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23422 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23423 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
23424 + cmd_params->priority = cfg->dest_cfg.priority;
23425 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
23426 + cfg->dest_cfg.dest_type);
23427 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
23428 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
23429 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
23430 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
23431 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
23432 +
23433 + return mc_send_command(mc_io, &cmd);
23434 +}
23435 +
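For reference, a sketch of a configuration that asks the MC to write a CSCN message to memory whenever the congestion group crosses its thresholds. Illustrative only; 'msg_iova' and 'priv' are placeholders for a 16-byte-aligned DMA-able buffer and a driver context, and the constants are the DPSECI_CGN_MODE_* / DPSECI_CONGESTION_UNIT_* values declared in dpseci.h below.

static int example_dpseci_congestion(struct fsl_mc_io *mc_io, u16 token,
				     u64 msg_iova, void *priv)
{
	struct dpseci_congestion_notification_cfg cong = { 0 };

	cong.units = DPSECI_CONGESTION_UNIT_FRAMES;
	cong.threshold_entry = 1024;	/* enter congestion above 1024 frames */
	cong.threshold_exit = 512;	/* leave it again below 512 frames */
	cong.message_iova = msg_iova;	/* 16B-aligned, DMA-able memory */
	cong.message_ctx = (u64)(uintptr_t)priv;
	cong.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;

	return dpseci_set_congestion_notification(mc_io, 0, token, &cong);
}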
23436 +/**
23437 + * dpseci_get_congestion_notification() - Get congestion group notification
23438 + * configuration
23439 + * @mc_io: Pointer to MC portal's I/O object
23440 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23441 + * @token: Token of DPSECI object
23442 + * @cfg: congestion notification configuration
23443 + *
23444 + * Return: '0' on success, error code otherwise
23445 + */
23446 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23447 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
23448 +{
23449 + struct mc_command cmd = { 0 };
23450 + struct dpseci_cmd_congestion_notification *rsp_params;
23451 + int err;
23452 +
23453 + cmd.header = mc_encode_cmd_header(
23454 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
23455 + cmd_flags,
23456 + token);
23457 + err = mc_send_command(mc_io, &cmd);
23458 + if (err)
23459 + return err;
23460 +
23461 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23462 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
23463 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
23464 + cfg->dest_cfg.priority = rsp_params->priority;
23465 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
23466 + CGN_DEST_TYPE);
23467 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
23468 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
23469 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
23470 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
23471 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
23472 +
23473 + return 0;
23474 +}
23475 diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
23476 new file mode 100644
23477 index 00000000..d37489c6
23478 --- /dev/null
23479 +++ b/drivers/crypto/caam/dpseci.h
23480 @@ -0,0 +1,395 @@
23481 +/*
23482 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23483 + * Copyright 2017 NXP
23484 + *
23485 + * Redistribution and use in source and binary forms, with or without
23486 + * modification, are permitted provided that the following conditions are met:
23487 + * * Redistributions of source code must retain the above copyright
23488 + * notice, this list of conditions and the following disclaimer.
23489 + * * Redistributions in binary form must reproduce the above copyright
23490 + * notice, this list of conditions and the following disclaimer in the
23491 + * documentation and/or other materials provided with the distribution.
23492 + * * Neither the names of the above-listed copyright holders nor the
23493 + * names of any contributors may be used to endorse or promote products
23494 + * derived from this software without specific prior written permission.
23495 + *
23496 + *
23497 + * ALTERNATIVELY, this software may be distributed under the terms of the
23498 + * GNU General Public License ("GPL") as published by the Free Software
23499 + * Foundation, either version 2 of that License or (at your option) any
23500 + * later version.
23501 + *
23502 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23503 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23504 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23505 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23506 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23507 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23508 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23509 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23510 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23511 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23512 + * POSSIBILITY OF SUCH DAMAGE.
23513 + */
23514 +#ifndef _DPSECI_H_
23515 +#define _DPSECI_H_
23516 +
23517 +/*
23518 + * Data Path SEC Interface API
23519 + * Contains initialization APIs and runtime control APIs for DPSECI
23520 + */
23521 +
23522 +struct fsl_mc_io;
23523 +struct opr_cfg;
23524 +struct opr_qry;
23525 +
23526 +/**
23527 + * General DPSECI macros
23528 + */
23529 +
23530 +/**
23531 + * Maximum number of Tx/Rx priorities per DPSECI object
23532 + */
23533 +#define DPSECI_PRIO_NUM 8
23534 +
23535 +/**
23536 + * All queues considered; see dpseci_set_rx_queue()
23537 + */
23538 +#define DPSECI_ALL_QUEUES (u8)(-1)
23539 +
23540 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
23541 + u16 *token);
23542 +
23543 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23544 +
23545 +/**
23546 + * Enable the Congestion Group support
23547 + */
23548 +#define DPSECI_OPT_HAS_CG 0x000020
23549 +
23550 +/**
23551 + * Enable the Order Restoration support
23552 + */
23553 +#define DPSECI_OPT_HAS_OPR 0x000040
23554 +
23555 +/**
23556 + * Order Point Records are shared for the entire DPSECI
23557 + */
23558 +#define DPSECI_OPT_OPR_SHARED 0x000080
23559 +
23560 +/**
23561 + * struct dpseci_cfg - Structure representing DPSECI configuration
23562 + * @options: Any combination of the following options:
23563 + * DPSECI_OPT_HAS_CG
23564 + * DPSECI_OPT_HAS_OPR
23565 + * DPSECI_OPT_OPR_SHARED
23566 + * @num_tx_queues: num of queues towards the SEC
23567 + * @num_rx_queues: num of queues back from the SEC
23568 + * @priorities: Priorities for the SEC hardware processing;
23569 + * each place in the array is the priority of the tx queue
23570 + * towards the SEC;
23571 + * valid priorities are configured with values 1-8;
23572 + */
23573 +struct dpseci_cfg {
23574 + u32 options;
23575 + u8 num_tx_queues;
23576 + u8 num_rx_queues;
23577 + u8 priorities[DPSECI_PRIO_NUM];
23578 +};
23579 +
23580 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23581 + const struct dpseci_cfg *cfg, u32 *obj_id);
23582 +
23583 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23584 + u32 object_id);
23585 +
23586 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23587 +
23588 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23589 +
23590 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23591 + int *en);
23592 +
23593 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23594 +
23595 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23596 + u8 irq_index, u8 *en);
23597 +
23598 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23599 + u8 irq_index, u8 en);
23600 +
23601 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23602 + u8 irq_index, u32 *mask);
23603 +
23604 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23605 + u8 irq_index, u32 mask);
23606 +
23607 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23608 + u8 irq_index, u32 *status);
23609 +
23610 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23611 + u8 irq_index, u32 status);
23612 +
23613 +/**
23614 + * struct dpseci_attr - Structure representing DPSECI attributes
23615 + * @id: DPSECI object ID
23616 + * @num_tx_queues: number of queues towards the SEC
23617 + * @num_rx_queues: number of queues back from the SEC
23618 + * @options: any combination of the following options:
23619 + * DPSECI_OPT_HAS_CG
23620 + * DPSECI_OPT_HAS_OPR
23621 + * DPSECI_OPT_OPR_SHARED
23622 + */
23623 +struct dpseci_attr {
23624 + int id;
23625 + u8 num_tx_queues;
23626 + u8 num_rx_queues;
23627 + u32 options;
23628 +};
23629 +
23630 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23631 + struct dpseci_attr *attr);
23632 +
23633 +/**
23634 + * enum dpseci_dest - DPSECI destination types
23635 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
23636 + * and does not generate FQDAN notifications; user is expected to dequeue
23637 + * from the queue based on polling or other user-defined method
23638 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
23639 + * notifications to the specified DPIO; user is expected to dequeue from
23640 + * the queue only after notification is received
23641 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
23642 + * FQDAN notifications, but is connected to the specified DPCON object;
23643 + * user is expected to dequeue from the DPCON channel
23644 + */
23645 +enum dpseci_dest {
23646 + DPSECI_DEST_NONE = 0,
23647 + DPSECI_DEST_DPIO,
23648 + DPSECI_DEST_DPCON
23649 +};
23650 +
23651 +/**
23652 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
23653 + * @dest_type: Destination type
23654 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
23655 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
23656 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
23657 + * not relevant for 'DPSECI_DEST_NONE' option
23658 + */
23659 +struct dpseci_dest_cfg {
23660 + enum dpseci_dest dest_type;
23661 + int dest_id;
23662 + u8 priority;
23663 +};
23664 +
23665 +/**
23666 + * DPSECI queue modification options
23667 + */
23668 +
23669 +/**
23670 + * Select to modify the user's context associated with the queue
23671 + */
23672 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
23673 +
23674 +/**
23675 + * Select to modify the queue's destination
23676 + */
23677 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
23678 +
23679 +/**
23680 + * Select to modify the queue's order preservation
23681 + */
23682 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
23683 +
23684 +/**
23685 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
23686 + * @options: Flags representing the suggested modifications to the queue;
23687 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
23688 + * @order_preservation_en: order preservation configuration for the rx queue;
23689 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
23690 + * @user_ctx: User context value provided in the frame descriptor of each
23691 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
23692 + * in 'options'
23693 + * @dest_cfg: Queue destination parameters; valid only if
23694 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
23695 + */
23696 +struct dpseci_rx_queue_cfg {
23697 + u32 options;
23698 + int order_preservation_en;
23699 + u64 user_ctx;
23700 + struct dpseci_dest_cfg dest_cfg;
23701 +};
23702 +
23703 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23704 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
23705 +
23706 +/**
23707 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
23708 + * @user_ctx: User context value provided in the frame descriptor of each
23709 + * dequeued frame
23710 + * @order_preservation_en: Status of the order preservation configuration on the
23711 + * queue
23712 + * @dest_cfg: Queue destination configuration
23713 + * @fqid: Virtual FQID value to be used for dequeue operations
23714 + */
23715 +struct dpseci_rx_queue_attr {
23716 + u64 user_ctx;
23717 + int order_preservation_en;
23718 + struct dpseci_dest_cfg dest_cfg;
23719 + u32 fqid;
23720 +};
23721 +
23722 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23723 + u8 queue, struct dpseci_rx_queue_attr *attr);
23724 +
23725 +/**
23726 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
23727 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
23728 + * @priority: SEC hardware processing priority for the queue
23729 + */
23730 +struct dpseci_tx_queue_attr {
23731 + u32 fqid;
23732 + u8 priority;
23733 +};
23734 +
23735 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23736 + u8 queue, struct dpseci_tx_queue_attr *attr);
23737 +
23738 +/**
23739 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
23740 + * hardware accelerator
23741 + * @ip_id: ID for SEC
23742 + * @major_rev: Major revision number for SEC
23743 + * @minor_rev: Minor revision number for SEC
23744 + * @era: SEC Era
23745 + * @deco_num: The number of copies of the DECO that are implemented in this
23746 + * version of SEC
23747 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
23748 + * version of SEC
23749 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
23750 + * version of SEC
23751 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
23752 + * implemented in this version of SEC
23753 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
23754 + * implemented in this version of SEC
23755 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
23756 + * this version of SEC
23757 + * @pk_acc_num: The number of copies of the Public Key module that are
23758 + * implemented in this version of SEC
23759 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
23760 + * implemented in this version of SEC
23761 + * @rng_acc_num: The number of copies of the Random Number Generator that are
23762 + * implemented in this version of SEC
23763 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
23764 + * implemented in this version of SEC
23765 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
23766 + * in this version of SEC
23767 + * @des_acc_num: The number of copies of the DES module that are implemented in
23768 + * this version of SEC
23769 + * @aes_acc_num: The number of copies of the AES module that are implemented in
23770 + * this version of SEC
23771 + */
23772 +struct dpseci_sec_attr {
23773 + u16 ip_id;
23774 + u8 major_rev;
23775 + u8 minor_rev;
23776 + u8 era;
23777 + u8 deco_num;
23778 + u8 zuc_auth_acc_num;
23779 + u8 zuc_enc_acc_num;
23780 + u8 snow_f8_acc_num;
23781 + u8 snow_f9_acc_num;
23782 + u8 crc_acc_num;
23783 + u8 pk_acc_num;
23784 + u8 kasumi_acc_num;
23785 + u8 rng_acc_num;
23786 + u8 md_acc_num;
23787 + u8 arc4_acc_num;
23788 + u8 des_acc_num;
23789 + u8 aes_acc_num;
23790 +};
23791 +
23792 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23793 + struct dpseci_sec_attr *attr);
23794 +
23795 +/**
23796 + * struct dpseci_sec_counters - Structure representing global SEC counters
23797 + * (not per-DPSECI counters)
23798 + * @dequeued_requests: Number of Requests Dequeued
23799 + * @ob_enc_requests: Number of Outbound Encrypt Requests
23800 + * @ib_dec_requests: Number of Inbound Decrypt Requests
23801 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
23802 + * @ob_prot_bytes: Number of Outbound Bytes Protected
23803 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
23804 + * @ib_valid_bytes: Number of Inbound Bytes Validated
23805 + */
23806 +struct dpseci_sec_counters {
23807 + u64 dequeued_requests;
23808 + u64 ob_enc_requests;
23809 + u64 ib_dec_requests;
23810 + u64 ob_enc_bytes;
23811 + u64 ob_prot_bytes;
23812 + u64 ib_dec_bytes;
23813 + u64 ib_valid_bytes;
23814 +};
23815 +
23816 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23817 + struct dpseci_sec_counters *counters);
23818 +
23819 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23820 + u16 *major_ver, u16 *minor_ver);
23821 +
23822 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23823 + u8 options, struct opr_cfg *cfg);
23824 +
23825 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23826 + struct opr_cfg *cfg, struct opr_qry *qry);
23827 +
23828 +/**
23829 + * enum dpseci_congestion_unit - DPSECI congestion units
23830 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
23831 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
23832 + */
23833 +enum dpseci_congestion_unit {
23834 + DPSECI_CONGESTION_UNIT_BYTES = 0,
23835 + DPSECI_CONGESTION_UNIT_FRAMES
23836 +};
23837 +
23838 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
23839 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
23840 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
23841 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
23842 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
23843 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
23844 +
23845 +/**
23846 + * struct dpseci_congestion_notification_cfg - congestion notification
23847 + * configuration
23848 + * @units: units type
23849 + * @threshold_entry: above this threshold we enter a congestion state;
23850 + * set it to '0' to disable it
23851 + * @threshold_exit: below this threshold we exit the congestion state.
23852 + * @message_ctx: The context that will be part of the CSCN message
23853 + * @message_iova: I/O virtual address (must be in DMA-able memory),
23854 + * must be 16B aligned;
23855 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
23856 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
23857 + * values
23858 + */
23859 +struct dpseci_congestion_notification_cfg {
23860 + enum dpseci_congestion_unit units;
23861 + u32 threshold_entry;
23862 + u32 threshold_exit;
23863 + u64 message_ctx;
23864 + u64 message_iova;
23865 + struct dpseci_dest_cfg dest_cfg;
23866 + u16 notification_mode;
23867 +};
23868 +
23869 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23870 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
23871 +
23872 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23873 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
23874 +
23875 +#endif /* _DPSECI_H_ */
23876 diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
23877 new file mode 100644
23878 index 00000000..7624315e
23879 --- /dev/null
23880 +++ b/drivers/crypto/caam/dpseci_cmd.h
23881 @@ -0,0 +1,261 @@
23882 +/*
23883 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23884 + * Copyright 2017 NXP
23885 + *
23886 + * Redistribution and use in source and binary forms, with or without
23887 + * modification, are permitted provided that the following conditions are met:
23888 + * * Redistributions of source code must retain the above copyright
23889 + * notice, this list of conditions and the following disclaimer.
23890 + * * Redistributions in binary form must reproduce the above copyright
23891 + * notice, this list of conditions and the following disclaimer in the
23892 + * documentation and/or other materials provided with the distribution.
23893 + * * Neither the names of the above-listed copyright holders nor the
23894 + * names of any contributors may be used to endorse or promote products
23895 + * derived from this software without specific prior written permission.
23896 + *
23897 + *
23898 + * ALTERNATIVELY, this software may be distributed under the terms of the
23899 + * GNU General Public License ("GPL") as published by the Free Software
23900 + * Foundation, either version 2 of that License or (at your option) any
23901 + * later version.
23902 + *
23903 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23904 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23905 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23906 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23907 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23908 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23909 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23910 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23911 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23912 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23913 + * POSSIBILITY OF SUCH DAMAGE.
23914 + */
23915 +
23916 +#ifndef _DPSECI_CMD_H_
23917 +#define _DPSECI_CMD_H_
23918 +
23919 +/* DPSECI Version */
23920 +#define DPSECI_VER_MAJOR 5
23921 +#define DPSECI_VER_MINOR 1
23922 +
23923 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
23924 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
23925 +
23926 +/* Command IDs */
23927 +
23928 +#define DPSECI_CMDID_CLOSE 0x8001
23929 +#define DPSECI_CMDID_OPEN 0x8091
23930 +#define DPSECI_CMDID_CREATE 0x9092
23931 +#define DPSECI_CMDID_DESTROY 0x9891
23932 +#define DPSECI_CMDID_GET_API_VERSION 0xa091
23933 +
23934 +#define DPSECI_CMDID_ENABLE 0x0021
23935 +#define DPSECI_CMDID_DISABLE 0x0031
23936 +#define DPSECI_CMDID_GET_ATTR 0x0041
23937 +#define DPSECI_CMDID_RESET 0x0051
23938 +#define DPSECI_CMDID_IS_ENABLED 0x0061
23939 +
23940 +#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
23941 +#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
23942 +#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
23943 +#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
23944 +#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
23945 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
23946 +
23947 +#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
23948 +#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
23949 +#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
23950 +#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
23951 +#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
23952 +#define DPSECI_CMDID_SET_OPR 0x19A1
23953 +#define DPSECI_CMDID_GET_OPR 0x19B1
23954 +
23955 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
23956 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
23957 +
23958 +/* Macros for accessing command fields smaller than 1 byte */
23959 +#define DPSECI_MASK(field) \
23960 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
23961 + DPSECI_##field##_SHIFT)
23962 +
23963 +#define dpseci_set_field(var, field, val) \
23964 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
23965 +
23966 +#define dpseci_get_field(var, field) \
23967 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
23968 +
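A short illustration of how these helpers pack and unpack sub-byte fields; it uses the CGN_DEST_TYPE (bits 0-3) and CGN_UNITS (bits 4-5) definitions that appear near the end of this header. Sketch only, not part of the patch:

	u8 options = 0;

	dpseci_set_field(options, CGN_DEST_TYPE, 1);	/* e.g. DPSECI_DEST_DPIO */
	dpseci_set_field(options, CGN_UNITS, 1);	/* e.g. DPSECI_CONGESTION_UNIT_FRAMES */

	/* options is now 0x11; dpseci_get_field() recovers each value */
	if (dpseci_get_field(options, CGN_UNITS) != 1)
		pr_warn("unexpected units field\n");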
23969 +struct dpseci_cmd_open {
23970 + __le32 dpseci_id;
23971 +};
23972 +
23973 +struct dpseci_cmd_create {
23974 + u8 priorities[8];
23975 + u8 num_tx_queues;
23976 + u8 num_rx_queues;
23977 + __le16 pad;
23978 + __le32 options;
23979 +};
23980 +
23981 +struct dpseci_cmd_destroy {
23982 + __le32 object_id;
23983 +};
23984 +
23985 +struct dpseci_rsp_is_enabled {
23986 + __le32 is_enabled;
23987 +};
23988 +
23989 +struct dpseci_cmd_irq_enable {
23990 + u8 enable_state;
23991 + u8 pad[3];
23992 + u8 irq_index;
23993 +};
23994 +
23995 +struct dpseci_rsp_get_irq_enable {
23996 + u8 enable_state;
23997 +};
23998 +
23999 +struct dpseci_cmd_irq_mask {
24000 + __le32 mask;
24001 + u8 irq_index;
24002 +};
24003 +
24004 +struct dpseci_cmd_irq_status {
24005 + __le32 status;
24006 + u8 irq_index;
24007 +};
24008 +
24009 +struct dpseci_rsp_get_attributes {
24010 + __le32 id;
24011 + __le32 pad0;
24012 + u8 num_tx_queues;
24013 + u8 num_rx_queues;
24014 + u8 pad1[6];
24015 + __le32 options;
24016 +};
24017 +
24018 +struct dpseci_cmd_queue {
24019 + __le32 dest_id;
24020 + u8 priority;
24021 + u8 queue;
24022 + u8 dest_type;
24023 + u8 pad;
24024 + __le64 user_ctx;
24025 + union {
24026 + __le32 options;
24027 + __le32 fqid;
24028 + };
24029 + __le32 order_preservation_en;
24030 +};
24031 +
24032 +struct dpseci_rsp_get_tx_queue {
24033 + __le32 pad;
24034 + __le32 fqid;
24035 + u8 priority;
24036 +};
24037 +
24038 +struct dpseci_rsp_get_sec_attr {
24039 + __le16 ip_id;
24040 + u8 major_rev;
24041 + u8 minor_rev;
24042 + u8 era;
24043 + u8 pad0[3];
24044 + u8 deco_num;
24045 + u8 zuc_auth_acc_num;
24046 + u8 zuc_enc_acc_num;
24047 + u8 pad1;
24048 + u8 snow_f8_acc_num;
24049 + u8 snow_f9_acc_num;
24050 + u8 crc_acc_num;
24051 + u8 pad2;
24052 + u8 pk_acc_num;
24053 + u8 kasumi_acc_num;
24054 + u8 rng_acc_num;
24055 + u8 pad3;
24056 + u8 md_acc_num;
24057 + u8 arc4_acc_num;
24058 + u8 des_acc_num;
24059 + u8 aes_acc_num;
24060 +};
24061 +
24062 +struct dpseci_rsp_get_sec_counters {
24063 + __le64 dequeued_requests;
24064 + __le64 ob_enc_requests;
24065 + __le64 ib_dec_requests;
24066 + __le64 ob_enc_bytes;
24067 + __le64 ob_prot_bytes;
24068 + __le64 ib_dec_bytes;
24069 + __le64 ib_valid_bytes;
24070 +};
24071 +
24072 +struct dpseci_rsp_get_api_version {
24073 + __le16 major;
24074 + __le16 minor;
24075 +};
24076 +
24077 +struct dpseci_cmd_opr {
24078 + __le16 pad;
24079 + u8 index;
24080 + u8 options;
24081 + u8 pad1[7];
24082 + u8 oloe;
24083 + u8 oeane;
24084 + u8 olws;
24085 + u8 oa;
24086 + u8 oprrws;
24087 +};
24088 +
24089 +#define DPSECI_OPR_RIP_SHIFT 0
24090 +#define DPSECI_OPR_RIP_SIZE 1
24091 +#define DPSECI_OPR_ENABLE_SHIFT 1
24092 +#define DPSECI_OPR_ENABLE_SIZE 1
24093 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
24094 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
24095 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
24096 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
24097 +
24098 +struct dpseci_rsp_get_opr {
24099 + __le64 pad;
24100 + u8 rip_enable;
24101 + u8 pad0[2];
24102 + u8 oloe;
24103 + u8 oeane;
24104 + u8 olws;
24105 + u8 oa;
24106 + u8 oprrws;
24107 + __le16 nesn;
24108 + __le16 pad1;
24109 + __le16 ndsn;
24110 + __le16 pad2;
24111 + __le16 ea_tseq;
24112 + u8 tseq_nlis;
24113 + u8 pad3;
24114 + __le16 ea_hseq;
24115 + u8 hseq_nlis;
24116 + u8 pad4;
24117 + __le16 ea_hptr;
24118 + __le16 pad5;
24119 + __le16 ea_tptr;
24120 + __le16 pad6;
24121 + __le16 opr_vid;
24122 + __le16 pad7;
24123 + __le16 opr_id;
24124 +};
24125 +
24126 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
24127 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
24128 +#define DPSECI_CGN_UNITS_SHIFT 4
24129 +#define DPSECI_CGN_UNITS_SIZE 2
24130 +
24131 +struct dpseci_cmd_congestion_notification {
24132 + __le32 dest_id;
24133 + __le16 notification_mode;
24134 + u8 priority;
24135 + u8 options;
24136 + __le64 message_iova;
24137 + __le64 message_ctx;
24138 + __le32 threshold_entry;
24139 + __le32 threshold_exit;
24140 +};
24141 +
24142 +#endif /* _DPSECI_CMD_H_ */
24143 diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
24144 index 33e41ea8..31963397 100644
24145 --- a/drivers/crypto/caam/error.c
24146 +++ b/drivers/crypto/caam/error.c
24147 @@ -6,11 +6,54 @@
24148
24149 #include "compat.h"
24150 #include "regs.h"
24151 -#include "intern.h"
24152 #include "desc.h"
24153 -#include "jr.h"
24154 #include "error.h"
24155
24156 +#ifdef DEBUG
24157 +
24158 +#include <linux/highmem.h>
24159 +
24160 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24161 + int rowsize, int groupsize, struct scatterlist *sg,
24162 + size_t tlen, bool ascii)
24163 +{
24164 + struct scatterlist *it;
24165 + void *it_page;
24166 + size_t len;
24167 + void *buf;
24168 +
24169 + for (it = sg; it && tlen > 0; it = sg_next(it)) {
24170 + /*
24171 + * make sure the scatterlist's page
24172 + * has a valid virtual memory mapping
24173 + */
24174 + it_page = kmap_atomic(sg_page(it));
24175 + if (unlikely(!it_page)) {
24176 + pr_err("caam_dump_sg: kmap failed\n");
24177 + return;
24178 + }
24179 +
24180 + buf = it_page + it->offset;
24181 + len = min_t(size_t, tlen, it->length);
24182 + print_hex_dump(level, prefix_str, prefix_type, rowsize,
24183 + groupsize, buf, len, ascii);
24184 + tlen -= len;
24185 +
24186 + kunmap_atomic(it_page);
24187 + }
24188 +}
24189 +
24190 +#else
24191 +
24192 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24193 + int rowsize, int groupsize, struct scatterlist *sg,
24194 + size_t tlen, bool ascii)
24195 +{}
24196 +
24197 +#endif
24198 +
24199 +EXPORT_SYMBOL(caam_dump_sg);
24200 +
24201 static const struct {
24202 u8 value;
24203 const char *error_text;
24204 @@ -69,6 +112,54 @@ static const struct {
24205 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
24206 };
24207
24208 +static const struct {
24209 + u8 value;
24210 + const char *error_text;
24211 +} qi_error_list[] = {
24212 + { 0x1F, "Job terminated by FQ or ICID flush" },
24213 + { 0x20, "FD format error"},
24214 + { 0x21, "FD command format error"},
24215 + { 0x23, "FL format error"},
24216 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
24217 + { 0x30, "Max. buffer size too small"},
24218 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
24219 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
24220 + { 0x33, "Size over/underflow (allocate mode)"},
24221 + { 0x34, "Size over/underflow (reuse mode)"},
24222 + { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
24223 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
24224 + { 0x41, "SBC frame format not supported (allocate mode)"},
24225 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
24226 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
24227 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
24228 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
24229 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
24230 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
24231 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
24232 + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
24233 + { 0x51, "Unsupported IF reuse mode"},
24234 + { 0x52, "Unsupported FL use mode"},
24235 + { 0x53, "Unsupported RJD use mode"},
24236 + { 0x54, "Unsupported inline descriptor use mode"},
24237 + { 0xC0, "Table buffer pool 0 depletion"},
24238 + { 0xC1, "Table buffer pool 1 depletion"},
24239 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
24240 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
24241 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
24242 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
24243 + { 0xD0, "FLC read error"},
24244 + { 0xD1, "FL read error"},
24245 + { 0xD2, "FL write error"},
24246 + { 0xD3, "OF SGT write error"},
24247 + { 0xD4, "PTA read error"},
24248 + { 0xD5, "PTA write error"},
24249 + { 0xD6, "OF SGT F-bit write error"},
24250 + { 0xD7, "ASA write error"},
24251 + { 0xE1, "FLC[ICR]=0 ICID error"},
24252 + { 0xE2, "FLC[ICR]=1 ICID error"},
24253 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
24254 +};
24255 +
24256 static const char * const cha_id_list[] = {
24257 "",
24258 "AES",
24259 @@ -146,10 +237,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
24260 strlen(rng_err_id_list[err_id])) {
24261 /* RNG-only error */
24262 err_str = rng_err_id_list[err_id];
24263 - } else if (err_id < ARRAY_SIZE(err_id_list))
24264 + } else {
24265 err_str = err_id_list[err_id];
24266 - else
24267 - snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24268 + }
24269
24270 /*
24271 * CCB ICV check failures are part of normal operation life;
24272 @@ -198,6 +288,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
24273 status, error, idx_str, idx, err_str, err_err_code);
24274 }
24275
24276 +static void report_qi_status(struct device *qidev, const u32 status,
24277 + const char *error)
24278 +{
24279 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
24280 + const char *err_str = "unidentified error value 0x";
24281 + char err_err_code[3] = { 0 };
24282 + int i;
24283 +
24284 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
24285 + if (qi_error_list[i].value == err_id)
24286 + break;
24287 +
24288 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
24289 + err_str = qi_error_list[i].error_text;
24290 + else
24291 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24292 +
24293 + dev_err(qidev, "%08x: %s: %s%s\n",
24294 + status, error, err_str, err_err_code);
24295 +}
24296 +
24297 static void report_jr_status(struct device *jrdev, const u32 status,
24298 const char *error)
24299 {
24300 @@ -212,7 +323,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
24301 status, error, __func__);
24302 }
24303
24304 -void caam_jr_strstatus(struct device *jrdev, u32 status)
24305 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
24306 {
24307 static const struct stat_src {
24308 void (*report_ssed)(struct device *jrdev, const u32 status,
24309 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
24310 { report_ccb_status, "CCB" },
24311 { report_jump_status, "Jump" },
24312 { report_deco_status, "DECO" },
24313 - { NULL, "Queue Manager Interface" },
24314 + { report_qi_status, "Queue Manager Interface" },
24315 { report_jr_status, "Job Ring" },
24316 { report_cond_code_status, "Condition Code" },
24317 { NULL, NULL },
24318 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
24319 else
24320 dev_err(jrdev, "%d: unknown error source\n", ssrc);
24321 }
24322 -EXPORT_SYMBOL(caam_jr_strstatus);
24323 +EXPORT_SYMBOL(caam_strstatus);
24324 diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
24325 index b6350b0d..751ddcac 100644
24326 --- a/drivers/crypto/caam/error.h
24327 +++ b/drivers/crypto/caam/error.h
24328 @@ -7,5 +7,13 @@
24329 #ifndef CAAM_ERROR_H
24330 #define CAAM_ERROR_H
24331 #define CAAM_ERROR_STR_MAX 302
24332 -void caam_jr_strstatus(struct device *jrdev, u32 status);
24333 +
24334 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
24335 +
24336 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
24337 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
24338 +
24339 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24340 + int rowsize, int groupsize, struct scatterlist *sg,
24341 + size_t tlen, bool ascii);
24342 #endif /* CAAM_ERROR_H */
24343 diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
24344 index 5d4c0507..a5236125 100644
24345 --- a/drivers/crypto/caam/intern.h
24346 +++ b/drivers/crypto/caam/intern.h
24347 @@ -41,6 +41,7 @@ struct caam_drv_private_jr {
24348 struct device *dev;
24349 int ridx;
24350 struct caam_job_ring __iomem *rregs; /* JobR's register space */
24351 + struct tasklet_struct irqtask;
24352 int irq; /* One per queue */
24353
24354 /* Number of scatterlist crypt transforms active on the JobR */
24355 @@ -63,10 +64,9 @@ struct caam_drv_private_jr {
24356 * Driver-private storage for a single CAAM block instance
24357 */
24358 struct caam_drv_private {
24359 -
24360 - struct device *dev;
24361 - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
24362 - struct platform_device *pdev;
24363 +#ifdef CONFIG_CAAM_QI
24364 + struct device *qidev;
24365 +#endif
24366
24367 /* Physical-presence section */
24368 struct caam_ctrl __iomem *ctrl; /* controller region */
24369 @@ -102,11 +102,6 @@ struct caam_drv_private {
24370 #ifdef CONFIG_DEBUG_FS
24371 struct dentry *dfs_root;
24372 struct dentry *ctl; /* controller dir */
24373 - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
24374 - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
24375 - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
24376 - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
24377 -
24378 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
24379 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
24380 #endif
24381 @@ -114,4 +109,22 @@ struct caam_drv_private {
24382
24383 void caam_jr_algapi_init(struct device *dev);
24384 void caam_jr_algapi_remove(struct device *dev);
24385 +
24386 +#ifdef CONFIG_DEBUG_FS
24387 +static int caam_debugfs_u64_get(void *data, u64 *val)
24388 +{
24389 + *val = caam64_to_cpu(*(u64 *)data);
24390 + return 0;
24391 +}
24392 +
24393 +static int caam_debugfs_u32_get(void *data, u64 *val)
24394 +{
24395 + *val = caam32_to_cpu(*(u32 *)data);
24396 + return 0;
24397 +}
24398 +
24399 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
24400 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
24401 +#endif
24402 +
24403 #endif /* INTERN_H */
24404 diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
24405 index 757c27f9..d258953f 100644
24406 --- a/drivers/crypto/caam/jr.c
24407 +++ b/drivers/crypto/caam/jr.c
24408 @@ -9,6 +9,7 @@
24409 #include <linux/of_address.h>
24410
24411 #include "compat.h"
24412 +#include "ctrl.h"
24413 #include "regs.h"
24414 #include "jr.h"
24415 #include "desc.h"
24416 @@ -73,6 +74,8 @@ static int caam_jr_shutdown(struct device *dev)
24417
24418 ret = caam_reset_hw_jr(dev);
24419
24420 + tasklet_kill(&jrp->irqtask);
24421 +
24422 /* Release interrupt */
24423 free_irq(jrp->irq, dev);
24424
24425 @@ -128,7 +131,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
24426
24427 /*
24428 * Check the output ring for ready responses, kick
24429 - * the threaded irq if jobs done.
24430 + * the tasklet if jobs are done.
24431 */
24432 irqstate = rd_reg32(&jrp->rregs->jrintstatus);
24433 if (!irqstate)
24434 @@ -150,13 +153,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
24435 /* Have valid interrupt at this point, just ACK and trigger */
24436 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
24437
24438 - return IRQ_WAKE_THREAD;
24439 + preempt_disable();
24440 + tasklet_schedule(&jrp->irqtask);
24441 + preempt_enable();
24442 +
24443 + return IRQ_HANDLED;
24444 }
24445
24446 -static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
24447 +/* Deferred service handler, run as interrupt-fired tasklet */
24448 +static void caam_jr_dequeue(unsigned long devarg)
24449 {
24450 int hw_idx, sw_idx, i, head, tail;
24451 - struct device *dev = st_dev;
24452 + struct device *dev = (struct device *)devarg;
24453 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24454 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
24455 u32 *userdesc, userstatus;
24456 @@ -230,8 +238,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
24457
24458 /* reenable / unmask IRQs */
24459 clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
24460 -
24461 - return IRQ_HANDLED;
24462 }
24463
24464 /**
24465 @@ -389,10 +395,11 @@ static int caam_jr_init(struct device *dev)
24466
24467 jrp = dev_get_drvdata(dev);
24468
24469 + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
24470 +
24471 /* Connect job ring interrupt handler. */
24472 - error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
24473 - caam_jr_threadirq, IRQF_SHARED,
24474 - dev_name(dev), dev);
24475 + error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
24476 + dev_name(dev), dev);
24477 if (error) {
24478 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
24479 jrp->ridx, jrp->irq);
24480 @@ -454,6 +461,7 @@ static int caam_jr_init(struct device *dev)
24481 out_free_irq:
24482 free_irq(jrp->irq, dev);
24483 out_kill_deq:
24484 + tasklet_kill(&jrp->irqtask);
24485 return error;
24486 }
24487
24488 @@ -489,15 +497,28 @@ static int caam_jr_probe(struct platform_device *pdev)
24489 return -ENOMEM;
24490 }
24491
24492 - jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
24493 + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
24494
24495 - if (sizeof(dma_addr_t) == sizeof(u64))
24496 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
24497 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
24498 + if (sizeof(dma_addr_t) == sizeof(u64)) {
24499 + if (caam_dpaa2)
24500 + error = dma_set_mask_and_coherent(jrdev,
24501 + DMA_BIT_MASK(49));
24502 + else if (of_device_is_compatible(nprop,
24503 + "fsl,sec-v5.0-job-ring"))
24504 + error = dma_set_mask_and_coherent(jrdev,
24505 + DMA_BIT_MASK(40));
24506 else
24507 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
24508 - else
24509 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24510 + error = dma_set_mask_and_coherent(jrdev,
24511 + DMA_BIT_MASK(36));
24512 + } else {
24513 + error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24514 + }
24515 + if (error) {
24516 + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
24517 + error);
24518 + iounmap(ctrl);
24519 + return error;
24520 + }
24521
24522 /* Identify the interrupt */
24523 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
24524 @@ -520,7 +541,7 @@ static int caam_jr_probe(struct platform_device *pdev)
24525 return 0;
24526 }
24527
24528 -static struct of_device_id caam_jr_match[] = {
24529 +static const struct of_device_id caam_jr_match[] = {
24530 {
24531 .compatible = "fsl,sec-v4.0-job-ring",
24532 },
24533 diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
24534 index 3ce1d5cd..a523ed77 100644
24535 --- a/drivers/crypto/caam/key_gen.c
24536 +++ b/drivers/crypto/caam/key_gen.c
24537 @@ -41,15 +41,29 @@ Split key generation-----------------------------------------------
24538 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
24539 @0xffe04000
24540 */
24541 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24542 - int split_key_pad_len, const u8 *key_in, u32 keylen,
24543 - u32 alg_op)
24544 +int gen_split_key(struct device *jrdev, u8 *key_out,
24545 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
24546 + int max_keylen)
24547 {
24548 u32 *desc;
24549 struct split_key_result result;
24550 dma_addr_t dma_addr_in, dma_addr_out;
24551 int ret = -ENOMEM;
24552
24553 + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
24554 + adata->keylen_pad = split_key_pad_len(adata->algtype &
24555 + OP_ALG_ALGSEL_MASK);
24556 +
24557 +#ifdef DEBUG
24558 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
24559 + adata->keylen, adata->keylen_pad);
24560 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
24561 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
24562 +#endif
24563 +
24564 + if (adata->keylen_pad > max_keylen)
24565 + return -EINVAL;
24566 +
24567 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
24568 if (!desc) {
24569 dev_err(jrdev, "unable to allocate key input memory\n");
24570 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24571 goto out_free;
24572 }
24573
24574 - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
24575 + dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
24576 DMA_FROM_DEVICE);
24577 if (dma_mapping_error(jrdev, dma_addr_out)) {
24578 dev_err(jrdev, "unable to map key output memory\n");
24579 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24580 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
24581
24582 /* Sets MDHA up into an HMAC-INIT */
24583 - append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
24584 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
24585 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
24586 + OP_ALG_AS_INIT);
24587
24588 /*
24589 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
24590 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24591 * FIFO_STORE with the explicit split-key content store
24592 * (0x26 output type)
24593 */
24594 - append_fifo_store(desc, dma_addr_out, split_key_len,
24595 + append_fifo_store(desc, dma_addr_out, adata->keylen,
24596 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
24597
24598 #ifdef DEBUG
24599 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24600 #ifdef DEBUG
24601 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
24602 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
24603 - split_key_pad_len, 1);
24604 + adata->keylen_pad, 1);
24605 #endif
24606 }
24607
24608 - dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
24609 + dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
24610 DMA_FROM_DEVICE);
24611 out_unmap_in:
24612 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
24613 diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
24614 index c5588f6d..851a7c86 100644
24615 --- a/drivers/crypto/caam/key_gen.h
24616 +++ b/drivers/crypto/caam/key_gen.h
24617 @@ -5,6 +5,36 @@
24618 *
24619 */
24620
24621 +/**
24622 + * split_key_len - Compute MDHA split key length for a given algorithm
24623 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24624 + * SHA224, SHA256, SHA384, SHA512.
24625 + *
24626 + * Return: MDHA split key length
24627 + */
24628 +static inline u32 split_key_len(u32 hash)
24629 +{
24630 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
24631 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
24632 + u32 idx;
24633 +
24634 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
24635 +
24636 + return (u32)(mdpadlen[idx] * 2);
24637 +}
24638 +
24639 +/**
24640 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
24641 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24642 + * SHA224, SHA256, SHA384, SHA512.
24643 + *
24644 + * Return: MDHA split key pad length
24645 + */
24646 +static inline u32 split_key_pad_len(u32 hash)
24647 +{
24648 + return ALIGN(split_key_len(hash), 16);
24649 +}
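+
+/*
+ * For illustration (the values follow from mdpadlen[] above): SHA-1 gives
+ * split_key_len() = 2 * 20 = 40 bytes and split_key_pad_len() = 48 bytes,
+ * while SHA-256 gives 2 * 32 = 64 bytes, already 16-byte aligned, so its
+ * padded length is also 64 bytes.
+ */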
24650 +
24651 struct split_key_result {
24652 struct completion completion;
24653 int err;
24654 @@ -12,6 +42,6 @@ struct split_key_result {
24655
24656 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
24657
24658 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24659 - int split_key_pad_len, const u8 *key_in, u32 keylen,
24660 - u32 alg_op);
24661 +int gen_split_key(struct device *jrdev, u8 *key_out,
24662 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
24663 + int max_keylen);
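+
+/*
+ * Illustrative call sequence, loosely modelled on the caamhash/caamalg users
+ * of this helper (ctx, key, keylen and CAAM_MAX_HASH_KEY_SIZE are assumed
+ * here, not defined by this header):
+ *
+ *	ctx->adata.algtype = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP;
+ *	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, key, keylen,
+ *			    CAAM_MAX_HASH_KEY_SIZE);
+ *
+ * On success, gen_split_key() fills ctx->adata.keylen / keylen_pad and
+ * writes keylen_pad bytes of split key material to ctx->key.
+ */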
24664 diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
24665 index aaa00dd1..31e59963 100644
24666 --- a/drivers/crypto/caam/pdb.h
24667 +++ b/drivers/crypto/caam/pdb.h
24668 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
24669 #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
24670 #define RSA_PDB_D_SHIFT 12
24671 #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
24672 +#define RSA_PDB_Q_SHIFT 12
24673 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
24674
24675 #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
24676 #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
24677 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
24678 #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
24679
24680 #define RSA_PRIV_KEY_FRM_1 0
24681 +#define RSA_PRIV_KEY_FRM_2 1
24682 +#define RSA_PRIV_KEY_FRM_3 2
24683
24684 /**
24685 * RSA Encrypt Protocol Data Block
24686 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
24687 dma_addr_t d_dma;
24688 } __packed;
24689
24690 +/**
24691 + * RSA Decrypt PDB - Private Key Form #2
24692 + * @sgf : scatter-gather field
24693 + * @g_dma : dma address of encrypted input data
24694 + * @f_dma : dma address of output data
24695 + * @d_dma : dma address of RSA private exponent
24696 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
24697 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
24698 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24699 + * as internal state buffer. It is assumed to be as long as p.
24700 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24701 + * as internal state buffer. It is assumed to be as long as q.
24702 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24703 + */
24704 +struct rsa_priv_f2_pdb {
24705 + u32 sgf;
24706 + dma_addr_t g_dma;
24707 + dma_addr_t f_dma;
24708 + dma_addr_t d_dma;
24709 + dma_addr_t p_dma;
24710 + dma_addr_t q_dma;
24711 + dma_addr_t tmp1_dma;
24712 + dma_addr_t tmp2_dma;
24713 + u32 p_q_len;
24714 +} __packed;
24715 +
24716 +/**
24717 + * RSA Decrypt PDB - Private Key Form #3
24718 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
24719 + * the RSA modulus.
24720 + * @sgf : scatter-gather field
24721 + * @g_dma : dma address of encrypted input data
24722 + * @f_dma : dma address of output data
24723 + * @c_dma : dma address of RSA CRT coefficient
24724 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
24725 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
24726 + * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
24727 + * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
24728 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24729 + * as internal state buffer. It is assumed to be as long as p.
24730 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24731 + * as internal state buffer. It is assumed to be as long as q.
24732 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24733 + */
24734 +struct rsa_priv_f3_pdb {
24735 + u32 sgf;
24736 + dma_addr_t g_dma;
24737 + dma_addr_t f_dma;
24738 + dma_addr_t c_dma;
24739 + dma_addr_t p_dma;
24740 + dma_addr_t q_dma;
24741 + dma_addr_t dp_dma;
24742 + dma_addr_t dq_dma;
24743 + dma_addr_t tmp1_dma;
24744 + dma_addr_t tmp2_dma;
24745 + u32 p_q_len;
24746 +} __packed;
24747 +
24748 #endif
24749 diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
24750 index 4e4183e6..9e2ce6fe 100644
24751 --- a/drivers/crypto/caam/pkc_desc.c
24752 +++ b/drivers/crypto/caam/pkc_desc.c
24753 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
24754 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24755 RSA_PRIV_KEY_FRM_1);
24756 }
24757 +
24758 +/* Descriptor for RSA Private operation - Private Key Form #2 */
24759 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
24760 +{
24761 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
24762 + append_cmd(desc, pdb->sgf);
24763 + append_ptr(desc, pdb->g_dma);
24764 + append_ptr(desc, pdb->f_dma);
24765 + append_ptr(desc, pdb->d_dma);
24766 + append_ptr(desc, pdb->p_dma);
24767 + append_ptr(desc, pdb->q_dma);
24768 + append_ptr(desc, pdb->tmp1_dma);
24769 + append_ptr(desc, pdb->tmp2_dma);
24770 + append_cmd(desc, pdb->p_q_len);
24771 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24772 + RSA_PRIV_KEY_FRM_2);
24773 +}
24774 +
24775 +/* Descriptor for RSA Private operation - Private Key Form #3 */
24776 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
24777 +{
24778 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
24779 + append_cmd(desc, pdb->sgf);
24780 + append_ptr(desc, pdb->g_dma);
24781 + append_ptr(desc, pdb->f_dma);
24782 + append_ptr(desc, pdb->c_dma);
24783 + append_ptr(desc, pdb->p_dma);
24784 + append_ptr(desc, pdb->q_dma);
24785 + append_ptr(desc, pdb->dp_dma);
24786 + append_ptr(desc, pdb->dq_dma);
24787 + append_ptr(desc, pdb->tmp1_dma);
24788 + append_ptr(desc, pdb->tmp2_dma);
24789 + append_cmd(desc, pdb->p_q_len);
24790 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24791 + RSA_PRIV_KEY_FRM_3);
24792 +}
24793 diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
24794 new file mode 100644
24795 index 00000000..48185d55
24796 --- /dev/null
24797 +++ b/drivers/crypto/caam/qi.c
24798 @@ -0,0 +1,797 @@
24799 +/*
24800 + * CAAM/SEC 4.x QI transport/backend driver
24801 + * Queue Interface backend functionality
24802 + *
24803 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
24804 + * Copyright 2016-2017 NXP
24805 + */
24806 +
24807 +#include <linux/cpumask.h>
24808 +#include <linux/kthread.h>
24809 +#include <linux/fsl_qman.h>
24810 +
24811 +#include "regs.h"
24812 +#include "qi.h"
24813 +#include "desc.h"
24814 +#include "intern.h"
24815 +#include "desc_constr.h"
24816 +
24817 +#define PREHDR_RSLS_SHIFT 31
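+/*
+ * For illustration: a 7-word shared descriptor makes caam_drv_ctx_init()
+ * below build prehdr[0] = (1 << PREHDR_RSLS_SHIFT) | 7 = 0x80000007
+ * (before the cpu_to_caam32() endianness conversion).
+ */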
24818 +
24819 +/*
24820 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
24821 + * so that resources used by the in-flight buffers do not become a memory hog.
24822 + */
24823 +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
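+/*
+ * For illustration: init_cgr() below sets the congestion state threshold to
+ * num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU frames, e.g. 4 * 256 = 1024 frames
+ * on a system with four affine-portal CPUs.
+ */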
24824 +
24825 +#define CAAM_QI_ENQUEUE_RETRIES 10000
24826 +
24827 +#define CAAM_NAPI_WEIGHT 63
24828 +
24829 +/*
24830 + * caam_napi - struct holding CAAM NAPI-related params
24831 + * @irqtask: IRQ task for QI backend
24832 + * @p: QMan portal
24833 + */
24834 +struct caam_napi {
24835 + struct napi_struct irqtask;
24836 + struct qman_portal *p;
24837 +};
24838 +
24839 +/*
24840 + * caam_qi_pcpu_priv - percpu private data structure to maintain the list of pending
24841 + * responses expected on each cpu.
24842 + * @caam_napi: CAAM NAPI params
24843 + * @net_dev: netdev used by NAPI
24844 + * @rsp_fq: response FQ from CAAM
24845 + */
24846 +struct caam_qi_pcpu_priv {
24847 + struct caam_napi caam_napi;
24848 + struct net_device net_dev;
24849 + struct qman_fq *rsp_fq;
24850 +} ____cacheline_aligned;
24851 +
24852 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
24853 +static DEFINE_PER_CPU(int, last_cpu);
24854 +
24855 +/*
24856 + * caam_qi_priv - CAAM QI backend private params
24857 + * @cgr: QMan congestion group
24858 + * @qi_pdev: platform device for QI backend
24859 + */
24860 +struct caam_qi_priv {
24861 + struct qman_cgr cgr;
24862 + struct platform_device *qi_pdev;
24863 +};
24864 +
24865 +static struct caam_qi_priv qipriv ____cacheline_aligned;
24866 +
24867 +/*
24868 + * This is written by only one core - the one that initialized the CGR - and
24869 + * read by multiple cores (all the others).
24870 + */
24871 +bool caam_congested __read_mostly;
24872 +EXPORT_SYMBOL(caam_congested);
24873 +
24874 +#ifdef CONFIG_DEBUG_FS
24875 +/*
24876 + * This is a counter for the number of times the congestion group (where all
24877 + * the request and response queues are) reached congestion. Incremented
24878 + * each time the congestion callback is called with congested == true.
24879 + */
24880 +static u64 times_congested;
24881 +#endif
24882 +
24883 +/*
24884 + * CPU on which the module was initialised. This is required because the QMan
24885 + * driver requires CGRs to be removed from the same CPU on which they were
24886 + * originally allocated.
24887 + */
24888 +static int mod_init_cpu;
24889 +
24890 +/*
24891 + * This is a cache of buffers, from which the users of the CAAM QI driver
24892 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
24893 + * doing malloc on the hotpath.
24894 + * NOTE: A more elegant solution would be to have some headroom in the frames
24895 + * being processed. This could be added by the dpaa-ethernet driver.
24896 + * This would pose a problem for userspace application processing which
24897 + * cannot know of this limitation. So for now, this will work.
24898 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
24899 + */
24900 +static struct kmem_cache *qi_cache;
24901 +
24902 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
24903 +{
24904 + struct qm_fd fd;
24905 + int ret;
24906 + int num_retries = 0;
24907 +
24908 + fd.cmd = 0;
24909 + fd.format = qm_fd_compound;
24910 + fd.cong_weight = req->fd_sgt[1].length;
24911 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
24912 + DMA_BIDIRECTIONAL);
24913 + if (dma_mapping_error(qidev, fd.addr)) {
24914 + dev_err(qidev, "DMA mapping error for QI enqueue request\n");
24915 + return -EIO;
24916 + }
24917 +
24918 + do {
24919 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
24920 + if (likely(!ret))
24921 + return 0;
24922 +
24923 + if (ret != -EBUSY)
24924 + break;
24925 + num_retries++;
24926 + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
24927 +
24928 + dev_err(qidev, "qman_enqueue failed: %d\n", ret);
24929 +
24930 + return ret;
24931 +}
24932 +EXPORT_SYMBOL(caam_qi_enqueue);
24933 +
24934 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
24935 + const struct qm_mr_entry *msg)
24936 +{
24937 + const struct qm_fd *fd;
24938 + struct caam_drv_req *drv_req;
24939 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
24940 +
24941 + fd = &msg->ern.fd;
24942 +
24943 + if (fd->format != qm_fd_compound) {
24944 + dev_err(qidev, "Non-compound FD from CAAM\n");
24945 + return;
24946 + }
24947 +
24948 + drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
24949 + if (!drv_req) {
24950 + dev_err(qidev,
24951 + "Can't find original request for CAAM response\n");
24952 + return;
24953 + }
24954 +
24955 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
24956 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
24957 +
24958 + drv_req->cbk(drv_req, -EIO);
24959 +}
24960 +
24961 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
24962 + struct qman_fq *rsp_fq,
24963 + dma_addr_t hwdesc,
24964 + int fq_sched_flag)
24965 +{
24966 + int ret;
24967 + struct qman_fq *req_fq;
24968 + struct qm_mcc_initfq opts;
24969 +
24970 + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
24971 + if (!req_fq)
24972 + return ERR_PTR(-ENOMEM);
24973 +
24974 + req_fq->cb.ern = caam_fq_ern_cb;
24975 + req_fq->cb.fqs = NULL;
24976 +
24977 + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
24978 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
24979 + req_fq);
24980 + if (ret) {
24981 + dev_err(qidev, "Failed to create session req FQ\n");
24982 + goto create_req_fq_fail;
24983 + }
24984 +
24985 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
24986 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
24987 + QM_INITFQ_WE_CGID;
24988 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
24989 + opts.fqd.dest.channel = qm_channel_caam;
24990 + opts.fqd.dest.wq = 2;
24991 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
24992 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
24993 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
24994 + opts.fqd.cgid = qipriv.cgr.cgrid;
24995 +
24996 + ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
24997 + if (ret) {
24998 + dev_err(qidev, "Failed to init session req FQ\n");
24999 + goto init_req_fq_fail;
25000 + }
25001 +
25002 + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
25003 + smp_processor_id());
25004 + return req_fq;
25005 +
25006 +init_req_fq_fail:
25007 + qman_destroy_fq(req_fq, 0);
25008 +create_req_fq_fail:
25009 + kfree(req_fq);
25010 + return ERR_PTR(ret);
25011 +}
25012 +
25013 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
25014 +{
25015 + int ret;
25016 +
25017 + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
25018 + QMAN_VOLATILE_FLAG_FINISH,
25019 + QM_VDQCR_PRECEDENCE_VDQCR |
25020 + QM_VDQCR_NUMFRAMES_TILLEMPTY);
25021 + if (ret) {
25022 + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
25023 + return ret;
25024 + }
25025 +
25026 + do {
25027 + struct qman_portal *p;
25028 +
25029 + p = qman_get_affine_portal(smp_processor_id());
25030 + qman_p_poll_dqrr(p, 16);
25031 + } while (fq->flags & QMAN_FQ_STATE_NE);
25032 +
25033 + return 0;
25034 +}
25035 +
25036 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
25037 +{
25038 + u32 flags;
25039 + int ret;
25040 +
25041 + ret = qman_retire_fq(fq, &flags);
25042 + if (ret < 0) {
25043 + dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
25044 + return ret;
25045 + }
25046 +
25047 + if (!ret)
25048 + goto empty_fq;
25049 +
25050 + /* Async FQ retirement condition */
25051 + if (ret == 1) {
25052 + /* Retry till FQ gets in retired state */
25053 + do {
25054 + msleep(20);
25055 + } while (fq->state != qman_fq_state_retired);
25056 +
25057 + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
25058 + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
25059 + }
25060 +
25061 +empty_fq:
25062 + if (fq->flags & QMAN_FQ_STATE_NE) {
25063 + ret = empty_retired_fq(qidev, fq);
25064 + if (ret) {
25065 + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
25066 + fq->fqid);
25067 + return ret;
25068 + }
25069 + }
25070 +
25071 + ret = qman_oos_fq(fq);
25072 + if (ret)
25073 + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
25074 +
25075 + qman_destroy_fq(fq, 0);
25076 + kfree(fq);
25077 +
25078 + return ret;
25079 +}
25080 +
25081 +static int empty_caam_fq(struct qman_fq *fq)
25082 +{
25083 + int ret;
25084 + struct qm_mcr_queryfq_np np;
25085 +
25086 +	/* Wait till the older CAAM FQ gets empty */
25087 + do {
25088 + ret = qman_query_fq_np(fq, &np);
25089 + if (ret)
25090 + return ret;
25091 +
25092 + if (!np.frm_cnt)
25093 + break;
25094 +
25095 + msleep(20);
25096 + } while (1);
25097 +
25098 + /*
25099 + * Give extra time for pending jobs from this FQ in holding tanks
25100 + * to get processed
25101 + */
25102 + msleep(20);
25103 + return 0;
25104 +}
25105 +
25106 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
25107 +{
25108 + int ret;
25109 + u32 num_words;
25110 + struct qman_fq *new_fq, *old_fq;
25111 + struct device *qidev = drv_ctx->qidev;
25112 +
25113 + num_words = desc_len(sh_desc);
25114 + if (num_words > MAX_SDLEN) {
25115 + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
25116 + return -EINVAL;
25117 + }
25118 +
25119 + /* Note down older req FQ */
25120 + old_fq = drv_ctx->req_fq;
25121 +
25122 + /* Create a new req FQ in parked state */
25123 + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
25124 + drv_ctx->context_a, 0);
25125 + if (unlikely(IS_ERR_OR_NULL(new_fq))) {
25126 + dev_err(qidev, "FQ allocation for shdesc update failed\n");
25127 + return PTR_ERR(new_fq);
25128 + }
25129 +
25130 + /* Hook up new FQ to context so that new requests keep queuing */
25131 + drv_ctx->req_fq = new_fq;
25132 +
25133 + /* Empty and remove the older FQ */
25134 + ret = empty_caam_fq(old_fq);
25135 + if (ret) {
25136 + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
25137 +
25138 + /* We can revert to older FQ */
25139 + drv_ctx->req_fq = old_fq;
25140 +
25141 + if (kill_fq(qidev, new_fq))
25142 + dev_warn(qidev, "New CAAM FQ kill failed\n");
25143 +
25144 + return ret;
25145 + }
25146 +
25147 + /*
25148 + * Re-initialise pre-header. Set RSLS and SDLEN.
25149 + * Update the shared descriptor for driver context.
25150 + */
25151 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25152 + num_words);
25153 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25154 + dma_sync_single_for_device(qidev, drv_ctx->context_a,
25155 + sizeof(drv_ctx->sh_desc) +
25156 + sizeof(drv_ctx->prehdr),
25157 + DMA_BIDIRECTIONAL);
25158 +
25159 + /* Put the new FQ in scheduled state */
25160 + ret = qman_schedule_fq(new_fq);
25161 + if (ret) {
25162 + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
25163 +
25164 + /*
25165 + * We can kill new FQ and revert to old FQ.
25166 + * Since the desc is already modified, it is success case
25167 + */
25168 +
25169 + drv_ctx->req_fq = old_fq;
25170 +
25171 + if (kill_fq(qidev, new_fq))
25172 + dev_warn(qidev, "New CAAM FQ kill failed\n");
25173 + } else if (kill_fq(qidev, old_fq)) {
25174 + dev_warn(qidev, "Old CAAM FQ kill failed\n");
25175 + }
25176 +
25177 + return 0;
25178 +}
25179 +EXPORT_SYMBOL(caam_drv_ctx_update);
25180 +
25181 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
25182 + int *cpu,
25183 + u32 *sh_desc)
25184 +{
25185 + size_t size;
25186 + u32 num_words;
25187 + dma_addr_t hwdesc;
25188 + struct caam_drv_ctx *drv_ctx;
25189 + const cpumask_t *cpus = qman_affine_cpus();
25190 +
25191 + num_words = desc_len(sh_desc);
25192 + if (num_words > MAX_SDLEN) {
25193 + dev_err(qidev, "Invalid descriptor len: %d words\n",
25194 + num_words);
25195 + return ERR_PTR(-EINVAL);
25196 + }
25197 +
25198 + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
25199 + if (!drv_ctx)
25200 + return ERR_PTR(-ENOMEM);
25201 +
25202 + /*
25203 + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
25204 + * and dma-map them.
25205 + */
25206 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25207 + num_words);
25208 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25209 + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
25210 + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
25211 + DMA_BIDIRECTIONAL);
25212 + if (dma_mapping_error(qidev, hwdesc)) {
25213 + dev_err(qidev, "DMA map error for preheader + shdesc\n");
25214 + kfree(drv_ctx);
25215 + return ERR_PTR(-ENOMEM);
25216 + }
25217 + drv_ctx->context_a = hwdesc;
25218 +
25219 + /* If given CPU does not own the portal, choose another one that does */
25220 + if (!cpumask_test_cpu(*cpu, cpus)) {
25221 + int *pcpu = &get_cpu_var(last_cpu);
25222 +
25223 + *pcpu = cpumask_next(*pcpu, cpus);
25224 + if (*pcpu >= nr_cpu_ids)
25225 + *pcpu = cpumask_first(cpus);
25226 + *cpu = *pcpu;
25227 +
25228 + put_cpu_var(last_cpu);
25229 + }
25230 + drv_ctx->cpu = *cpu;
25231 +
25232 + /* Find response FQ hooked with this CPU */
25233 + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
25234 +
25235 + /* Attach request FQ */
25236 + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
25237 + QMAN_INITFQ_FLAG_SCHED);
25238 + if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
25239 + dev_err(qidev, "create_caam_req_fq failed\n");
25240 + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
25241 + kfree(drv_ctx);
25242 + return ERR_PTR(-ENOMEM);
25243 + }
25244 +
25245 + drv_ctx->qidev = qidev;
25246 + return drv_ctx;
25247 +}
25248 +EXPORT_SYMBOL(caam_drv_ctx_init);
25249 +
25250 +void *qi_cache_alloc(gfp_t flags)
25251 +{
25252 + return kmem_cache_alloc(qi_cache, flags);
25253 +}
25254 +EXPORT_SYMBOL(qi_cache_alloc);
25255 +
25256 +void qi_cache_free(void *obj)
25257 +{
25258 + kmem_cache_free(qi_cache, obj);
25259 +}
25260 +EXPORT_SYMBOL(qi_cache_free);
25261 +
25262 +static int caam_qi_poll(struct napi_struct *napi, int budget)
25263 +{
25264 + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
25265 +
25266 + int cleaned = qman_p_poll_dqrr(np->p, budget);
25267 +
25268 + if (cleaned < budget) {
25269 + napi_complete(napi);
25270 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
25271 + }
25272 +
25273 + return cleaned;
25274 +}
25275 +
25276 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
25277 +{
25278 + if (IS_ERR_OR_NULL(drv_ctx))
25279 + return;
25280 +
25281 + /* Remove request FQ */
25282 + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
25283 + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
25284 +
25285 + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
25286 + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
25287 + DMA_BIDIRECTIONAL);
25288 + kfree(drv_ctx);
25289 +}
25290 +EXPORT_SYMBOL(caam_drv_ctx_rel);
25291 +
25292 +int caam_qi_shutdown(struct device *qidev)
25293 +{
25294 + int i, ret;
25295 + struct caam_qi_priv *priv = dev_get_drvdata(qidev);
25296 + const cpumask_t *cpus = qman_affine_cpus();
25297 + struct cpumask old_cpumask = current->cpus_allowed;
25298 +
25299 + for_each_cpu(i, cpus) {
25300 + struct napi_struct *irqtask;
25301 +
25302 + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
25303 + napi_disable(irqtask);
25304 + netif_napi_del(irqtask);
25305 +
25306 + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
25307 + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
25308 + }
25309 +
25310 + /*
25311 +	 * The QMan driver requires CGRs to be deleted from the same CPU on which
25312 +	 * they were instantiated. Hence run the module removal from the same CPU
25313 +	 * on which the module was originally inserted.
25314 + */
25315 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25316 +
25317 + ret = qman_delete_cgr(&priv->cgr);
25318 + if (ret)
25319 + dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
25320 + else
25321 + qman_release_cgrid(priv->cgr.cgrid);
25322 +
25323 + kmem_cache_destroy(qi_cache);
25324 +
25325 + /* Now that we're done with the CGRs, restore the cpus allowed mask */
25326 + set_cpus_allowed_ptr(current, &old_cpumask);
25327 +
25328 + platform_device_unregister(priv->qi_pdev);
25329 + return ret;
25330 +}
25331 +
25332 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
25333 +{
25334 + caam_congested = congested;
25335 +
25336 + if (congested) {
25337 +#ifdef CONFIG_DEBUG_FS
25338 + times_congested++;
25339 +#endif
25340 + pr_debug_ratelimited("CAAM entered congestion\n");
25341 +
25342 + } else {
25343 + pr_debug_ratelimited("CAAM exited congestion\n");
25344 + }
25345 +}
25346 +
25347 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
25348 +{
25349 + /*
25350 + * In case of threaded ISR, for RT kernels in_irq() does not return
25351 + * appropriate value, so use in_serving_softirq to distinguish between
25352 + * softirq and irq contexts.
25353 + */
25354 + if (unlikely(in_irq() || !in_serving_softirq())) {
25355 + /* Disable QMan IRQ source and invoke NAPI */
25356 + qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
25357 + np->p = p;
25358 + napi_schedule(&np->irqtask);
25359 + return 1;
25360 + }
25361 + return 0;
25362 +}
25363 +
25364 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
25365 + struct qman_fq *rsp_fq,
25366 + const struct qm_dqrr_entry *dqrr)
25367 +{
25368 + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
25369 + struct caam_drv_req *drv_req;
25370 + const struct qm_fd *fd;
25371 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
25372 +
25373 + if (caam_qi_napi_schedule(p, caam_napi))
25374 + return qman_cb_dqrr_stop;
25375 +
25376 + fd = &dqrr->fd;
25377 + if (unlikely(fd->status))
25378 + dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
25379 +
25380 +	if (unlikely(fd->format != qm_fd_compound)) {
25381 + dev_err(qidev, "Non-compound FD from CAAM\n");
25382 + return qman_cb_dqrr_consume;
25383 + }
25384 +
25385 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
25386 + if (unlikely(!drv_req)) {
25387 + dev_err(qidev,
25388 + "Can't find original request for caam response\n");
25389 + return qman_cb_dqrr_consume;
25390 + }
25391 +
25392 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
25393 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
25394 +
25395 + drv_req->cbk(drv_req, fd->status);
25396 + return qman_cb_dqrr_consume;
25397 +}
25398 +
25399 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
25400 +{
25401 + struct qm_mcc_initfq opts;
25402 + struct qman_fq *fq;
25403 + int ret;
25404 +
25405 + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
25406 + if (!fq)
25407 + return -ENOMEM;
25408 +
25409 + fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
25410 +
25411 + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
25412 + QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
25413 + if (ret) {
25414 + dev_err(qidev, "Rsp FQ create failed\n");
25415 + kfree(fq);
25416 + return -ENODEV;
25417 + }
25418 +
25419 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
25420 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
25421 + QM_INITFQ_WE_CGID;
25422 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
25423 + QM_FQCTRL_CGE;
25424 + opts.fqd.dest.channel = qman_affine_channel(cpu);
25425 + opts.fqd.dest.wq = 3;
25426 + opts.fqd.cgid = qipriv.cgr.cgrid;
25427 + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
25428 + QM_STASHING_EXCL_DATA;
25429 + opts.fqd.context_a.stashing.data_cl = 1;
25430 + opts.fqd.context_a.stashing.context_cl = 1;
25431 +
25432 + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
25433 + if (ret) {
25434 + dev_err(qidev, "Rsp FQ init failed\n");
25435 + kfree(fq);
25436 + return -ENODEV;
25437 + }
25438 +
25439 + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
25440 +
25441 +	dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
25442 + return 0;
25443 +}
25444 +
25445 +static int init_cgr(struct device *qidev)
25446 +{
25447 + int ret;
25448 + struct qm_mcc_initcgr opts;
25449 + const u64 cpus = *(u64 *)qman_affine_cpus();
25450 + const int num_cpus = hweight64(cpus);
25451 + const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
25452 +
25453 + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
25454 + if (ret) {
25455 + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
25456 + return ret;
25457 + }
25458 +
25459 + qipriv.cgr.cb = cgr_cb;
25460 + memset(&opts, 0, sizeof(opts));
25461 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
25462 + opts.cgr.cscn_en = QM_CGR_EN;
25463 + opts.cgr.mode = QMAN_CGR_MODE_FRAME;
25464 + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
25465 +
25466 + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
25467 + if (ret) {
25468 + dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
25469 + qipriv.cgr.cgrid);
25470 + return ret;
25471 + }
25472 +
25473 + dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
25474 + return 0;
25475 +}
25476 +
25477 +static int alloc_rsp_fqs(struct device *qidev)
25478 +{
25479 + int ret, i;
25480 + const cpumask_t *cpus = qman_affine_cpus();
25481 +
25482 +	/* Now create response FQs */
25483 + for_each_cpu(i, cpus) {
25484 + ret = alloc_rsp_fq_cpu(qidev, i);
25485 + if (ret) {
25486 +			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
25487 + return ret;
25488 + }
25489 + }
25490 +
25491 + return 0;
25492 +}
25493 +
25494 +static void free_rsp_fqs(void)
25495 +{
25496 + int i;
25497 + const cpumask_t *cpus = qman_affine_cpus();
25498 +
25499 + for_each_cpu(i, cpus)
25500 + kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
25501 +}
25502 +
25503 +int caam_qi_init(struct platform_device *caam_pdev)
25504 +{
25505 + int err, i;
25506 + struct platform_device *qi_pdev;
25507 + struct device *ctrldev = &caam_pdev->dev, *qidev;
25508 + struct caam_drv_private *ctrlpriv;
25509 + const cpumask_t *cpus = qman_affine_cpus();
25510 + struct cpumask old_cpumask = current->cpus_allowed;
25511 + static struct platform_device_info qi_pdev_info = {
25512 + .name = "caam_qi",
25513 + .id = PLATFORM_DEVID_NONE
25514 + };
25515 +
25516 + /*
25517 +	 * QMan requires CGRs to be removed from the same CPU+portal on which they
25518 +	 * were originally allocated. Hence we need to note down the initialisation
25519 +	 * CPU and use the same CPU for module exit.
25520 +	 * We select the first CPU from the list of portal-owning CPUs and pin
25521 +	 * module init to it.
25522 + */
25523 + mod_init_cpu = cpumask_first(cpus);
25524 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25525 +
25526 + qi_pdev_info.parent = ctrldev;
25527 + qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
25528 + qi_pdev = platform_device_register_full(&qi_pdev_info);
25529 + if (IS_ERR(qi_pdev))
25530 + return PTR_ERR(qi_pdev);
25531 + arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
25532 +
25533 + ctrlpriv = dev_get_drvdata(ctrldev);
25534 + qidev = &qi_pdev->dev;
25535 +
25536 + qipriv.qi_pdev = qi_pdev;
25537 + dev_set_drvdata(qidev, &qipriv);
25538 +
25539 + /* Initialize the congestion detection */
25540 + err = init_cgr(qidev);
25541 + if (err) {
25542 + dev_err(qidev, "CGR initialization failed: %d\n", err);
25543 + platform_device_unregister(qi_pdev);
25544 + return err;
25545 + }
25546 +
25547 + /* Initialise response FQs */
25548 + err = alloc_rsp_fqs(qidev);
25549 + if (err) {
25550 + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
25551 + free_rsp_fqs();
25552 + platform_device_unregister(qi_pdev);
25553 + return err;
25554 + }
25555 +
25556 + /*
25557 +	 * Enable the NAPI contexts on each core that has an affine portal.
25559 + */
25560 + for_each_cpu(i, cpus) {
25561 + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
25562 + struct caam_napi *caam_napi = &priv->caam_napi;
25563 + struct napi_struct *irqtask = &caam_napi->irqtask;
25564 + struct net_device *net_dev = &priv->net_dev;
25565 +
25566 + net_dev->dev = *qidev;
25567 + INIT_LIST_HEAD(&net_dev->napi_list);
25568 +
25569 + netif_napi_add(net_dev, irqtask, caam_qi_poll,
25570 + CAAM_NAPI_WEIGHT);
25571 +
25572 + napi_enable(irqtask);
25573 + }
25574 +
25575 + /* Hook up QI device to parent controlling caam device */
25576 + ctrlpriv->qidev = qidev;
25577 +
25578 + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
25579 + SLAB_CACHE_DMA, NULL);
25580 + if (!qi_cache) {
25581 + dev_err(qidev, "Can't allocate CAAM cache\n");
25582 + free_rsp_fqs();
25583 + platform_device_unregister(qi_pdev);
25584 + return -ENOMEM;
25585 + }
25586 +
25587 + /* Done with the CGRs; restore the cpus allowed mask */
25588 + set_cpus_allowed_ptr(current, &old_cpumask);
25589 +#ifdef CONFIG_DEBUG_FS
25590 + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
25591 + &times_congested, &caam_fops_u64_ro);
25592 +#endif
25593 + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
25594 + return 0;
25595 +}
25596 diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
25597 new file mode 100644
25598 index 00000000..0c2e68b3
25599 --- /dev/null
25600 +++ b/drivers/crypto/caam/qi.h
25601 @@ -0,0 +1,204 @@
25602 +/*
25603 + * Public definitions for the CAAM/QI (Queue Interface) backend.
25604 + *
25605 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25606 + * Copyright 2016-2017 NXP
25607 + */
25608 +
25609 +#ifndef __QI_H__
25610 +#define __QI_H__
25611 +
25612 +#include <linux/fsl_qman.h>
25613 +#include "compat.h"
25614 +#include "desc.h"
25615 +#include "desc_constr.h"
25616 +
25617 +/*
25618 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
25619 + * (as pointed to by context_a of the to-CAAM FQ).
25620 + * When the job descriptor is executed by DECO, the whole job descriptor
25621 + * together with shared descriptor gets loaded in DECO buffer, which is
25622 + * 64 words (each 32-bit) long.
25623 + *
25624 + * The job descriptor constructed by CAAM hardware has the following layout:
25625 + *
25626 + * HEADER (1 word)
25627 + * Shdesc ptr (1 or 2 words)
25628 + * SEQ_OUT_PTR (1 word)
25629 + * Out ptr (1 or 2 words)
25630 + * Out length (1 word)
25631 + * SEQ_IN_PTR (1 word)
25632 + * In ptr (1 or 2 words)
25633 + * In length (1 word)
25634 + *
25635 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
25636 + *
25637 + * Apart from the shdesc contents, the total number of words that get loaded in
25638 + * the DECO buffer is 8 (32-bit pointers) or 11 (64-bit pointers). The remaining
25639 + * words in the DECO buffer can be used for storing the shared descriptor.
25640 + */
25641 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
25642 +
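+/*
+ * A worked example, assuming the usual definitions elsewhere in the driver
+ * (CAAM_DESC_BYTES_MAX = 256 and DESC_JOB_IO_LEN = CAAM_CMD_SZ * 5 +
+ * CAAM_PTR_SZ * 3): with 64-bit pointers the CAAM-built job descriptor
+ * occupies 11 words (44 bytes), so MAX_SDLEN = (256 - 44) / 4 = 53 words;
+ * with 32-bit pointers it occupies 8 words, leaving 56 words.
+ */
+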
25643 +/* Length of a single buffer in the QI driver memory cache */
25644 +#define CAAM_QI_MEMCACHE_SIZE 768
25645 +
25646 +extern bool caam_congested __read_mostly;
25647 +
25648 +/*
25649 + * This is the request structure the driver application should fill while
25650 + * submitting a job to driver.
25651 + */
25652 +struct caam_drv_req;
25653 +
25654 +/*
25655 + * caam_qi_cbk - application's callback function invoked by the driver when the
25656 + * request has been processed.
25657 + * @drv_req: original request that was submitted
25658 + * @status: completion status of request (0 - success, non-zero - error code)
25659 + */
25660 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
25661 +
25662 +enum optype {
25663 + ENCRYPT,
25664 + DECRYPT,
25665 + GIVENCRYPT,
25666 + NUM_OP
25667 +};
25668 +
25669 +/**
25670 + * caam_drv_ctx - CAAM/QI backend driver context
25671 + *
25672 + * The jobs are processed by the driver against a driver context.
25673 + * With every cryptographic context, a driver context is attached.
25674 + * The driver context contains data for private use by driver.
25675 + * For the applications, this is an opaque structure.
25676 + *
25677 + * @prehdr: preheader placed before shrd desc
25678 + * @sh_desc: shared descriptor
25679 + * @context_a: shared descriptor dma address
25680 + * @req_fq: to-CAAM request frame queue
25681 + * @rsp_fq: from-CAAM response frame queue
25682 + * @cpu: cpu on which to receive CAAM response
25683 + * @op_type: operation type
25684 + * @qidev: device pointer for CAAM/QI backend
25685 + */
25686 +struct caam_drv_ctx {
25687 + u32 prehdr[2];
25688 + u32 sh_desc[MAX_SDLEN];
25689 + dma_addr_t context_a;
25690 + struct qman_fq *req_fq;
25691 + struct qman_fq *rsp_fq;
25692 + int cpu;
25693 + enum optype op_type;
25694 + struct device *qidev;
25695 +} ____cacheline_aligned;
25696 +
25697 +/**
25698 + * caam_drv_req - The request structure the driver application should fill while
25699 + * submitting a job to driver.
25700 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
25701 + * buffers.
25702 + * @cbk: callback function to invoke when job is completed
25703 + * @app_ctx: arbitrary context attached with request by the application
25704 + * @drv_ctx: driver context under which the request is processed, as returned
25705 + *	     by caam_drv_ctx_init()
25710 + */
25711 +struct caam_drv_req {
25712 + struct qm_sg_entry fd_sgt[2];
25713 + struct caam_drv_ctx *drv_ctx;
25714 + caam_qi_cbk cbk;
25715 + void *app_ctx;
25716 +} ____cacheline_aligned;
25717 +
25718 +/**
25719 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
25720 + *
25721 + * A CAAM/QI driver context must be attached with each cryptographic context.
25722 + * This function allocates memory for CAAM/QI context and returns a handle to
25723 + * the application. This handle must be submitted along with each enqueue
25724 + * request to the driver by the application.
25725 + *
25726 + * @cpu: CPU on which the application prefers the driver to receive CAAM
25727 + *	 responses. The request completion callback will be issued from this
25728 + *	 CPU.
25729 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
25730 + * context.
25731 + *
25732 + * Returns a driver context on success or negative error code on failure.
25733 + */
25734 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
25735 + u32 *sh_desc);
25736 +
25737 +/**
25738 + * caam_qi_enqueue - Submit a request to QI backend driver.
25739 + *
25740 + * The request structure must be properly filled as described above.
25741 + *
25742 + * @qidev: device pointer for QI backend
25743 + * @req: CAAM QI request structure
25744 + *
25745 + * Returns 0 on success or negative error code on failure.
25746 + */
25747 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
25748 +
25749 +/**
25750 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
25751 + * or too many CAAM responses are pending to be processed.
25752 + * @drv_ctx: driver context for which job is to be submitted
25753 + *
25754 + * Returns caam congestion status 'true/false'
25755 + */
25756 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
25757 +
25758 +/**
25759 + * caam_drv_ctx_update - Update QI driver context
25760 + *
25761 + * Invoked when the shared descriptor in the driver context needs to be changed.
25762 + *
25763 + * @drv_ctx: driver context to be updated
25764 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
25765 + *
25766 + * Returns 0 on success or negative error code on failure.
25767 + */
25768 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
25769 +
25770 +/**
25771 + * caam_drv_ctx_rel - Release a QI driver context
25772 + * @drv_ctx: context to be released
25773 + */
25774 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
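+
+/*
+ * A minimal usage sketch of the above API (error handling omitted; sh_desc,
+ * my_done and my_ctx are placeholders assumed by this example):
+ *
+ *	int cpu = smp_processor_id();
+ *	struct caam_drv_ctx *drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
+ *	struct caam_drv_req *req = qi_cache_alloc(GFP_ATOMIC);
+ *
+ *	(fill req->fd_sgt[0] with the output and req->fd_sgt[1] with the input
+ *	 buffer, e.g. using the dma_to_qm_sg_one() helpers from sg_sw_qm.h)
+ *
+ *	req->drv_ctx = drv_ctx;
+ *	req->cbk = my_done;
+ *	req->app_ctx = my_ctx;
+ *	caam_qi_enqueue(qidev, req);
+ *
+ * my_done() later runs on @cpu with the CAAM status and typically ends with
+ * qi_cache_free(req); the session context itself is released with
+ * caam_drv_ctx_rel(drv_ctx).
+ */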
25775 +
25776 +int caam_qi_init(struct platform_device *pdev);
25777 +int caam_qi_shutdown(struct device *dev);
25778 +
25779 +/**
25780 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
25781 + *
25782 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
25783 + * to be allocated on the hotpath. Instead of using malloc, one can use the
25784 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
25785 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which is sufficient for hosting
25786 + * 16 SG entries.
25786 + *
25787 + * @flags: flags that would be used for the equivalent malloc(..) call
25788 + *
25789 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
25790 + */
25791 +void *qi_cache_alloc(gfp_t flags);
25792 +
25793 +/**
25794 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
25795 + *
25796 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
25797 + * the buffer previously allocated by a qi_cache_alloc call.
25798 + * No checking is being done, the call is a passthrough call to
25799 + * kmem_cache_free(...)
25800 + *
25801 + * @obj: object previously allocated using qi_cache_alloc()
25802 + */
25803 +void qi_cache_free(void *obj);
25804 +
25805 +#endif /* __QI_H__ */
25806 diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
25807 index 84d2f838..74eb8c6c 100644
25808 --- a/drivers/crypto/caam/regs.h
25809 +++ b/drivers/crypto/caam/regs.h
25810 @@ -2,6 +2,7 @@
25811 * CAAM hardware register-level view
25812 *
25813 * Copyright 2008-2011 Freescale Semiconductor, Inc.
25814 + * Copyright 2017 NXP
25815 */
25816
25817 #ifndef REGS_H
25818 @@ -67,6 +68,7 @@
25819 */
25820
25821 extern bool caam_little_end;
25822 +extern bool caam_imx;
25823
25824 #define caam_to_cpu(len) \
25825 static inline u##len caam##len ## _to_cpu(u##len val) \
25826 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem *reg)
25827 #else /* CONFIG_64BIT */
25828 static inline void wr_reg64(void __iomem *reg, u64 data)
25829 {
25830 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25831 - if (caam_little_end) {
25832 + if (!caam_imx && caam_little_end) {
25833 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
25834 wr_reg32((u32 __iomem *)(reg), data);
25835 - } else
25836 -#endif
25837 - {
25838 + } else {
25839 wr_reg32((u32 __iomem *)(reg), data >> 32);
25840 wr_reg32((u32 __iomem *)(reg) + 1, data);
25841 }
25842 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
25843
25844 static inline u64 rd_reg64(void __iomem *reg)
25845 {
25846 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25847 - if (caam_little_end)
25848 + if (!caam_imx && caam_little_end)
25849 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
25850 (u64)rd_reg32((u32 __iomem *)(reg)));
25851 - else
25852 -#endif
25853 - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25854 - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25855 +
25856 + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25857 + (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25858 }
25859 #endif /* CONFIG_64BIT */
25860
25861 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
25862 +{
25863 + if (caam_imx)
25864 + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
25865 + (u64)cpu_to_caam32(upper_32_bits(value)));
25866 +
25867 + return cpu_to_caam64(value);
25868 +}
25869 +
25870 +static inline u64 caam_dma64_to_cpu(u64 value)
25871 +{
25872 + if (caam_imx)
25873 + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
25874 + (u64)caam32_to_cpu(upper_32_bits(value)));
25875 +
25876 + return caam64_to_cpu(value);
25877 +}
25878 +
25879 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
25880 -#ifdef CONFIG_SOC_IMX7D
25881 -#define cpu_to_caam_dma(value) \
25882 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25883 - (u64)cpu_to_caam32(upper_32_bits(value)))
25884 -#define caam_dma_to_cpu(value) \
25885 - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
25886 - (u64)caam32_to_cpu(upper_32_bits(value)))
25887 -#else
25888 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
25889 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
25890 -#endif /* CONFIG_SOC_IMX7D */
25891 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
25892 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
25893 #else
25894 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
25895 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
25896 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25897 -
25898 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25899 -#define cpu_to_caam_dma64(value) \
25900 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25901 - (u64)cpu_to_caam32(upper_32_bits(value)))
25902 -#else
25903 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
25904 -#endif
25905 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25906
25907 /*
25908 * jr_outentry
25909 @@ -293,6 +291,7 @@ struct caam_perfmon {
25910 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
25911 #define CTPR_MS_QI_SHIFT 25
25912 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
25913 +#define CTPR_MS_DPAA2 BIT(13)
25914 #define CTPR_MS_VIRT_EN_INCL 0x00000001
25915 #define CTPR_MS_VIRT_EN_POR 0x00000002
25916 #define CTPR_MS_PG_SZ_MASK 0x10
25917 @@ -628,6 +627,8 @@ struct caam_job_ring {
25918 #define JRSTA_DECOERR_INVSIGN 0x86
25919 #define JRSTA_DECOERR_DSASIGN 0x87
25920
25921 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
25922 +
25923 #define JRSTA_CCBERR_JUMP 0x08000000
25924 #define JRSTA_CCBERR_INDEX_MASK 0xff00
25925 #define JRSTA_CCBERR_INDEX_SHIFT 8
25926 diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
25927 new file mode 100644
25928 index 00000000..3b3cabc4
25929 --- /dev/null
25930 +++ b/drivers/crypto/caam/sg_sw_qm.h
25931 @@ -0,0 +1,126 @@
25932 +/*
25933 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25934 + * Copyright 2016-2017 NXP
25935 + *
25936 + * Redistribution and use in source and binary forms, with or without
25937 + * modification, are permitted provided that the following conditions are met:
25938 + * * Redistributions of source code must retain the above copyright
25939 + * notice, this list of conditions and the following disclaimer.
25940 + * * Redistributions in binary form must reproduce the above copyright
25941 + * notice, this list of conditions and the following disclaimer in the
25942 + * documentation and/or other materials provided with the distribution.
25943 + * * Neither the name of Freescale Semiconductor nor the
25944 + * names of its contributors may be used to endorse or promote products
25945 + * derived from this software without specific prior written permission.
25946 + *
25947 + *
25948 + * ALTERNATIVELY, this software may be distributed under the terms of the
25949 + * GNU General Public License ("GPL") as published by the Free Software
25950 + * Foundation, either version 2 of that License or (at your option) any
25951 + * later version.
25952 + *
25953 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
25954 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25955 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25956 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25957 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25958 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25959 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25960 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25961 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25962 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25963 + */
25964 +
25965 +#ifndef __SG_SW_QM_H
25966 +#define __SG_SW_QM_H
25967 +
25968 +#include <linux/fsl_qman.h>
25969 +#include "regs.h"
25970 +
25971 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
25972 +{
25973 + dma_addr_t addr = qm_sg_ptr->opaque;
25974 +
25975 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
25976 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
25977 +}
25978 +
25979 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
25980 + u32 len, u16 offset)
25981 +{
25982 + qm_sg_ptr->addr = dma;
25983 + qm_sg_ptr->length = len;
25984 + qm_sg_ptr->__reserved2 = 0;
25985 + qm_sg_ptr->bpid = 0;
25986 + qm_sg_ptr->__reserved3 = 0;
25987 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
25988 +
25989 + cpu_to_hw_sg(qm_sg_ptr);
25990 +}
25991 +
25992 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
25993 + dma_addr_t dma, u32 len, u16 offset)
25994 +{
25995 + qm_sg_ptr->extension = 0;
25996 + qm_sg_ptr->final = 0;
25997 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25998 +}
25999 +
26000 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
26001 + dma_addr_t dma, u32 len, u16 offset)
26002 +{
26003 + qm_sg_ptr->extension = 0;
26004 + qm_sg_ptr->final = 1;
26005 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26006 +}
26007 +
26008 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
26009 + dma_addr_t dma, u32 len, u16 offset)
26010 +{
26011 + qm_sg_ptr->extension = 1;
26012 + qm_sg_ptr->final = 0;
26013 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26014 +}
26015 +
26016 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
26017 + dma_addr_t dma, u32 len,
26018 + u16 offset)
26019 +{
26020 + qm_sg_ptr->extension = 1;
26021 + qm_sg_ptr->final = 1;
26022 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26023 +}
26024 +
26025 +/*
26026 + * convert scatterlist to h/w link table format
26027 + * but does not set the final bit; instead, returns the last entry
26028 + */
26029 +static inline struct qm_sg_entry *
26030 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26031 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
26032 +{
26033 + while (sg_count && sg) {
26034 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26035 + sg_dma_len(sg), offset);
26036 + qm_sg_ptr++;
26037 + sg = sg_next(sg);
26038 + sg_count--;
26039 + }
26040 + return qm_sg_ptr - 1;
26041 +}
26042 +
26043 +/*
26044 + * convert scatterlist to h/w link table format
26045 + * scatterlist must have been previously dma mapped
26046 + */
26047 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26048 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
26049 +{
26050 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26051 +
26052 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
26053 + qm_sg_ptr->final = 1;
26054 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
26055 +}
26056 +
26057 +#endif /* __SG_SW_QM_H */
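A minimal caller sketch (illustrative only, not taken from the patch) of how the helpers above are meant to be used: DMA-map a scatterlist, convert it into a hardware S/G table with sg_to_qm_sg_last(), then map the table itself for the SEC engine. The function name, the tbl_out parameter and the 0-on-failure convention are assumptions of this sketch.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "sg_sw_qm.h"

static dma_addr_t example_map_qm_sg_table(struct device *dev,
					   struct scatterlist *sg, int nents,
					   struct qm_sg_entry **tbl_out)
{
	struct qm_sg_entry *table;
	dma_addr_t table_dma;
	int mapped;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return 0;

	table = kmalloc_array(mapped, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto unmap_sg;

	/* Fill one entry per mapped segment; the last one gets the final bit. */
	sg_to_qm_sg_last(sg, mapped, table, 0);

	/* The table itself must also be visible to the SEC engine. */
	table_dma = dma_map_single(dev, table, mapped * sizeof(*table),
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, table_dma))
		goto free_table;

	*tbl_out = table;
	return table_dma;

free_table:
	kfree(table);
unmap_sg:
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}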
26058 diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
26059 new file mode 100644
26060 index 00000000..31b44075
26061 --- /dev/null
26062 +++ b/drivers/crypto/caam/sg_sw_qm2.h
26063 @@ -0,0 +1,81 @@
26064 +/*
26065 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
26066 + * Copyright 2017 NXP
26067 + *
26068 + * Redistribution and use in source and binary forms, with or without
26069 + * modification, are permitted provided that the following conditions are met:
26070 + * * Redistributions of source code must retain the above copyright
26071 + * notice, this list of conditions and the following disclaimer.
26072 + * * Redistributions in binary form must reproduce the above copyright
26073 + * notice, this list of conditions and the following disclaimer in the
26074 + * documentation and/or other materials provided with the distribution.
26075 + * * Neither the names of the above-listed copyright holders nor the
26076 + * names of any contributors may be used to endorse or promote products
26077 + * derived from this software without specific prior written permission.
26078 + *
26079 + *
26080 + * ALTERNATIVELY, this software may be distributed under the terms of the
26081 + * GNU General Public License ("GPL") as published by the Free Software
26082 + * Foundation, either version 2 of that License or (at your option) any
26083 + * later version.
26084 + *
26085 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26086 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26087 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26088 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
26089 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26090 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26091 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26092 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26093 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26094 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26095 + * POSSIBILITY OF SUCH DAMAGE.
26096 + */
26097 +
26098 +#ifndef _SG_SW_QM2_H_
26099 +#define _SG_SW_QM2_H_
26100 +
26101 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26102 +
26103 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
26104 + dma_addr_t dma, u32 len, u16 offset)
26105 +{
26106 + dpaa2_sg_set_addr(qm_sg_ptr, dma);
26107 + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
26108 + dpaa2_sg_set_final(qm_sg_ptr, false);
26109 + dpaa2_sg_set_len(qm_sg_ptr, len);
26110 + dpaa2_sg_set_bpid(qm_sg_ptr, 0);
26111 + dpaa2_sg_set_offset(qm_sg_ptr, offset);
26112 +}
26113 +
26114 +/*
26115 + * convert scatterlist to h/w link table format
26116 + * but does not set the final bit; instead, returns the last entry
26117 + */
26118 +static inline struct dpaa2_sg_entry *
26119 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26120 + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
26121 +{
26122 + while (sg_count && sg) {
26123 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26124 + sg_dma_len(sg), offset);
26125 + qm_sg_ptr++;
26126 + sg = sg_next(sg);
26127 + sg_count--;
26128 + }
26129 + return qm_sg_ptr - 1;
26130 +}
26131 +
26132 +/*
26133 + * convert scatterlist to h/w link table format
26134 + * scatterlist must have been previously dma mapped
26135 + */
26136 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26137 + struct dpaa2_sg_entry *qm_sg_ptr,
26138 + u16 offset)
26139 +{
26140 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26141 + dpaa2_sg_set_final(qm_sg_ptr, true);
26142 +}
26143 +
26144 +#endif /* _SG_SW_QM2_H_ */
26145 diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
26146 index 41cd5a35..936b1b63 100644
26147 --- a/drivers/crypto/caam/sg_sw_sec4.h
26148 +++ b/drivers/crypto/caam/sg_sw_sec4.h
26149 @@ -5,9 +5,19 @@
26150 *
26151 */
26152
26153 +#ifndef _SG_SW_SEC4_H_
26154 +#define _SG_SW_SEC4_H_
26155 +
26156 +#include "ctrl.h"
26157 #include "regs.h"
26158 +#include "sg_sw_qm2.h"
26159 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26160
26161 -struct sec4_sg_entry;
26162 +struct sec4_sg_entry {
26163 + u64 ptr;
26164 + u32 len;
26165 + u32 bpid_offset;
26166 +};
26167
26168 /*
26169 * convert single dma address to h/w link table format
26170 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
26171 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
26172 dma_addr_t dma, u32 len, u16 offset)
26173 {
26174 - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26175 - sec4_sg_ptr->len = cpu_to_caam32(len);
26176 - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
26177 + if (caam_dpaa2) {
26178 + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
26179 + offset);
26180 + } else {
26181 + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26182 + sec4_sg_ptr->len = cpu_to_caam32(len);
26183 + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
26184 + SEC4_SG_OFFSET_MASK);
26185 + }
26186 #ifdef DEBUG
26187 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
26188 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
26189 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
26190 return sec4_sg_ptr - 1;
26191 }
26192
26193 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
26194 +{
26195 + if (caam_dpaa2)
26196 + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
26197 + else
26198 + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26199 +}
26200 +
26201 /*
26202 * convert scatterlist to h/w link table format
26203 * scatterlist must have been previously dma mapped
26204 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
26205 u16 offset)
26206 {
26207 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
26208 - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26209 -}
26210 -
26211 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
26212 - struct scatterlist *sg, unsigned int total,
26213 - struct sec4_sg_entry *sec4_sg_ptr)
26214 -{
26215 - do {
26216 - unsigned int len = min(sg_dma_len(sg), total);
26217 -
26218 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
26219 - sec4_sg_ptr++;
26220 - sg = sg_next(sg);
26221 - total -= len;
26222 - } while (total);
26223 - return sec4_sg_ptr - 1;
26224 + sg_to_sec4_set_last(sec4_sg_ptr);
26225 }
26226
26227 -/* derive number of elements in scatterlist, but return 0 for 1 */
26228 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
26229 -{
26230 - int sg_nents = sg_nents_for_len(sg_list, nbytes);
26231 -
26232 - if (likely(sg_nents == 1))
26233 - return 0;
26234 -
26235 - return sg_nents;
26236 -}
26237 +#endif /* _SG_SW_SEC4_H_ */
26238 diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
26239 index ef5d394f..cc8deece 100644
26240 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
26241 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
26242 @@ -516,7 +516,7 @@ static int rsi_probe(struct usb_interface *pfunction,
26243
26244 /**
26245 * rsi_disconnect() - This function performs the reverse of the probe function,
26246 - * it deintialize the driver structure.
26247 + * it deinitializes the driver structure.
26248 * @pfunction: Pointer to the USB interface structure.
26249 *
26250 * Return: None.
26251 diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
26252 index defffa75..ec88ed9c 100644
26253 --- a/drivers/staging/wilc1000/linux_wlan.c
26254 +++ b/drivers/staging/wilc1000/linux_wlan.c
26255 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device *dev)
26256 vif = netdev_priv(dev);
26257 wilc = vif->wilc;
26258
26259 - /* Deintialize IRQ */
26260 + /* Deinitialize IRQ */
26261 if (wilc->dev_irq_num) {
26262 free_irq(wilc->dev_irq_num, wilc);
26263 gpio_free(wilc->gpio);
26264 diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26265 index 60d8b055..02d3e721 100644
26266 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26267 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26268 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_device *net)
26269 del_timer_sync(&wilc_during_ip_timer);
26270
26271 if (s32Error)
26272 - netdev_err(net, "Error while deintializing host interface\n");
26273 + netdev_err(net, "Error while deinitializing host interface\n");
26274
26275 return s32Error;
26276 }
26277 diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
26278 new file mode 100644
26279 index 00000000..e328b524
26280 --- /dev/null
26281 +++ b/include/crypto/acompress.h
26282 @@ -0,0 +1,269 @@
26283 +/*
26284 + * Asynchronous Compression operations
26285 + *
26286 + * Copyright (c) 2016, Intel Corporation
26287 + * Authors: Weigang Li <weigang.li@intel.com>
26288 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26289 + *
26290 + * This program is free software; you can redistribute it and/or modify it
26291 + * under the terms of the GNU General Public License as published by the Free
26292 + * Software Foundation; either version 2 of the License, or (at your option)
26293 + * any later version.
26294 + *
26295 + */
26296 +#ifndef _CRYPTO_ACOMP_H
26297 +#define _CRYPTO_ACOMP_H
26298 +#include <linux/crypto.h>
26299 +
26300 +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
26301 +
26302 +/**
26303 + * struct acomp_req - asynchronous (de)compression request
26304 + *
26305 + * @base: Common attributes for asynchronous crypto requests
26306 + * @src: Source Data
26307 + * @dst: Destination data
26308 + * @slen: Size of the input buffer
26309 + * @dlen: Size of the output buffer and number of bytes produced
26310 + * @flags: Internal flags
26311 + * @__ctx: Start of private context data
26312 + */
26313 +struct acomp_req {
26314 + struct crypto_async_request base;
26315 + struct scatterlist *src;
26316 + struct scatterlist *dst;
26317 + unsigned int slen;
26318 + unsigned int dlen;
26319 + u32 flags;
26320 + void *__ctx[] CRYPTO_MINALIGN_ATTR;
26321 +};
26322 +
26323 +/**
26324 + * struct crypto_acomp - user-instantiated objects which encapsulate
26325 + * algorithms and core processing logic
26326 + *
26327 + * @compress: Function performs a compress operation
26328 + * @decompress: Function performs a de-compress operation
26329 + * @dst_free: Frees destination buffer if allocated inside the
26330 + * algorithm
26331 + * @reqsize: Context size for (de)compression requests
26332 + * @base: Common crypto API algorithm data structure
26333 + */
26334 +struct crypto_acomp {
26335 + int (*compress)(struct acomp_req *req);
26336 + int (*decompress)(struct acomp_req *req);
26337 + void (*dst_free)(struct scatterlist *dst);
26338 + unsigned int reqsize;
26339 + struct crypto_tfm base;
26340 +};
26341 +
26342 +/**
26343 + * struct acomp_alg - asynchronous compression algorithm
26344 + *
26345 + * @compress: Function performs a compress operation
26346 + * @decompress: Function performs a de-compress operation
26347 + * @dst_free: Frees destination buffer if allocated inside the algorithm
26348 + * @init: Initialize the cryptographic transformation object.
26349 + * This function is used to initialize the cryptographic
26350 + * transformation object. This function is called only once at
26351 + * the instantiation time, right after the transformation context
26352 + * was allocated. In case the cryptographic hardware has some
26353 + * special requirements which need to be handled by software, this
26354 + * function shall check for the precise requirement of the
26355 + * transformation and put any software fallbacks in place.
26356 + * @exit: Deinitialize the cryptographic transformation object. This is a
26357 + * counterpart to @init, used to remove various changes set in
26358 + * @init.
26359 + *
26360 + * @reqsize: Context size for (de)compression requests
26361 + * @base: Common crypto API algorithm data structure
26362 + */
26363 +struct acomp_alg {
26364 + int (*compress)(struct acomp_req *req);
26365 + int (*decompress)(struct acomp_req *req);
26366 + void (*dst_free)(struct scatterlist *dst);
26367 + int (*init)(struct crypto_acomp *tfm);
26368 + void (*exit)(struct crypto_acomp *tfm);
26369 + unsigned int reqsize;
26370 + struct crypto_alg base;
26371 +};
26372 +
26373 +/**
26374 + * DOC: Asynchronous Compression API
26375 + *
26376 + * The Asynchronous Compression API is used with the algorithms of type
26377 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
26378 + */
26379 +
26380 +/**
26381 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
26382 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
26383 + * compression algorithm e.g. "deflate"
26384 + * @type: specifies the type of the algorithm
26385 + * @mask: specifies the mask for the algorithm
26386 + *
26387 + * Allocate a handle for a compression algorithm. The returned struct
26388 + * crypto_acomp is the handle that is required for any subsequent
26389 + * API invocation for the compression operations.
26390 + *
26391 + * Return: allocated handle in case of success; IS_ERR() is true in case
26392 + * of an error, PTR_ERR() returns the error code.
26393 + */
26394 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
26395 + u32 mask);
26396 +
26397 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
26398 +{
26399 + return &tfm->base;
26400 +}
26401 +
26402 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
26403 +{
26404 + return container_of(alg, struct acomp_alg, base);
26405 +}
26406 +
26407 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
26408 +{
26409 + return container_of(tfm, struct crypto_acomp, base);
26410 +}
26411 +
26412 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
26413 +{
26414 + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
26415 +}
26416 +
26417 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
26418 +{
26419 + return tfm->reqsize;
26420 +}
26421 +
26422 +static inline void acomp_request_set_tfm(struct acomp_req *req,
26423 + struct crypto_acomp *tfm)
26424 +{
26425 + req->base.tfm = crypto_acomp_tfm(tfm);
26426 +}
26427 +
26428 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
26429 +{
26430 + return __crypto_acomp_tfm(req->base.tfm);
26431 +}
26432 +
26433 +/**
26434 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
26435 + *
26436 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26437 + */
26438 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
26439 +{
26440 + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
26441 +}
26442 +
26443 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
26444 +{
26445 + type &= ~CRYPTO_ALG_TYPE_MASK;
26446 + type |= CRYPTO_ALG_TYPE_ACOMPRESS;
26447 + mask |= CRYPTO_ALG_TYPE_MASK;
26448 +
26449 + return crypto_has_alg(alg_name, type, mask);
26450 +}
26451 +
26452 +/**
26453 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
26454 + *
26455 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26456 + *
26457 + * Return: allocated handle in case of success or NULL in case of an error
26458 + */
26459 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
26460 +
26461 +/**
26462 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
26463 + * request as well as the output buffer if allocated
26464 + * inside the algorithm
26465 + *
26466 + * @req: request to free
26467 + */
26468 +void acomp_request_free(struct acomp_req *req);
26469 +
26470 +/**
26471 + * acomp_request_set_callback() -- Sets an asynchronous callback
26472 + *
26473 + * Callback will be called when an asynchronous operation on a given
26474 + * request is finished.
26475 + *
26476 + * @req: request that the callback will be set for
26477 + * @flgs: specify for instance if the operation may backlog
26478 + * @cmpl: callback which will be called
26479 + * @data: private data used by the caller
26480 + */
26481 +static inline void acomp_request_set_callback(struct acomp_req *req,
26482 + u32 flgs,
26483 + crypto_completion_t cmpl,
26484 + void *data)
26485 +{
26486 + req->base.complete = cmpl;
26487 + req->base.data = data;
26488 + req->base.flags = flgs;
26489 +}
26490 +
26491 +/**
26492 + * acomp_request_set_params() -- Sets request parameters
26493 + *
26494 + * Sets parameters required by an acomp operation
26495 + *
26496 + * @req: asynchronous compress request
26497 + * @src: pointer to input buffer scatterlist
26498 + * @dst: pointer to output buffer scatterlist. If this is NULL, the
26499 + * acomp layer will allocate the output memory
26500 + * @slen: size of the input buffer
26501 + * @dlen: size of the output buffer. If dst is NULL, this can be used by
26502 + * the user to specify the maximum amount of memory to allocate
26503 + */
26504 +static inline void acomp_request_set_params(struct acomp_req *req,
26505 + struct scatterlist *src,
26506 + struct scatterlist *dst,
26507 + unsigned int slen,
26508 + unsigned int dlen)
26509 +{
26510 + req->src = src;
26511 + req->dst = dst;
26512 + req->slen = slen;
26513 + req->dlen = dlen;
26514 +
26515 + if (!req->dst)
26516 + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
26517 +}
26518 +
26519 +/**
26520 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
26521 + *
26522 + * Function invokes the asynchronous compress operation
26523 + *
26524 + * @req: asynchronous compress request
26525 + *
26526 + * Return: zero on success; error code in case of error
26527 + */
26528 +static inline int crypto_acomp_compress(struct acomp_req *req)
26529 +{
26530 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26531 +
26532 + return tfm->compress(req);
26533 +}
26534 +
26535 +/**
26536 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
26537 + *
26538 + * Function invokes the asynchronous decompress operation
26539 + *
26540 + * @req: asynchronous compress request
26541 + *
26542 + * Return: zero on success; error code in case of error
26543 + */
26544 +static inline int crypto_acomp_decompress(struct acomp_req *req)
26545 +{
26546 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26547 +
26548 + return tfm->decompress(req);
26549 +}
26550 +
26551 +#endif
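A rough caller sketch (illustrative, not part of the patch) showing the intended flow of the acomp API declared above; the "deflate" algorithm name, the linear buffers and the function name are assumptions of this sketch.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

static int example_acomp_compress(const void *in, unsigned int in_len,
				  void *out, unsigned int out_len)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&src, in, in_len);
	sg_init_one(&dst, out, out_len);
	acomp_request_set_params(req, &src, &dst, in_len, out_len);

	/*
	 * A real caller would set a completion callback with
	 * acomp_request_set_callback() and handle -EINPROGRESS;
	 * this sketch only issues the request.
	 */
	ret = crypto_acomp_compress(req);
	/* On success, req->dlen holds the number of bytes produced. */

	acomp_request_free(req);
out_tfm:
	crypto_free_acomp(tfm);
	return ret;
}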
26552 diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
26553 new file mode 100644
26554 index 00000000..1de2b5af
26555 --- /dev/null
26556 +++ b/include/crypto/internal/acompress.h
26557 @@ -0,0 +1,81 @@
26558 +/*
26559 + * Asynchronous Compression operations
26560 + *
26561 + * Copyright (c) 2016, Intel Corporation
26562 + * Authors: Weigang Li <weigang.li@intel.com>
26563 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26564 + *
26565 + * This program is free software; you can redistribute it and/or modify it
26566 + * under the terms of the GNU General Public License as published by the Free
26567 + * Software Foundation; either version 2 of the License, or (at your option)
26568 + * any later version.
26569 + *
26570 + */
26571 +#ifndef _CRYPTO_ACOMP_INT_H
26572 +#define _CRYPTO_ACOMP_INT_H
26573 +#include <crypto/acompress.h>
26574 +
26575 +/*
26576 + * Transform internal helpers.
26577 + */
26578 +static inline void *acomp_request_ctx(struct acomp_req *req)
26579 +{
26580 + return req->__ctx;
26581 +}
26582 +
26583 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
26584 +{
26585 + return tfm->base.__crt_ctx;
26586 +}
26587 +
26588 +static inline void acomp_request_complete(struct acomp_req *req,
26589 + int err)
26590 +{
26591 + req->base.complete(&req->base, err);
26592 +}
26593 +
26594 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
26595 +{
26596 + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
26597 +}
26598 +
26599 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
26600 +{
26601 + struct acomp_req *req;
26602 +
26603 + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
26604 + if (likely(req))
26605 + acomp_request_set_tfm(req, tfm);
26606 + return req;
26607 +}
26608 +
26609 +static inline void __acomp_request_free(struct acomp_req *req)
26610 +{
26611 + kzfree(req);
26612 +}
26613 +
26614 +/**
26615 + * crypto_register_acomp() -- Register asynchronous compression algorithm
26616 + *
26617 + * Function registers an implementation of an asynchronous
26618 + * compression algorithm
26619 + *
26620 + * @alg: algorithm definition
26621 + *
26622 + * Return: zero on success; error code in case of error
26623 + */
26624 +int crypto_register_acomp(struct acomp_alg *alg);
26625 +
26626 +/**
26627 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
26628 + *
26629 + * Function unregisters an implementation of an asynchronous
26630 + * compression algorithm
26631 + *
26632 + * @alg: algorithm definition
26633 + *
26634 + * Return: zero on success; error code in case of error
26635 + */
26636 +int crypto_unregister_acomp(struct acomp_alg *alg);
26637 +
26638 +#endif
26639 diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
26640 new file mode 100644
26641 index 00000000..3fda3c56
26642 --- /dev/null
26643 +++ b/include/crypto/internal/scompress.h
26644 @@ -0,0 +1,136 @@
26645 +/*
26646 + * Synchronous Compression operations
26647 + *
26648 + * Copyright 2015 LG Electronics Inc.
26649 + * Copyright (c) 2016, Intel Corporation
26650 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26651 + *
26652 + * This program is free software; you can redistribute it and/or modify it
26653 + * under the terms of the GNU General Public License as published by the Free
26654 + * Software Foundation; either version 2 of the License, or (at your option)
26655 + * any later version.
26656 + *
26657 + */
26658 +#ifndef _CRYPTO_SCOMP_INT_H
26659 +#define _CRYPTO_SCOMP_INT_H
26660 +#include <linux/crypto.h>
26661 +
26662 +#define SCOMP_SCRATCH_SIZE 131072
26663 +
26664 +struct crypto_scomp {
26665 + struct crypto_tfm base;
26666 +};
26667 +
26668 +/**
26669 + * struct scomp_alg - synchronous compression algorithm
26670 + *
26671 + * @alloc_ctx: Function allocates algorithm specific context
26672 + * @free_ctx: Function frees context allocated with alloc_ctx
26673 + * @compress: Function performs a compress operation
26674 + * @decompress: Function performs a de-compress operation
26675 + * @init: Initialize the cryptographic transformation object.
26676 + * This function is used to initialize the cryptographic
26677 + * transformation object. This function is called only once at
26678 + * the instantiation time, right after the transformation context
26679 + * was allocated. In case the cryptographic hardware has some
26680 + * special requirements which need to be handled by software, this
26681 + * function shall check for the precise requirement of the
26682 + * transformation and put any software fallbacks in place.
26683 + * @exit: Deinitialize the cryptographic transformation object. This is a
26684 + * counterpart to @init, used to remove various changes set in
26685 + * @init.
26686 + * @base: Common crypto API algorithm data structure
26687 + */
26688 +struct scomp_alg {
26689 + void *(*alloc_ctx)(struct crypto_scomp *tfm);
26690 + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
26691 + int (*compress)(struct crypto_scomp *tfm, const u8 *src,
26692 + unsigned int slen, u8 *dst, unsigned int *dlen,
26693 + void *ctx);
26694 + int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
26695 + unsigned int slen, u8 *dst, unsigned int *dlen,
26696 + void *ctx);
26697 + struct crypto_alg base;
26698 +};
26699 +
26700 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
26701 +{
26702 + return container_of(alg, struct scomp_alg, base);
26703 +}
26704 +
26705 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
26706 +{
26707 + return container_of(tfm, struct crypto_scomp, base);
26708 +}
26709 +
26710 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
26711 +{
26712 + return &tfm->base;
26713 +}
26714 +
26715 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
26716 +{
26717 + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
26718 +}
26719 +
26720 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
26721 +{
26722 + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
26723 +}
26724 +
26725 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
26726 +{
26727 + return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
26728 +}
26729 +
26730 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
26731 + void *ctx)
26732 +{
26733 + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
26734 +}
26735 +
26736 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
26737 + const u8 *src, unsigned int slen,
26738 + u8 *dst, unsigned int *dlen, void *ctx)
26739 +{
26740 + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
26741 +}
26742 +
26743 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
26744 + const u8 *src, unsigned int slen,
26745 + u8 *dst, unsigned int *dlen,
26746 + void *ctx)
26747 +{
26748 + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
26749 + ctx);
26750 +}
26751 +
26752 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
26753 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
26754 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
26755 +
26756 +/**
26757 + * crypto_register_scomp() -- Register synchronous compression algorithm
26758 + *
26759 + * Function registers an implementation of a synchronous
26760 + * compression algorithm
26761 + *
26762 + * @alg: algorithm definition
26763 + *
26764 + * Return: zero on success; error code in case of error
26765 + */
26766 +int crypto_register_scomp(struct scomp_alg *alg);
26767 +
26768 +/**
26769 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
26770 + *
26771 + * Function unregisters an implementation of a synchronous
26772 + * compression algorithm
26773 + *
26774 + * @alg: algorithm definition
26775 + *
26776 + * Return: zero on success; error code in case of error
26777 + */
26778 +int crypto_unregister_scomp(struct scomp_alg *alg);
26779 +
26780 +#endif
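A hypothetical driver sketch (not part of the patch) of how an implementation might plug into the synchronous compression interface above: a do-nothing "compressor" that just copies input to output. All names here are made up for illustration; real drivers would allocate a workspace in alloc_ctx().

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

static void *null_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;	/* no per-request workspace needed for a plain copy */
}

static void null_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int null_scomp_copy(struct crypto_scomp *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int *dlen,
			   void *ctx)
{
	if (slen > *dlen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg null_scomp = {
	.alloc_ctx	= null_scomp_alloc_ctx,
	.free_ctx	= null_scomp_free_ctx,
	.compress	= null_scomp_copy,
	.decompress	= null_scomp_copy,
	.base		= {
		.cra_name		= "null-scomp-example",
		.cra_driver_name	= "null-scomp-example-generic",
		.cra_module		= THIS_MODULE,
	},
};

static int __init null_scomp_init(void)
{
	return crypto_register_scomp(&null_scomp);
}

static void __exit null_scomp_exit(void)
{
	crypto_unregister_scomp(&null_scomp);
}

module_init(null_scomp_init);
module_exit(null_scomp_exit);
MODULE_LICENSE("GPL");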
26781 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
26782 index 7cee5551..8348d83d 100644
26783 --- a/include/linux/crypto.h
26784 +++ b/include/linux/crypto.h
26785 @@ -50,6 +50,8 @@
26786 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
26787 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
26788 #define CRYPTO_ALG_TYPE_KPP 0x00000008
26789 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
26790 +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
26791 #define CRYPTO_ALG_TYPE_RNG 0x0000000c
26792 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
26793 #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
26794 @@ -60,6 +62,7 @@
26795 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
26796 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
26797 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
26798 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
26799
26800 #define CRYPTO_ALG_LARVAL 0x00000010
26801 #define CRYPTO_ALG_DEAD 0x00000020
26802 diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
26803 index 79b5ded2..11d21fce 100644
26804 --- a/include/uapi/linux/cryptouser.h
26805 +++ b/include/uapi/linux/cryptouser.h
26806 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
26807 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
26808 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
26809 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
26810 + CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
26811 __CRYPTOCFGA_MAX
26812
26813 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
26814 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
26815 char type[CRYPTO_MAX_NAME];
26816 };
26817
26818 +struct crypto_report_acomp {
26819 + char type[CRYPTO_MAX_NAME];
26820 +};
26821 +
26822 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
26823 sizeof(struct crypto_report_blkcipher))
26824 diff --git a/scripts/spelling.txt b/scripts/spelling.txt
26825 index 163c720d..8392f89c 100644
26826 --- a/scripts/spelling.txt
26827 +++ b/scripts/spelling.txt
26828 @@ -305,6 +305,9 @@ defintion||definition
26829 defintions||definitions
26830 defualt||default
26831 defult||default
26832 +deintializing||deinitializing
26833 +deintialize||deinitialize
26834 +deintialized||deinitialized
26835 deivce||device
26836 delared||declared
26837 delare||declare
26838 diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
26839 index 504c7cd7..d8577374 100644
26840 --- a/sound/soc/amd/acp-pcm-dma.c
26841 +++ b/sound/soc/amd/acp-pcm-dma.c
26842 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mmio)
26843 return 0;
26844 }
26845
26846 -/* Deintialize ACP */
26847 +/* Deinitialize ACP */
26848 static int acp_deinit(void __iomem *acp_mmio)
26849 {
26850 u32 val;
26851 --
26852 2.14.1
26853