ltq-deu: remove redundant code for setting the key in aes
[openwrt/staging/dedeckeh.git] / package / kernel / lantiq / ltq-deu / src / ifxmips_aes.c
1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_aes.c
4 ** PROJECT : IFX UEIP
5 ** MODULES : DEU Module
6 **
7 ** DATE : September 8, 2009
8 ** AUTHOR : Mohammad Firdaus
9 ** DESCRIPTION : Data Encryption Unit Driver for AES Algorithm
10 ** COPYRIGHT : Copyright (c) 2009
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
18 **
19 ** HISTORY
20 ** $Date $Author $Comment
21 ** 08,Sept 2009 Mohammad Firdaus Initial UEIP release
22 *******************************************************************************/
23 /*!
24 \defgroup IFX_DEU IFX_DEU_DRIVERS
25 \ingroup API
26 \brief ifx DEU driver module
27 */
28
29 /*!
30 \file ifxmips_aes.c
31 \ingroup IFX_DEU
32 \brief AES Encryption Driver main file
33 */
34
35 /*!
36 \defgroup IFX_AES_FUNCTIONS IFX_AES_FUNCTIONS
37 \ingroup IFX_DEU
38 \brief IFX AES driver Functions
39 */
40
41
42 /* Project Header Files */
43 #if defined(CONFIG_MODVERSIONS)
44 #define MODVERSIONS
45 #include <linux/modeversions>
46 #endif
47
48 #include <linux/version.h>
49 #include <linux/module.h>
50 #include <linux/init.h>
51 #include <linux/proc_fs.h>
52 #include <linux/fs.h>
53 #include <linux/types.h>
54 #include <linux/errno.h>
55 #include <linux/crypto.h>
56 #include <linux/interrupt.h>
57 #include <linux/delay.h>
58 #include <asm/byteorder.h>
59 #include <crypto/algapi.h>
60 #include <crypto/b128ops.h>
61 #include <crypto/gf128mul.h>
62 #include <crypto/scatterwalk.h>
63 #include <crypto/xts.h>
64 #include <crypto/internal/hash.h>
65 #include <crypto/internal/skcipher.h>
66
67 #include "ifxmips_deu.h"
68
69 #if defined(CONFIG_DANUBE)
70 #include "ifxmips_deu_danube.h"
71 extern int ifx_danube_pre_1_4;
72 #elif defined(CONFIG_AR9)
73 #include "ifxmips_deu_ar9.h"
74 #elif defined(CONFIG_VR9) || defined(CONFIG_AR10)
75 #include "ifxmips_deu_vr9.h"
76 #else
77 #error "Unkown platform"
78 #endif
79
80 /* DMA related header and variables */
81
82 spinlock_t aes_lock;
83 #define CRTCL_SECT_INIT spin_lock_init(&aes_lock)
84 #define CRTCL_SECT_START spin_lock_irqsave(&aes_lock, flag)
85 #define CRTCL_SECT_END spin_unlock_irqrestore(&aes_lock, flag)
86
87 /* Definition of constants */
88 #define AES_START IFX_AES_CON
89 #define AES_MIN_KEY_SIZE 16
90 #define AES_MAX_KEY_SIZE 32
91 #define AES_BLOCK_SIZE 16
92 #define AES_BLOCK_WORDS 4
93 #define CTR_RFC3686_NONCE_SIZE 4
94 #define CTR_RFC3686_IV_SIZE 8
95 #define CTR_RFC3686_MIN_KEY_SIZE (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
96 #define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
97 #define AES_CBCMAC_DBN_TEMP_SIZE 128
98
99 #ifdef CRYPTO_DEBUG
100 extern char debug_level;
101 #define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
102 #else
103 #define DPRINTF(level, format, args...)
104 #endif /* CRYPTO_DEBUG */
105
106 /* Function declarations */
107 int aes_chip_init(void);
108 u32 endian_swap(u32 input);
109 u32 input_swap(u32 input);
110 u32* memory_alignment(const u8 *arg, u32 *buff_alloc, int in_out, int nbytes);
111 void aes_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
112 void des_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
113 int aes_memory_allocate(int value);
114 int des_memory_allocate(int value);
115 void memory_release(u32 *addr);
116
117
118 extern void ifx_deu_aes (void *ctx_arg, uint8_t *out_arg, const uint8_t *in_arg,
119 uint8_t *iv_arg, size_t nbytes, int encdec, int mode);
120 /* End of function declarations */
121
122 struct aes_ctx {
123 int key_length;
124 u8 buf[AES_MAX_KEY_SIZE];
125 u8 tweakkey[AES_MAX_KEY_SIZE];
126 u8 nonce[CTR_RFC3686_NONCE_SIZE];
127 u8 lastbuffer[4 * XTS_BLOCK_SIZE];
128 int use_tweak;
129 u32 byte_count;
130 u32 dbn;
131 int started;
132 u32 (*temp)[AES_BLOCK_WORDS];
133 u8 block[AES_BLOCK_SIZE];
134 u8 hash[AES_BLOCK_SIZE];
135 };
136
137 extern int disable_deudma;
138 extern int disable_multiblock;
139
140 /*! \fn int aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
141 * \ingroup IFX_AES_FUNCTIONS
142 * \brief sets the AES keys
143 * \param tfm linux crypto algo transform
144 * \param in_key input key
145 * \param key_len key lengths of 16, 24 and 32 bytes supported
146 * \return -EINVAL - bad key length, 0 - SUCCESS
147 */
148 int aes_set_key (struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
149 {
150 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
151
152 //printk("set_key in %s\n", __FILE__);
153
154 //aes_chip_init();
155
156 if (key_len != 16 && key_len != 24 && key_len != 32) {
157 return -EINVAL;
158 }
159
160 ctx->key_length = key_len;
161 ctx->use_tweak = 0;
162 DPRINTF(0, "ctx @%p, key_len %d, ctx->key_length %d\n", ctx, key_len, ctx->key_length);
163 memcpy ((u8 *) (ctx->buf), in_key, key_len);
164
165 return 0;
166 }
167
168
169 /*! \fn int aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
170 * \ingroup IFX_AES_FUNCTIONS
171 * \brief sets the AES keys for skcipher
172 * \param tfm linux crypto skcipher
173 * \param in_key input key
174 * \param key_len key lengths of 16, 24 and 32 bytes supported
175 * \return -EINVAL - bad key length, 0 - SUCCESS
176 */
177 int aes_set_key_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
178 {
179 return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
180 }
181
182
183 /*! \fn void aes_set_key_hw (void *ctx_arg)
184  *  \ingroup IFX_AES_FUNCTIONS
185  *  \brief sets the AES key to the hardware, requires spinlock to be set by caller
186  *  \param ctx_arg crypto algo context
187  *  \return
188 */
void aes_set_key_hw (void *ctx_arg)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    u8 *in_key = ctx->buf;
    int key_len = ctx->key_length;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

    /* XTS tweak processing encrypts with the second (tweak) key half */
    if (ctx->use_tweak) in_key = ctx->tweakkey;

    /* 128, 192 or 256 bit key length */
    aes->controlr.K = key_len / 8 - 2;
    /* Key words are loaded most-significant-word first (K3R..K0R for a
       128-bit key). NOTE(review): the (u32 *) casts assume ctx->buf /
       ctx->tweakkey are 4-byte aligned -- TODO confirm struct layout. */
    if (key_len == 128 / 8) {
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
    }
    else if (key_len == 192 / 8) {
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
    }
    else if (key_len == 256 / 8) {
        aes->K7R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K6R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 6));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 7));
    }
    else {
        /* cannot happen if setkey validated the length, but guard anyway */
        printk (KERN_ERR "[%s %s %d]: Invalid key_len : %d\n", __FILE__, __func__, __LINE__, key_len);
        return; //-EINVAL;
    }

    /* let HW pre-process DEcryption key in any case (even if
       ENcryption is used). Key Valid (KV) bit is then only
       checked in decryption routine! */
    aes->controlr.PNK = 1;

}
237
238
239 /*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
240 * \ingroup IFX_AES_FUNCTIONS
241 * \brief main interface to AES hardware
242 * \param ctx_arg crypto algo context
243 * \param out_arg output bytestream
244 * \param in_arg input bytestream
245 * \param iv_arg initialization vector
246 * \param nbytes length of bytestream
247 * \param encdec 1 for encrypt; 0 for decrypt
248 * \param mode operation mode such as ebc, cbc, ctr
249 *
250 */
void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
        u8 *iv_arg, size_t nbytes, int encdec, int mode)

{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    unsigned long flag;     /* consumed by the CRTCL_SECT_* irqsave macros */
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    int i = 0;              /* 16-byte blocks processed so far */
    int byte_cnt = nbytes;  /* bytes still to feed to the engine */

    CRTCL_SECT_START;

    /* (re)load the key on every call -- the DEU is shared across contexts */
    aes_set_key_hw (ctx_arg);

    aes->controlr.E_D = !encdec;    //encryption
    aes->controlr.O = mode; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR

    //aes->controlr.F = 128; //default; only for CFB and OFB modes; change only for customer-specific apps
    /* chained modes (CBC/OFB/CFB/CTR) need the IV preloaded, MSW first */
    if (mode > 0) {
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
    };


    i = 0;
    while (byte_cnt >= 16) {

        /* writing ID0R last triggers the crypto operation */
        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3)); /* start crypto */

        /* busy-wait until the engine clears the BUS flag */
        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
        *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
        *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
        *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;

        i++;
        byte_cnt -= 16;
    }

    /* To handle all non-aligned bytes (not aligned to 16B size) */
    /* NOTE(review): the tail is zero-padded to a full block and only
       byte_cnt output bytes are copied back -- meaningful for the
       stream-like modes (OFB/CFB/CTR); block-mode callers appear to pass
       whole blocks only. TODO confirm against callers. */
    if (byte_cnt) {
        u8 temparea[16] = {0,};

        memcpy(temparea, ((u32 *) in_arg + (i * 4)), byte_cnt);

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 3)); /* start crypto */

        while (aes->controlr.BUS) {
        }

        *((volatile u32 *) temparea + 0) = aes->OD3R;
        *((volatile u32 *) temparea + 1) = aes->OD2R;
        *((volatile u32 *) temparea + 2) = aes->OD1R;
        *((volatile u32 *) temparea + 3) = aes->OD0R;

        memcpy(((u32 *) out_arg + (i * 4)), temparea, byte_cnt);
    }

    //tc.chen : copy iv_arg back
    /* export the advanced IV so the caller can chain subsequent calls */
    if (mode > 0) {
        *((u32 *) iv_arg) = DEU_ENDIAN_SWAP(aes->IV3R);
        *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(aes->IV2R);
        *((u32 *) iv_arg + 2) = DEU_ENDIAN_SWAP(aes->IV1R);
        *((u32 *) iv_arg + 3) = DEU_ENDIAN_SWAP(aes->IV0R);
    }

    CRTCL_SECT_END;
}
332
333 /*!
334 * \fn int ctr_rfc3686_aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
335 * \ingroup IFX_AES_FUNCTIONS
336 * \brief sets RFC3686 key
337 * \param tfm linux crypto algo transform
338 * \param in_key input key
339 * \param key_len key lengths of 20, 28 and 36 bytes supported; last 4 bytes is nonce
340 * \return 0 - SUCCESS
341 * -EINVAL - bad key length
342 */
343 int ctr_rfc3686_aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
344 {
345 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
346
347 //printk("ctr_rfc3686_aes_set_key in %s\n", __FILE__);
348
349 memcpy(ctx->nonce, in_key + (key_len - CTR_RFC3686_NONCE_SIZE),
350 CTR_RFC3686_NONCE_SIZE);
351
352 key_len -= CTR_RFC3686_NONCE_SIZE; // remove 4 bytes of nonce
353
354 if (key_len != 16 && key_len != 24 && key_len != 32) {
355 return -EINVAL;
356 }
357
358 ctx->key_length = key_len;
359 ctx->use_tweak = 0;
360
361 memcpy ((u8 *) (ctx->buf), in_key, key_len);
362
363 return 0;
364 }
365
366 /*!
367 * \fn int ctr_rfc3686_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
368 * \ingroup IFX_AES_FUNCTIONS
369 * \brief sets RFC3686 key for skcipher
370 * \param tfm linux crypto skcipher
371 * \param in_key input key
372 * \param key_len key lengths of 20, 28 and 36 bytes supported; last 4 bytes is nonce
373 * \return 0 - SUCCESS
374 * -EINVAL - bad key length
375 */
376 int ctr_rfc3686_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
377 {
378 return ctr_rfc3686_aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
379 }
380
381 /*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
382 * \ingroup IFX_AES_FUNCTIONS
383 * \brief main interface with deu hardware in DMA mode
384 * \param ctx_arg crypto algo context
385 * \param out_arg output bytestream
386 * \param in_arg input bytestream
387 * \param iv_arg initialization vector
388 * \param nbytes length of bytestream
389 * \param encdec 1 for encrypt; 0 for decrypt
390 * \param mode operation mode such as ebc, cbc, ctr
391 */
392
393
394 //definitions from linux/include/crypto.h:
395 //#define CRYPTO_TFM_MODE_ECB 0x00000001
396 //#define CRYPTO_TFM_MODE_CBC 0x00000002
397 //#define CRYPTO_TFM_MODE_CFB 0x00000004
398 //#define CRYPTO_TFM_MODE_CTR 0x00000008
399 //#define CRYPTO_TFM_MODE_OFB 0x00000010 // not even defined
400 //but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
401
402 /*! \fn void ifx_deu_aes_ecb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
403 * \ingroup IFX_AES_FUNCTIONS
404 * \brief sets AES hardware to ECB mode
405 * \param ctx crypto algo context
406 * \param dst output bytestream
407 * \param src input bytestream
408 * \param iv initialization vector
409 * \param nbytes length of bytestream
410 * \param encdec 1 for encrypt; 0 for decrypt
411 * \param inplace not used
412 */
413 void ifx_deu_aes_ecb (void *ctx, uint8_t *dst, const uint8_t *src,
414 uint8_t *iv, size_t nbytes, int encdec, int inplace)
415 {
416 ifx_deu_aes (ctx, dst, src, NULL, nbytes, encdec, 0);
417 }
418
419 /*! \fn void ifx_deu_aes_cbc (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
420 * \ingroup IFX_AES_FUNCTIONS
421 * \brief sets AES hardware to CBC mode
422 * \param ctx crypto algo context
423 * \param dst output bytestream
424 * \param src input bytestream
425 * \param iv initialization vector
426 * \param nbytes length of bytestream
427 * \param encdec 1 for encrypt; 0 for decrypt
428 * \param inplace not used
429 */
430 void ifx_deu_aes_cbc (void *ctx, uint8_t *dst, const uint8_t *src,
431 uint8_t *iv, size_t nbytes, int encdec, int inplace)
432 {
433 ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 1);
434 }
435
436 /*! \fn void ifx_deu_aes_ofb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
437 * \ingroup IFX_AES_FUNCTIONS
438 * \brief sets AES hardware to OFB mode
439 * \param ctx crypto algo context
440 * \param dst output bytestream
441 * \param src input bytestream
442 * \param iv initialization vector
443 * \param nbytes length of bytestream
444 * \param encdec 1 for encrypt; 0 for decrypt
445 * \param inplace not used
446 */
447 void ifx_deu_aes_ofb (void *ctx, uint8_t *dst, const uint8_t *src,
448 uint8_t *iv, size_t nbytes, int encdec, int inplace)
449 {
450 ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 2);
451 }
452
453 /*! \fn void ifx_deu_aes_cfb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
454 * \ingroup IFX_AES_FUNCTIONS
455 * \brief sets AES hardware to CFB mode
456 * \param ctx crypto algo context
457 * \param dst output bytestream
458 * \param src input bytestream
459 * \param iv initialization vector
460 * \param nbytes length of bytestream
461 * \param encdec 1 for encrypt; 0 for decrypt
462 * \param inplace not used
463 */
464 void ifx_deu_aes_cfb (void *ctx, uint8_t *dst, const uint8_t *src,
465 uint8_t *iv, size_t nbytes, int encdec, int inplace)
466 {
467 ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 3);
468 }
469
470 /*! \fn void ifx_deu_aes_ctr (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
471 * \ingroup IFX_AES_FUNCTIONS
472 * \brief sets AES hardware to CTR mode
473 * \param ctx crypto algo context
474 * \param dst output bytestream
475 * \param src input bytestream
476 * \param iv initialization vector
477 * \param nbytes length of bytestream
478 * \param encdec 1 for encrypt; 0 for decrypt
479 * \param inplace not used
480 */
481 void ifx_deu_aes_ctr (void *ctx, uint8_t *dst, const uint8_t *src,
482 uint8_t *iv, size_t nbytes, int encdec, int inplace)
483 {
484 ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 4);
485 }
486
487 /*! \fn void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
488 * \ingroup IFX_AES_FUNCTIONS
489 * \brief encrypt AES_BLOCK_SIZE of data
490 * \param tfm linux crypto algo transform
491 * \param out output bytestream
492 * \param in input bytestream
493 */
494 void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
495 {
496 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
497 ifx_deu_aes (ctx, out, in, NULL, AES_BLOCK_SIZE,
498 CRYPTO_DIR_ENCRYPT, 0);
499 }
500
501 /*! \fn void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
502 * \ingroup IFX_AES_FUNCTIONS
503 * \brief decrypt AES_BLOCK_SIZE of data
504 * \param tfm linux crypto algo transform
505 * \param out output bytestream
506 * \param in input bytestream
507 */
508 void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
509 {
510 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
511 ifx_deu_aes (ctx, out, in, NULL, AES_BLOCK_SIZE,
512 CRYPTO_DIR_DECRYPT, 0);
513 }
514
515 /*
516 * \brief AES function mappings
517 */
/* Single-block cipher registration ("aes"): priority 300 so the DEU is
 * preferred over the generic software implementation when loaded. */
struct crypto_alg ifxdeu_aes_alg = {
    .cra_name = "aes",
    .cra_driver_name = "ifxdeu-aes",
    .cra_priority = 300,
    .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .cra_blocksize = AES_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct aes_ctx),
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(ifxdeu_aes_alg.cra_list),
    .cra_u = {
        .cipher = {
            .cia_min_keysize = AES_MIN_KEY_SIZE,
            .cia_max_keysize = AES_MAX_KEY_SIZE,
            .cia_setkey = aes_set_key,
            .cia_encrypt = aes_encrypt,
            .cia_decrypt = aes_decrypt,
        }
    }
};
537
538 /*! \fn int ecb_aes_encrypt(struct skcipher_req *req)
539 * \ingroup IFX_AES_FUNCTIONS
540 * \brief ECB AES encrypt using linux crypto skcipher
541 * \param req skcipher request
542 * \return err
543 */
544 int ecb_aes_encrypt(struct skcipher_request *req)
545 {
546 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
547 struct skcipher_walk walk;
548 int err;
549 unsigned int enc_bytes, nbytes;
550
551 err = skcipher_walk_virt(&walk, req, false);
552
553 while ((nbytes = enc_bytes = walk.nbytes)) {
554 enc_bytes -= (nbytes % AES_BLOCK_SIZE);
555 ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
556 NULL, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
557 nbytes &= AES_BLOCK_SIZE - 1;
558 err = skcipher_walk_done(&walk, nbytes);
559 }
560
561 return err;
562 }
563
564 /*! \fn int ecb_aes_decrypt(struct skcipher_req *req)
565 * \ingroup IFX_AES_FUNCTIONS
566 * \brief ECB AES decrypt using linux crypto skcipher
567 * \param req skcipher request
568 * \return err
569 */
570 int ecb_aes_decrypt(struct skcipher_request *req)
571 {
572 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
573 struct skcipher_walk walk;
574 int err;
575 unsigned int dec_bytes, nbytes;
576
577 err = skcipher_walk_virt(&walk, req, false);
578
579 while ((nbytes = dec_bytes = walk.nbytes)) {
580 dec_bytes -= (nbytes % AES_BLOCK_SIZE);
581 ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
582 NULL, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
583 nbytes &= AES_BLOCK_SIZE - 1;
584 err = skcipher_walk_done(&walk, nbytes);
585 }
586
587 return err;
588 }
589
590 /*
591 * \brief AES function mappings
592 */
/* skcipher registration for "ecb(aes)"; priority 400 outranks the generic
 * software implementation. */
struct skcipher_alg ifxdeu_ecb_aes_alg = {
    .base.cra_name = "ecb(aes)",
    .base.cra_driver_name = "ifxdeu-ecb(aes)",
    .base.cra_priority = 400,
    .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct aes_ctx),
    .base.cra_module = THIS_MODULE,
    .base.cra_list = LIST_HEAD_INIT(ifxdeu_ecb_aes_alg.base.cra_list),
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .setkey = aes_set_key_skcipher,
    .encrypt = ecb_aes_encrypt,
    .decrypt = ecb_aes_decrypt,
};
608
609 /*! \fn int cbc_aes_encrypt(struct skcipher_request *req)
610  *  \ingroup IFX_AES_FUNCTIONS
611  *  \brief CBC AES encrypt using linux crypto skcipher
612  *  \param req skcipher request
613  *  \return err
614 */
615 int cbc_aes_encrypt(struct skcipher_request *req)
616 {
617 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
618 struct skcipher_walk walk;
619 int err;
620 unsigned int enc_bytes, nbytes;
621
622 err = skcipher_walk_virt(&walk, req, false);
623
624 while ((nbytes = enc_bytes = walk.nbytes)) {
625 u8 *iv = walk.iv;
626 enc_bytes -= (nbytes % AES_BLOCK_SIZE);
627 ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
628 iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
629 nbytes &= AES_BLOCK_SIZE - 1;
630 err = skcipher_walk_done(&walk, nbytes);
631 }
632
633 return err;
634 }
635
636 /*! \fn int cbc_aes_decrypt(struct skcipher_req *req)
637 * \ingroup IFX_AES_FUNCTIONS
638 * \brief CBC AES decrypt using linux crypto skcipher
639 * \param req skcipher request
640 * \return err
641 */
642 int cbc_aes_decrypt(struct skcipher_request *req)
643 {
644 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
645 struct skcipher_walk walk;
646 int err;
647 unsigned int dec_bytes, nbytes;
648
649 err = skcipher_walk_virt(&walk, req, false);
650
651 while ((nbytes = dec_bytes = walk.nbytes)) {
652 u8 *iv = walk.iv;
653 dec_bytes -= (nbytes % AES_BLOCK_SIZE);
654 ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
655 iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
656 nbytes &= AES_BLOCK_SIZE - 1;
657 err = skcipher_walk_done(&walk, nbytes);
658 }
659
660 return err;
661 }
662
663 /*
664 * \brief AES function mappings
665 */
/* skcipher registration for "cbc(aes)"; priority 400 outranks the generic
 * software implementation. */
struct skcipher_alg ifxdeu_cbc_aes_alg = {
    .base.cra_name = "cbc(aes)",
    .base.cra_driver_name = "ifxdeu-cbc(aes)",
    .base.cra_priority = 400,
    .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct aes_ctx),
    .base.cra_module = THIS_MODULE,
    .base.cra_list = LIST_HEAD_INIT(ifxdeu_cbc_aes_alg.base.cra_list),
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .ivsize = AES_BLOCK_SIZE,
    .setkey = aes_set_key_skcipher,
    .encrypt = cbc_aes_encrypt,
    .decrypt = cbc_aes_decrypt,
};
682
683 /*! \fn void ifx_deu_aes_xts (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec)
684 * \ingroup IFX_AES_FUNCTIONS
685 * \brief main interface to AES hardware for XTS impl
686 * \param ctx_arg crypto algo context
687 * \param out_arg output bytestream
688 * \param in_arg input bytestream
689 * \param iv_arg initialization vector
690 * \param nbytes length of bytestream
691 * \param encdec 1 for encrypt; 0 for decrypt
692 *
693 */
void ifx_deu_aes_xts (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
        u8 *iv_arg, size_t nbytes, int encdec)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    unsigned long flag;     /* consumed by the CRTCL_SECT_* irqsave macros */
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    u8 oldiv[16];           /* saved tweak for the decrypt stealing path;
                               NOTE(review): written only when the stealing
                               condition below fires -- presumed always to
                               happen before the tail is reached when
                               byte_cnt ends up non-zero; TODO confirm */
    int i = 0;              /* 16-byte blocks processed so far */
    int byte_cnt = nbytes;  /* bytes remaining */

    CRTCL_SECT_START;

    /* program the data key (use_tweak == 0 here) */
    aes_set_key_hw (ctx_arg);

    aes->controlr.E_D = !encdec;    //encryption
    aes->controlr.O = 1; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR - CBC mode for xts

    i = 0;
    while (byte_cnt >= 16) {

        if (!encdec) {
            /* decrypt with a partial tail: the last two tweaks are used
               in swapped order (ciphertext stealing), so remember the
               current tweak and advance once for this block */
            if (((byte_cnt % 16) > 0) && (byte_cnt < (2*XTS_BLOCK_SIZE))) {
                memcpy(oldiv, iv_arg, 16);
                gf128mul_x_ble((le128 *)iv_arg, (le128 *)iv_arg);
            }
            /* XTS pre-whitening: XOR the tweak into the input block */
            u128_xor((u128 *)((u32 *) in_arg + (i * 4) + 0), (u128 *)((u32 *) in_arg + (i * 4) + 0), (u128 *)iv_arg);
        }

        /* load the tweak as CBC IV so the HW performs the post-XOR */
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3)); /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
        *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
        *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
        *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;

        if (encdec) {
            /* XTS post-whitening on encrypt */
            u128_xor((u128 *)((volatile u32 *) out_arg + (i * 4) + 0), (u128 *)((volatile u32 *) out_arg + (i * 4) + 0), (u128 *)iv_arg);
        }
        /* advance the tweak: multiply by x in GF(2^128) */
        gf128mul_x_ble((le128 *)iv_arg, (le128 *)iv_arg);
        i++;
        byte_cnt -= 16;
    }

    /* ciphertext stealing for the final partial block */
    if (byte_cnt) {
        u8 state[XTS_BLOCK_SIZE] = {0,};

        /* decrypt uses the saved (previous) tweak for the stolen block */
        if (!encdec) memcpy(iv_arg, oldiv, 16);

        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));

        /* build the stolen block: partial input + tail of previous output */
        memcpy(state, ((u32 *) in_arg + (i * 4) + 0), byte_cnt);
        memcpy((state + byte_cnt), (out_arg + ((i - 1) * 16) + byte_cnt), (XTS_BLOCK_SIZE - byte_cnt));
        if (!encdec) {
            u128_xor((u128 *)state, (u128 *)state, (u128 *)iv_arg);
        }

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) state + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) state + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) state + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) state + 3)); /* start crypto */

        /* move the previous block's leading bytes to the final position
           while the engine runs */
        memcpy(((u32 *) out_arg + (i * 4) + 0), ((u32 *) out_arg + ((i - 1) * 4) + 0), byte_cnt);

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((volatile u32 *) out_arg + ((i-1) * 4) + 0) = aes->OD3R;
        *((volatile u32 *) out_arg + ((i-1) * 4) + 1) = aes->OD2R;
        *((volatile u32 *) out_arg + ((i-1) * 4) + 2) = aes->OD1R;
        *((volatile u32 *) out_arg + ((i-1) * 4) + 3) = aes->OD0R;

        if (encdec) {
            u128_xor((u128 *)((volatile u32 *) out_arg + ((i-1) * 4) + 0), (u128 *)((volatile u32 *) out_arg + ((i-1) * 4) + 0), (u128 *)iv_arg);
        }
    }

    CRTCL_SECT_END;
}
790
791 /*! \fn int xts_aes_encrypt(struct skcipher_req *req)
792 * \ingroup IFX_AES_FUNCTIONS
793 * \brief XTS AES encrypt using linux crypto skcipher
794 * \param req skcipher request
795 * \return err
796 */
797 int xts_aes_encrypt(struct skcipher_request *req)
798 {
799 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
800 struct skcipher_walk walk;
801 int err;
802 unsigned int enc_bytes, nbytes, processed;
803
804 err = skcipher_walk_virt(&walk, req, false);
805
806 if (req->cryptlen < XTS_BLOCK_SIZE)
807 return -EINVAL;
808
809 ctx->use_tweak = 1;
810 aes_encrypt(req->base.tfm, walk.iv, walk.iv);
811 ctx->use_tweak = 0;
812 processed = 0;
813
814 while ((nbytes = walk.nbytes) && (walk.nbytes >= (XTS_BLOCK_SIZE * 2)) ) {
815 u8 *iv = walk.iv;
816 if (nbytes == walk.total) {
817 enc_bytes = nbytes;
818 } else {
819 enc_bytes = nbytes & ~(XTS_BLOCK_SIZE - 1);
820 if ((req->cryptlen - processed - enc_bytes) < (XTS_BLOCK_SIZE)) {
821 if (enc_bytes > (2 * XTS_BLOCK_SIZE)) {
822 enc_bytes -= XTS_BLOCK_SIZE;
823 } else {
824 break;
825 }
826 }
827 }
828 ifx_deu_aes_xts(ctx, walk.dst.virt.addr, walk.src.virt.addr,
829 iv, enc_bytes, CRYPTO_DIR_ENCRYPT);
830 err = skcipher_walk_done(&walk, nbytes - enc_bytes);
831 processed += enc_bytes;
832 }
833
834 if ((walk.nbytes)) {
835 u8 *iv = walk.iv;
836 nbytes = req->cryptlen - processed;
837 scatterwalk_map_and_copy(ctx->lastbuffer, req->src, (req->cryptlen - nbytes), nbytes, 0);
838 ifx_deu_aes_xts(ctx, ctx->lastbuffer, ctx->lastbuffer,
839 iv, nbytes, CRYPTO_DIR_ENCRYPT);
840 scatterwalk_map_and_copy(ctx->lastbuffer, req->dst, (req->cryptlen - nbytes), nbytes, 1);
841 skcipher_request_complete(req, 0);
842 }
843
844 return err;
845 }
846
847 /*! \fn int xts_aes_decrypt(struct skcipher_req *req)
848 * \ingroup IFX_AES_FUNCTIONS
849 * \brief XTS AES decrypt using linux crypto skcipher
850 * \param req skcipher request
851 * \return err
852 */
853 int xts_aes_decrypt(struct skcipher_request *req)
854 {
855 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
856 struct skcipher_walk walk;
857 int err;
858 unsigned int dec_bytes, nbytes, processed;
859
860 err = skcipher_walk_virt(&walk, req, false);
861
862 if (req->cryptlen < XTS_BLOCK_SIZE)
863 return -EINVAL;
864
865 ctx->use_tweak = 1;
866 aes_encrypt(req->base.tfm, walk.iv, walk.iv);
867 ctx->use_tweak = 0;
868 processed = 0;
869
870 while ((nbytes = walk.nbytes) && (walk.nbytes >= (XTS_BLOCK_SIZE * 2))) {
871 u8 *iv = walk.iv;
872 if (nbytes == walk.total) {
873 dec_bytes = nbytes;
874 } else {
875 dec_bytes = nbytes & ~(XTS_BLOCK_SIZE - 1);
876 if ((req->cryptlen - processed - dec_bytes) < (XTS_BLOCK_SIZE)) {
877 if (dec_bytes > (2 * XTS_BLOCK_SIZE)) {
878 dec_bytes -= XTS_BLOCK_SIZE;
879 } else {
880 break;
881 }
882 }
883 }
884 ifx_deu_aes_xts(ctx, walk.dst.virt.addr, walk.src.virt.addr,
885 iv, dec_bytes, CRYPTO_DIR_DECRYPT);
886 err = skcipher_walk_done(&walk, nbytes - dec_bytes);
887 processed += dec_bytes;
888 }
889
890 if ((walk.nbytes)) {
891 u8 *iv = walk.iv;
892 nbytes = req->cryptlen - processed;
893 scatterwalk_map_and_copy(ctx->lastbuffer, req->src, (req->cryptlen - nbytes), nbytes, 0);
894 ifx_deu_aes_xts(ctx, ctx->lastbuffer, ctx->lastbuffer,
895 iv, nbytes, CRYPTO_DIR_DECRYPT);
896 scatterwalk_map_and_copy(ctx->lastbuffer, req->dst, (req->cryptlen - nbytes), nbytes, 1);
897 skcipher_request_complete(req, 0);
898 }
899
900 return err;
901 }
902
903 /*! \fn int xts_aes_set_key_skcipher (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
904 * \ingroup IFX_AES_FUNCTIONS
905 * \brief sets the AES keys for XTS
906 * \param tfm linux crypto algo transform
907 * \param in_key input key
908 * \param key_len key lengths of 16, 24 and 32 bytes supported
909 * \return -EINVAL - bad key length, 0 - SUCCESS
910 */
911 int xts_aes_set_key_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
912 {
913 struct aes_ctx *ctx = crypto_tfm_ctx(crypto_skcipher_tfm(tfm));
914 unsigned int keylen = (key_len / 2);
915
916 if (key_len % 2) return -EINVAL;
917
918 if (keylen != 16 && keylen != 24 && keylen != 32) {
919 return -EINVAL;
920 }
921
922 ctx->key_length = keylen;
923 ctx->use_tweak = 0;
924 DPRINTF(0, "ctx @%p, key_len %d, ctx->key_length %d\n", ctx, key_len, ctx->key_length);
925 memcpy ((u8 *) (ctx->buf), in_key, keylen);
926 memcpy ((u8 *) (ctx->tweakkey), in_key + keylen, keylen);
927
928 return 0;
929 }
930
931 /*
932 * \brief AES function mappings
933 */
/* skcipher registration for "xts(aes)". Key sizes are doubled (data key +
 * tweak key); walksize of two blocks keeps the ciphertext-stealing tail
 * together in one walk chunk. */
struct skcipher_alg ifxdeu_xts_aes_alg = {
    .base.cra_name = "xts(aes)",
    .base.cra_driver_name = "ifxdeu-xts(aes)",
    .base.cra_priority = 400,
    .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize = XTS_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct aes_ctx),
    .base.cra_module = THIS_MODULE,
    .base.cra_list = LIST_HEAD_INIT(ifxdeu_xts_aes_alg.base.cra_list),
    .min_keysize = AES_MIN_KEY_SIZE * 2,
    .max_keysize = AES_MAX_KEY_SIZE * 2,
    .ivsize = XTS_BLOCK_SIZE,
    .walksize = 2 * XTS_BLOCK_SIZE,
    .setkey = xts_aes_set_key_skcipher,
    .encrypt = xts_aes_encrypt,
    .decrypt = xts_aes_decrypt,
};
951
952 /*! \fn int ofb_aes_encrypt(struct skcipher_req *req)
953 * \ingroup IFX_AES_FUNCTIONS
954 * \brief OFB AES encrypt using linux crypto skcipher
955 * \param req skcipher request
956 * \return err
957 */
958 int ofb_aes_encrypt(struct skcipher_request *req)
959 {
960 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
961 struct skcipher_walk walk;
962 int err;
963 unsigned int enc_bytes, nbytes;
964
965 err = skcipher_walk_virt(&walk, req, false);
966
967 while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
968 enc_bytes -= (nbytes % AES_BLOCK_SIZE);
969 ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
970 walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
971 nbytes &= AES_BLOCK_SIZE - 1;
972 err = skcipher_walk_done(&walk, nbytes);
973 }
974
975 /* to handle remaining bytes < AES_BLOCK_SIZE */
976 if (walk.nbytes) {
977 ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
978 walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
979 err = skcipher_walk_done(&walk, 0);
980 }
981
982 return err;
983 }
984
985 /*! \fn int ofb_aes_decrypt(struct skcipher_req *req)
986 * \ingroup IFX_AES_FUNCTIONS
987 * \brief OFB AES decrypt using linux crypto skcipher
988 * \param req skcipher request
989 * \return err
990 */
991 int ofb_aes_decrypt(struct skcipher_request *req)
992 {
993 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
994 struct skcipher_walk walk;
995 int err;
996 unsigned int dec_bytes, nbytes;
997
998 err = skcipher_walk_virt(&walk, req, false);
999
1000 while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1001 dec_bytes -= (nbytes % AES_BLOCK_SIZE);
1002 ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1003 walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
1004 nbytes &= AES_BLOCK_SIZE - 1;
1005 err = skcipher_walk_done(&walk, nbytes);
1006 }
1007
1008 /* to handle remaining bytes < AES_BLOCK_SIZE */
1009 if (walk.nbytes) {
1010 ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1011 walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
1012 err = skcipher_walk_done(&walk, 0);
1013 }
1014
1015 return err;
1016 }
1017
1018 /*
1019 * \brief AES function mappings
1020 */
/* Registration record for the hardware OFB-AES skcipher.  Blocksize 1
 * because OFB is a stream mode; chunksize carries the real block size. */
struct skcipher_alg ifxdeu_ofb_aes_alg = {
    .base.cra_name           =   "ofb(aes)",
    .base.cra_driver_name    =   "ifxdeu-ofb(aes)",
    .base.cra_priority       =   400, /* above the software implementation */
    .base.cra_flags          =   CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize      =   1,
    .base.cra_ctxsize        =   sizeof(struct aes_ctx),
    .base.cra_module         =   THIS_MODULE,
    .base.cra_list           =   LIST_HEAD_INIT(ifxdeu_ofb_aes_alg.base.cra_list),
    .min_keysize             =   AES_MIN_KEY_SIZE,
    .max_keysize             =   AES_MAX_KEY_SIZE,
    .ivsize                  =   AES_BLOCK_SIZE,
    .chunksize               =   AES_BLOCK_SIZE,
    .walksize                =   AES_BLOCK_SIZE,
    .setkey                  =   aes_set_key_skcipher,
    .encrypt                 =   ofb_aes_encrypt,
    .decrypt                 =   ofb_aes_decrypt,
};
1039
1040 /*! \fn int cfb_aes_encrypt(struct skcipher_req *req)
1041 * \ingroup IFX_AES_FUNCTIONS
1042 * \brief CFB AES encrypt using linux crypto skcipher
1043 * \param req skcipher request
1044 * \return err
1045 */
1046 int cfb_aes_encrypt(struct skcipher_request *req)
1047 {
1048 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1049 struct skcipher_walk walk;
1050 int err;
1051 unsigned int enc_bytes, nbytes;
1052
1053 err = skcipher_walk_virt(&walk, req, false);
1054
1055 while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1056 enc_bytes -= (nbytes % AES_BLOCK_SIZE);
1057 ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1058 walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
1059 nbytes &= AES_BLOCK_SIZE - 1;
1060 err = skcipher_walk_done(&walk, nbytes);
1061 }
1062
1063 /* to handle remaining bytes < AES_BLOCK_SIZE */
1064 if (walk.nbytes) {
1065 ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1066 walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
1067 err = skcipher_walk_done(&walk, 0);
1068 }
1069
1070 return err;
1071 }
1072
1073 /*! \fn int cfb_aes_decrypt(struct skcipher_req *req)
1074 * \ingroup IFX_AES_FUNCTIONS
1075 * \brief CFB AES decrypt using linux crypto skcipher
1076 * \param req skcipher request
1077 * \return err
1078 */
1079 int cfb_aes_decrypt(struct skcipher_request *req)
1080 {
1081 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1082 struct skcipher_walk walk;
1083 int err;
1084 unsigned int dec_bytes, nbytes;
1085
1086 err = skcipher_walk_virt(&walk, req, false);
1087
1088 while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1089 dec_bytes -= (nbytes % AES_BLOCK_SIZE);
1090 ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1091 walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
1092 nbytes &= AES_BLOCK_SIZE - 1;
1093 err = skcipher_walk_done(&walk, nbytes);
1094 }
1095
1096 /* to handle remaining bytes < AES_BLOCK_SIZE */
1097 if (walk.nbytes) {
1098 ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1099 walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
1100 err = skcipher_walk_done(&walk, 0);
1101 }
1102
1103 return err;
1104 }
1105
1106 /*
1107 * \brief AES function mappings
1108 */
/* Registration record for the hardware CFB-AES skcipher.  Blocksize 1
 * because CFB is a stream mode; chunksize carries the real block size. */
struct skcipher_alg ifxdeu_cfb_aes_alg = {
    .base.cra_name           =   "cfb(aes)",
    .base.cra_driver_name    =   "ifxdeu-cfb(aes)",
    .base.cra_priority       =   400, /* above the software implementation */
    .base.cra_flags          =   CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize      =   1,
    .base.cra_ctxsize        =   sizeof(struct aes_ctx),
    .base.cra_module         =   THIS_MODULE,
    .base.cra_list           =   LIST_HEAD_INIT(ifxdeu_cfb_aes_alg.base.cra_list),
    .min_keysize             =   AES_MIN_KEY_SIZE,
    .max_keysize             =   AES_MAX_KEY_SIZE,
    .ivsize                  =   AES_BLOCK_SIZE,
    .chunksize               =   AES_BLOCK_SIZE,
    .walksize                =   AES_BLOCK_SIZE,
    .setkey                  =   aes_set_key_skcipher,
    .encrypt                 =   cfb_aes_encrypt,
    .decrypt                 =   cfb_aes_decrypt,
};
1127
1128 /*! \fn int ctr_basic_aes_encrypt(struct skcipher_req *req)
1129 * \ingroup IFX_AES_FUNCTIONS
1130 * \brief Counter mode AES encrypt using linux crypto skcipher
1131 * \param req skcipher request
1132 * \return err
1133 */
1134 int ctr_basic_aes_encrypt(struct skcipher_request *req)
1135 {
1136 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1137 struct skcipher_walk walk;
1138 int err;
1139 unsigned int enc_bytes, nbytes;
1140
1141 err = skcipher_walk_virt(&walk, req, false);
1142
1143 while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1144 enc_bytes -= (nbytes % AES_BLOCK_SIZE);
1145 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1146 walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
1147 nbytes &= AES_BLOCK_SIZE - 1;
1148 err = skcipher_walk_done(&walk, nbytes);
1149 }
1150
1151 /* to handle remaining bytes < AES_BLOCK_SIZE */
1152 if (walk.nbytes) {
1153 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1154 walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
1155 err = skcipher_walk_done(&walk, 0);
1156 }
1157
1158 return err;
1159 }
1160
/*! \fn int ctr_basic_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief Counter mode AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
1167 int ctr_basic_aes_decrypt(struct skcipher_request *req)
1168 {
1169 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1170 struct skcipher_walk walk;
1171 int err;
1172 unsigned int dec_bytes, nbytes;
1173
1174 err = skcipher_walk_virt(&walk, req, false);
1175
1176 while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1177 dec_bytes -= (nbytes % AES_BLOCK_SIZE);
1178 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1179 walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
1180 nbytes &= AES_BLOCK_SIZE - 1;
1181 err = skcipher_walk_done(&walk, nbytes);
1182 }
1183
1184 /* to handle remaining bytes < AES_BLOCK_SIZE */
1185 if (walk.nbytes) {
1186 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1187 walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
1188 err = skcipher_walk_done(&walk, 0);
1189 }
1190
1191 return err;
1192 }
1193
1194 /*
1195 * \brief AES function mappings
1196 */
/* Registration record for the hardware CTR-AES skcipher.  Blocksize 1
 * because CTR is a stream mode. */
struct skcipher_alg ifxdeu_ctr_basic_aes_alg = {
    .base.cra_name           =   "ctr(aes)",
    .base.cra_driver_name    =   "ifxdeu-ctr(aes)",
    .base.cra_priority       =   400, /* above the software implementation */
    .base.cra_flags          =   CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize      =   1,
    .base.cra_ctxsize        =   sizeof(struct aes_ctx),
    .base.cra_module         =   THIS_MODULE,
    .base.cra_list           =   LIST_HEAD_INIT(ifxdeu_ctr_basic_aes_alg.base.cra_list),
    .min_keysize             =   AES_MIN_KEY_SIZE,
    .max_keysize             =   AES_MAX_KEY_SIZE,
    .ivsize                  =   AES_BLOCK_SIZE,
    .walksize                =   AES_BLOCK_SIZE,
    .setkey                  =   aes_set_key_skcipher,
    .encrypt                 =   ctr_basic_aes_encrypt,
    .decrypt                 =   ctr_basic_aes_decrypt,
};
1214
1215 /*! \fn int ctr_rfc3686_aes_encrypt(struct skcipher_req *req)
1216 * \ingroup IFX_AES_FUNCTIONS
1217 * \brief Counter mode AES (rfc3686) encrypt using linux crypto skcipher
1218 * \param req skcipher request
1219 * \return err
1220 */
1221 int ctr_rfc3686_aes_encrypt(struct skcipher_request *req)
1222 {
1223 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1224 struct skcipher_walk walk;
1225 unsigned int nbytes, enc_bytes;
1226 int err;
1227 u8 rfc3686_iv[16];
1228
1229 err = skcipher_walk_virt(&walk, req, false);
1230 nbytes = walk.nbytes;
1231
1232 /* set up counter block */
1233 memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1234 memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, CTR_RFC3686_IV_SIZE);
1235
1236 /* initialize counter portion of counter block */
1237 *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1238 cpu_to_be32(1);
1239
1240 while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1241 enc_bytes -= (nbytes % AES_BLOCK_SIZE);
1242 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1243 rfc3686_iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
1244 nbytes &= AES_BLOCK_SIZE - 1;
1245 err = skcipher_walk_done(&walk, nbytes);
1246 }
1247
1248 /* to handle remaining bytes < AES_BLOCK_SIZE */
1249 if (walk.nbytes) {
1250 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1251 rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
1252 err = skcipher_walk_done(&walk, 0);
1253 }
1254
1255 return err;
1256 }
1257
1258 /*! \fn int ctr_rfc3686_aes_decrypt(struct skcipher_req *req)
1259 * \ingroup IFX_AES_FUNCTIONS
1260 * \brief Counter mode AES (rfc3686) decrypt using linux crypto skcipher
1261 * \param req skcipher request
1262 * \return err
1263 */
1264 int ctr_rfc3686_aes_decrypt(struct skcipher_request *req)
1265 {
1266 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1267 struct skcipher_walk walk;
1268 unsigned int nbytes, dec_bytes;
1269 int err;
1270 u8 rfc3686_iv[16];
1271
1272 err = skcipher_walk_virt(&walk, req, false);
1273 nbytes = walk.nbytes;
1274
1275 /* set up counter block */
1276 memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1277 memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, CTR_RFC3686_IV_SIZE);
1278
1279 /* initialize counter portion of counter block */
1280 *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1281 cpu_to_be32(1);
1282
1283 while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
1284 dec_bytes -= (nbytes % AES_BLOCK_SIZE);
1285 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1286 rfc3686_iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
1287 nbytes &= AES_BLOCK_SIZE - 1;
1288 err = skcipher_walk_done(&walk, nbytes);
1289 }
1290
1291 /* to handle remaining bytes < AES_BLOCK_SIZE */
1292 if (walk.nbytes) {
1293 ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1294 rfc3686_iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
1295 err = skcipher_walk_done(&walk, 0);
1296 }
1297
1298 return err;
1299 }
1300
1301 /*
1302 * \brief AES function mappings
1303 */
/* Registration record for the hardware RFC 3686 CTR-AES skcipher.  The
 * key carries a trailing nonce, hence the dedicated key-size macros. */
struct skcipher_alg ifxdeu_ctr_rfc3686_aes_alg = {
    .base.cra_name           =   "rfc3686(ctr(aes))",
    .base.cra_driver_name    =   "ifxdeu-ctr-rfc3686(aes)",
    .base.cra_priority       =   400, /* above the software implementation */
    .base.cra_flags          =   CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize      =   1,
    .base.cra_ctxsize        =   sizeof(struct aes_ctx),
    .base.cra_module         =   THIS_MODULE,
    .base.cra_list           =   LIST_HEAD_INIT(ifxdeu_ctr_rfc3686_aes_alg.base.cra_list),
    .min_keysize             =   CTR_RFC3686_MIN_KEY_SIZE,
    .max_keysize             =   CTR_RFC3686_MAX_KEY_SIZE,
    .ivsize                  =   CTR_RFC3686_IV_SIZE,
    .walksize                =   AES_BLOCK_SIZE,
    .setkey                  =   ctr_rfc3686_aes_set_key_skcipher,
    .encrypt                 =   ctr_rfc3686_aes_encrypt,
    .decrypt                 =   ctr_rfc3686_aes_decrypt,
};
1321
1322 static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final);
1323
1324 /*! \fn static void aes_cbcmac_transform(struct shash_desc *desc, u8 const *in)
1325 * \ingroup IFX_aes_cbcmac_FUNCTIONS
1326 * \brief save input block to context
1327 * \param desc linux crypto shash descriptor
1328 * \param in 16-byte block of input
1329 */
1330 static void aes_cbcmac_transform(struct shash_desc *desc, u8 const *in)
1331 {
1332 struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
1333
1334 if ( ((mctx->dbn)+1) > AES_CBCMAC_DBN_TEMP_SIZE )
1335 {
1336 //printk("aes_cbcmac_DBN_TEMP_SIZE exceeded\n");
1337 aes_cbcmac_final_impl(desc, (u8 *)mctx->hash, false);
1338 }
1339
1340 memcpy(&mctx->temp[mctx->dbn], in, 16); //dbn workaround
1341 mctx->dbn += 1;
1342 }
1343
1344 /*! \fn int aes_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
1345 * \ingroup IFX_aes_cbcmac_FUNCTIONS
1346 * \brief sets cbcmac aes key
1347 * \param tfm linux crypto shash transform
1348 * \param key input key
1349 * \param keylen key
1350 */
1351 static int aes_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
1352 {
1353 return aes_set_key(crypto_shash_tfm(tfm), key, keylen);
1354
1355 return 0;
1356 }
1357
/*! \fn int aes_cbcmac_init(struct shash_desc *desc)
 *  \ingroup IFX_aes_cbcmac_FUNCTIONS
 *  \brief initialize cbcmac aes context
 *  \param desc linux crypto shash descriptor
*/
1363 static int aes_cbcmac_init(struct shash_desc *desc)
1364 {
1365
1366 struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
1367
1368 mctx->dbn = 0; //dbn workaround
1369 mctx->started = 0;
1370 mctx->byte_count = 0;
1371 memset(mctx->hash, 0, AES_BLOCK_SIZE);
1372
1373 return 0;
1374 }
1375
1376 /*! \fn void aes_cbcmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
1377 * \ingroup IFX_aes_cbcmac_FUNCTIONS
1378 * \brief on-the-fly cbcmac aes computation
1379 * \param desc linux crypto shash descriptor
1380 * \param data input data
1381 * \param len size of input data
1382 */
1383 static int aes_cbcmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
1384 {
1385 struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
1386 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x0f);
1387
1388 mctx->byte_count += len;
1389
1390 if (avail > len) {
1391 memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
1392 data, len);
1393 return 0;
1394 }
1395
1396 memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
1397 data, avail);
1398
1399 aes_cbcmac_transform(desc, mctx->block);
1400 data += avail;
1401 len -= avail;
1402
1403 while (len >= sizeof(mctx->block)) {
1404 memcpy(mctx->block, data, sizeof(mctx->block));
1405 aes_cbcmac_transform(desc, mctx->block);
1406 data += sizeof(mctx->block);
1407 len -= sizeof(mctx->block);
1408 }
1409
1410 memcpy(mctx->block, data, len);
1411 return 0;
1412 }
1413
/*! \fn static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
 *  \ingroup IFX_aes_cbcmac_FUNCTIONS
 *  \brief compute final or intermediate cbcmac aes value
 *  \param desc linux crypto shash descriptor
 *  \param out final cbcmac aes output value
 *  \param hash_final finalize (true) or intermediate processing (false)
*/
static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
{
    struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
    const unsigned int offset = mctx->byte_count & 0x0f;    /* bytes in the buffered partial block */
    char *p = (char *)mctx->block + offset;
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;   /* memory-mapped DEU AES registers */
    unsigned long flag;     /* used by the CRTCL_SECT_* lock macros -- presumably irqsave flags */
    int i = 0;
    int dbn;
    u32 *in = mctx->temp[0];    /* queued full input blocks (dbn workaround) */

    CRTCL_SECT_START;

    aes_set_key_hw (mctx);

    aes->controlr.E_D = !CRYPTO_DIR_ENCRYPT;    //encryption
    aes->controlr.O = 1; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR

    //aes->controlr.F = 128; //default; only for CFB and OFB modes; change only for customer-specific apps

    //printk("\ndbn = %d\n", mctx->dbn);

    /* CBC-MAC is the last CBC ciphertext block.  On a continued run,
     * reload the previous intermediate MAC as the CBC IV; on the first
     * run start from an all-zero IV. */
    if (mctx->started) {
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) mctx->hash);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 3));
    } else {
        mctx->started = 1;
        aes->IV3R = 0;
        aes->IV2R = 0;
        aes->IV1R = 0;
        aes->IV0R = 0;
    }

    /* Run all queued 16-byte blocks through the engine.  Writing ID0R
     * starts the operation; the output is only needed as the IV chain,
     * read back below. */
    i = 0;
    for (dbn = 0; dbn < mctx->dbn; dbn++)
    {
        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 3)); /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        in += 4;
    }

    /* Capture the running MAC (CBC IV registers) back into the context. */
    *((u32 *) mctx->hash) = DEU_ENDIAN_SWAP(aes->IV3R);
    *((u32 *) mctx->hash + 1) = DEU_ENDIAN_SWAP(aes->IV2R);
    *((u32 *) mctx->hash + 2) = DEU_ENDIAN_SWAP(aes->IV1R);
    *((u32 *) mctx->hash + 3) = DEU_ENDIAN_SWAP(aes->IV0R);

    /* Finalization with a partial tail block: XOR the tail into the MAC
     * and run one extra ECB encryption over the combined block. */
    if (hash_final && offset) {
        aes->controlr.O = 0; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
        crypto_xor(mctx->block, mctx->hash, offset);

        /* Fill the rest of the block with the untouched MAC bytes. */
        memcpy(p, mctx->hash + offset, (AES_BLOCK_SIZE - offset));

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 3)); /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((u32 *) mctx->hash) = DEU_ENDIAN_SWAP(aes->OD3R);
        *((u32 *) mctx->hash + 1) = DEU_ENDIAN_SWAP(aes->OD2R);
        *((u32 *) mctx->hash + 2) = DEU_ENDIAN_SWAP(aes->OD1R);
        *((u32 *) mctx->hash + 3) = DEU_ENDIAN_SWAP(aes->OD0R);
    }

    CRTCL_SECT_END;

    if (hash_final) {
        memcpy(out, mctx->hash, AES_BLOCK_SIZE);
        /* reset the context after we finish with the hash */
        aes_cbcmac_init(desc);
    } else {
        /* Intermediate flush: staging queue consumed, MAC state kept. */
        mctx->dbn = 0;
    }
    return 0;
}
1508
/*! \fn static int aes_cbcmac_final(struct shash_desc *desc, u8 *out)
 *  \ingroup IFX_aes_cbcmac_FUNCTIONS
 *  \brief call aes_cbcmac_final_impl with hash_final true
 *  \param desc linux crypto shash descriptor
 *  \param out final cbcmac aes output value
*/
static int aes_cbcmac_final(struct shash_desc *desc, u8 *out)
{
    /* Finalizing wrapper: flush queued blocks, emit the MAC, reset. */
    return aes_cbcmac_final_impl(desc, out, true);
}
1519
1520 /*! \fn void aes_cbcmac_init_tfm(struct crypto_tfm *tfm)
1521 * \ingroup IFX_aes_cbcmac_FUNCTIONS
1522 * \brief initialize pointers in aes_ctx
1523 * \param tfm linux crypto shash transform
1524 */
1525 static int aes_cbcmac_init_tfm(struct crypto_tfm *tfm)
1526 {
1527 struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
1528 mctx->temp = kzalloc(AES_BLOCK_SIZE * AES_CBCMAC_DBN_TEMP_SIZE, GFP_KERNEL);
1529 if (IS_ERR(mctx->temp)) return PTR_ERR(mctx->temp);
1530
1531 return 0;
1532 }
1533
1534 /*! \fn void aes_cbcmac_exit_tfm(struct crypto_tfm *tfm)
1535 * \ingroup IFX_aes_cbcmac_FUNCTIONS
1536 * \brief free pointers in aes_ctx
1537 * \param tfm linux crypto shash transform
1538 */
static void aes_cbcmac_exit_tfm(struct crypto_tfm *tfm)
{
    struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
    /* Release the staging buffer allocated in aes_cbcmac_init_tfm(). */
    kfree(mctx->temp);
}
1544
1545 /*
1546 * \brief aes_cbcmac function mappings
1547 */
/* Registration record for the hardware CBC-MAC(AES) shash. */
static struct shash_alg ifxdeu_cbcmac_aes_alg = {
    .digestsize         =       AES_BLOCK_SIZE,
    .init               =       aes_cbcmac_init,
    .update             =       aes_cbcmac_update,
    .final              =       aes_cbcmac_final,
    .setkey             =       aes_cbcmac_setkey,
    .descsize           =       sizeof(struct aes_ctx),
    .base               =       {
        .cra_name       =       "cbcmac(aes)",
        .cra_driver_name=       "ifxdeu-cbcmac(aes)",
        .cra_priority   =       400, /* above the software implementation */
        .cra_ctxsize    =       sizeof(struct aes_ctx),
        .cra_flags      =       CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
        .cra_blocksize  =       1,
        .cra_module     =       THIS_MODULE,
        .cra_init       =       aes_cbcmac_init_tfm, /* allocates the dbn staging buffer */
        .cra_exit       =       aes_cbcmac_exit_tfm,
        }
};
1567
1568
1569 /*! \fn int ifxdeu_init_aes (void)
1570 * \ingroup IFX_AES_FUNCTIONS
1571 * \brief function to initialize AES driver
1572 * \return ret
1573 */
1574 int ifxdeu_init_aes (void)
1575 {
1576 int ret = -ENOSYS;
1577
1578 aes_chip_init();
1579
1580 if ((ret = crypto_register_alg(&ifxdeu_aes_alg)))
1581 goto aes_err;
1582
1583 if ((ret = crypto_register_skcipher(&ifxdeu_ecb_aes_alg)))
1584 goto ecb_aes_err;
1585
1586 if ((ret = crypto_register_skcipher(&ifxdeu_cbc_aes_alg)))
1587 goto cbc_aes_err;
1588
1589 if ((ret = crypto_register_skcipher(&ifxdeu_xts_aes_alg)))
1590 goto xts_aes_err;
1591
1592 if ((ret = crypto_register_skcipher(&ifxdeu_ofb_aes_alg)))
1593 goto ofb_aes_err;
1594
1595 if ((ret = crypto_register_skcipher(&ifxdeu_cfb_aes_alg)))
1596 goto cfb_aes_err;
1597
1598 if ((ret = crypto_register_skcipher(&ifxdeu_ctr_basic_aes_alg)))
1599 goto ctr_basic_aes_err;
1600
1601 if ((ret = crypto_register_skcipher(&ifxdeu_ctr_rfc3686_aes_alg)))
1602 goto ctr_rfc3686_aes_err;
1603
1604 if ((ret = crypto_register_shash(&ifxdeu_cbcmac_aes_alg)))
1605 goto cbcmac_aes_err;
1606
1607 CRTCL_SECT_INIT;
1608
1609
1610 printk (KERN_NOTICE "IFX DEU AES initialized%s%s.\n", disable_multiblock ? "" : " (multiblock)", disable_deudma ? "" : " (DMA)");
1611 return ret;
1612
1613 cbcmac_aes_err:
1614 crypto_unregister_shash(&ifxdeu_cbcmac_aes_alg);
1615 printk (KERN_ERR "IFX cbcmac_aes initialization failed!\n");
1616 return ret;
1617 ctr_rfc3686_aes_err:
1618 crypto_unregister_skcipher(&ifxdeu_ctr_rfc3686_aes_alg);
1619 printk (KERN_ERR "IFX ctr_rfc3686_aes initialization failed!\n");
1620 return ret;
1621 ctr_basic_aes_err:
1622 crypto_unregister_skcipher(&ifxdeu_ctr_basic_aes_alg);
1623 printk (KERN_ERR "IFX ctr_basic_aes initialization failed!\n");
1624 return ret;
1625 cfb_aes_err:
1626 crypto_unregister_skcipher(&ifxdeu_cfb_aes_alg);
1627 printk (KERN_ERR "IFX cfb_aes initialization failed!\n");
1628 return ret;
1629 ofb_aes_err:
1630 crypto_unregister_skcipher(&ifxdeu_ofb_aes_alg);
1631 printk (KERN_ERR "IFX ofb_aes initialization failed!\n");
1632 return ret;
1633 xts_aes_err:
1634 crypto_unregister_skcipher(&ifxdeu_xts_aes_alg);
1635 printk (KERN_ERR "IFX xts_aes initialization failed!\n");
1636 return ret;
1637 cbc_aes_err:
1638 crypto_unregister_skcipher(&ifxdeu_cbc_aes_alg);
1639 printk (KERN_ERR "IFX cbc_aes initialization failed!\n");
1640 return ret;
1641 ecb_aes_err:
1642 crypto_unregister_skcipher(&ifxdeu_ecb_aes_alg);
1643 printk (KERN_ERR "IFX aes initialization failed!\n");
1644 return ret;
1645 aes_err:
1646 printk(KERN_ERR "IFX DEU AES initialization failed!\n");
1647
1648 return ret;
1649 }
1650
1651 /*! \fn void ifxdeu_fini_aes (void)
1652 * \ingroup IFX_AES_FUNCTIONS
1653 * \brief unregister aes driver
1654 */
void ifxdeu_fini_aes (void)
{
    /* Unregister every algorithm registered in ifxdeu_init_aes(). */
    crypto_unregister_alg (&ifxdeu_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ecb_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_cbc_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_xts_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ofb_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_cfb_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ctr_basic_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ctr_rfc3686_aes_alg);
    crypto_unregister_shash (&ifxdeu_cbcmac_aes_alg);
}