1 /******************************************************************************
3 ** FILE NAME : ifxmips_async_aes.c
5 ** MODULES : DEU Module
7 ** DATE : October 11, 2010
8 ** AUTHOR : Mohammad Firdaus
9 ** DESCRIPTION : Data Encryption Unit Driver for AES Algorithm
10 ** COPYRIGHT : Copyright (c) 2010
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
20 ** $Date $Author $Comment
21 ** 08,Sept 2009 Mohammad Firdaus Initial UEIP release
22 ** 11, Oct 2010 Mohammad Firdaus Kernel Port incl. Async. Ablkcipher mode
23 ** 21,March 2011 Mohammad Firdaus Changes for Kernel 2.6.32 and IPSec integration
24 *******************************************************************************/
26 \defgroup IFX_DEU IFX_DEU_DRIVERS
28 \brief ifx DEU driver module
32 \file ifxmips_async_aes.c
34 \brief AES Encryption Driver main file
38 \defgroup IFX_AES_FUNCTIONS IFX_AES_FUNCTIONS
40 \brief IFX AES driver Functions
45 #include <linux/wait.h>
46 #include <linux/crypto.h>
47 #include <linux/kernel.h>
48 #include <linux/kthread.h>
49 #include <linux/interrupt.h>
50 #include <linux/spinlock.h>
51 #include <linux/list.h>
52 #include <crypto/ctr.h>
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/scatterwalk.h>
57 #include <asm/ifx/ifx_regs.h>
58 #include <asm/ifx/ifx_types.h>
59 #include <asm/ifx/common_routines.h>
60 #include <asm/ifx/irq.h>
61 #include <asm/ifx/ifx_pmu.h>
62 #include <asm/ifx/ifx_gpio.h>
63 #include <asm/kmap_types.h>
65 #include "ifxmips_deu.h"
67 #if defined(CONFIG_DANUBE)
68 #include "ifxmips_deu_danube.h"
69 extern int ifx_danube_pre_1_4
;
70 #elif defined(CONFIG_AR9)
71 #include "ifxmips_deu_ar9.h"
72 #elif defined(CONFIG_VR9) || defined(CONFIG_AR10)
73 #include "ifxmips_deu_vr9.h"
75 #error "Unkown platform"
/* DMA related header and variables */

/* Critical-section helpers serializing access to the AES hardware.
 * NOTE(review): they expect a spinlock named 'aes_lock' and an unsigned long
 * 'flag' to be in scope at the call site — confirm 'aes_lock' is defined in
 * this translation unit (its definition was not visible in this chunk). */
#define CRTCL_SECT_INIT     spin_lock_init(&aes_lock)
#define CRTCL_SECT_START    spin_lock_irqsave(&aes_lock, flag)
#define CRTCL_SECT_END      spin_unlock_irqrestore(&aes_lock, flag)
/* Definition of constants */
//#define AES_START   IFX_AES_CON

#define AES_MIN_KEY_SIZE            16
#define AES_MAX_KEY_SIZE            32
#define AES_BLOCK_SIZE              16
#define CTR_RFC3686_NONCE_SIZE      4
#define CTR_RFC3686_IV_SIZE         8
#define CTR_RFC3686_MAX_KEY_SIZE    (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
/* Debug printing: compiled in only when CRYPTO_DEBUG is defined; messages
 * below the current 'debug_level' are emitted with file/function/line. */
#ifdef CRYPTO_DEBUG
extern char debug_level;
#define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
#else
#define DPRINTF(level, format, args...)
#endif /* CRYPTO_DEBUG */
102 static int disable_multiblock
= 0;
103 module_param(disable_multiblock
, int, 0);
105 static int disable_deudma
= 1;
107 /* Function decleration */
108 int aes_chip_init(void);
109 u32
endian_swap(u32 input
);
110 u32
input_swap(u32 input
);
111 u32
* memory_alignment(const u8
*arg
, u32
*buff_alloc
, int in_out
, int nbytes
);
112 void aes_dma_memory_copy(u32
*outcopy
, u32
*out_dma
, u8
*out_arg
, int nbytes
);
113 int aes_memory_allocate(int value
);
114 int des_memory_allocate(int value
);
115 void memory_release(u32
*addr
);
120 u32 buf
[AES_MAX_KEY_SIZE
];
121 u8 nonce
[CTR_RFC3686_NONCE_SIZE
];
125 struct aes_container
{
138 struct ablkcipher_request arequest
;
142 aes_priv_t
*aes_queue
;
143 extern deu_drv_priv_t deu_dma_priv
;
145 void hexdump(unsigned char *buf
, unsigned int len
)
147 print_hex_dump(KERN_CONT
, "", DUMP_PREFIX_OFFSET
,
152 /*! \fn void lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg,
153 size_t nbytes, int encdec, int mode)
154 * \ingroup IFX_AES_FUNCTIONS
155 * \brief main interface to AES hardware
156 * \param ctx_arg crypto algo context
157 * \param out_arg output bytestream
158 * \param in_arg input bytestream
159 * \param iv_arg initialization vector
160 * \param nbytes length of bytestream
161 * \param encdec 1 for encrypt; 0 for decrypt
162 * \param mode operation mode such as ebc, cbc, ctr
166 static int lq_deu_aes_core (void *ctx_arg
, u8
*out_arg
, const u8
*in_arg
,
167 u8
*iv_arg
, size_t nbytes
, int encdec
, int mode
)
169 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
170 volatile struct aes_t
*aes
= (volatile struct aes_t
*) AES_START
;
171 struct aes_ctx
*ctx
= (struct aes_ctx
*)ctx_arg
;
172 u32
*in_key
= ctx
->buf
;
174 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
175 int key_len
= ctx
->key_length
;
177 volatile struct deu_dma_t
*dma
= (struct deu_dma_t
*) IFX_DEU_DMA_CON
;
178 struct dma_device_info
*dma_device
= ifx_deu
[0].dma_device
;
179 deu_drv_priv_t
*deu_priv
= (deu_drv_priv_t
*)dma_device
->priv
;
181 //u32 *outcopy = NULL;
182 u32
*dword_mem_aligned_in
= NULL
;
186 /* 128, 192 or 256 bit key length */
187 aes
->controlr
.K
= key_len
/ 8 - 2;
188 if (key_len
== 128 / 8) {
189 aes
->K3R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 0));
190 aes
->K2R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 1));
191 aes
->K1R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 2));
192 aes
->K0R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 3));
194 else if (key_len
== 192 / 8) {
195 aes
->K5R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 0));
196 aes
->K4R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 1));
197 aes
->K3R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 2));
198 aes
->K2R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 3));
199 aes
->K1R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 4));
200 aes
->K0R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 5));
202 else if (key_len
== 256 / 8) {
203 aes
->K7R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 0));
204 aes
->K6R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 1));
205 aes
->K5R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 2));
206 aes
->K4R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 3));
207 aes
->K3R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 4));
208 aes
->K2R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 5));
209 aes
->K1R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 6));
210 aes
->K0R
= DEU_ENDIAN_SWAP(*((u32
*) in_key
+ 7));
213 printk (KERN_ERR
"[%s %s %d]: Invalid key_len : %d\n", __FILE__
, __func__
, __LINE__
, key_len
);
218 /* let HW pre-process DEcryption key in any case (even if
219 ENcryption is used). Key Valid (KV) bit is then only
220 checked in decryption routine! */
221 aes
->controlr
.PNK
= 1;
223 while (aes
->controlr
.BUS
) {
224 // this will not take long
226 AES_DMA_MISC_CONFIG();
228 aes
->controlr
.E_D
= !encdec
; //encryption
229 aes
->controlr
.O
= mode
; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
231 //aes->controlr.F = 128; //default; only for CFB and OFB modes; change only for customer-specific apps
233 aes
->IV3R
= DEU_ENDIAN_SWAP(*(u32
*) iv_arg
);
234 aes
->IV2R
= DEU_ENDIAN_SWAP(*((u32
*) iv_arg
+ 1));
235 aes
->IV1R
= DEU_ENDIAN_SWAP(*((u32
*) iv_arg
+ 2));
236 aes
->IV0R
= DEU_ENDIAN_SWAP(*((u32
*) iv_arg
+ 3));
240 /* Prepare Rx buf length used in dma psuedo interrupt */
241 deu_priv
->deu_rx_buf
= (u32
*)out_arg
;
242 deu_priv
->deu_rx_len
= nbytes
;
244 /* memory alignment issue */
245 dword_mem_aligned_in
= (u32
*) DEU_DWORD_REORDERING(in_arg
, aes_buff_in
, BUFFER_IN
, nbytes
);
247 dma
->controlr
.ALGO
= 1; //AES
248 dma
->controlr
.BS
= 0;
249 aes
->controlr
.DAU
= 0;
250 dma
->controlr
.EN
= 1;
252 while (aes
->controlr
.BUS
) {
253 // wait for AES to be ready
256 deu_priv
->outcopy
= (u32
*) DEU_DWORD_REORDERING(out_arg
, aes_buff_out
, BUFFER_OUT
, nbytes
);
257 deu_priv
->event_src
= AES_ASYNC_EVENT
;
259 wlen
= dma_device_write (dma_device
, (u8
*)dword_mem_aligned_in
, nbytes
, NULL
);
260 if (wlen
!= nbytes
) {
261 dma
->controlr
.EN
= 0;
263 printk (KERN_ERR
"[%s %s %d]: dma_device_write fail!\n", __FILE__
, __func__
, __LINE__
);
267 // WAIT_AES_DMA_READY();
272 *((u32
*) iv_arg
) = DEU_ENDIAN_SWAP(*((u32
*) iv_arg
));
273 *((u32
*) iv_arg
+ 1) = DEU_ENDIAN_SWAP(*((u32
*) iv_arg
+ 1));
274 *((u32
*) iv_arg
+ 2) = DEU_ENDIAN_SWAP(*((u32
*) iv_arg
+ 2));
275 *((u32
*) iv_arg
+ 3) = DEU_ENDIAN_SWAP(*((u32
*) iv_arg
+ 3));
281 /* \fn static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
282 * \ingroup IFX_AES_FUNCTIONS
283 * \brief Counts and return the number of scatterlists
284 * \param *sl Function pointer to the scatterlist
285 * \param total_bytes The total number of bytes that needs to be encrypted/decrypted
286 * \return The number of scatterlists
289 static int count_sgs(struct scatterlist
*sl
, unsigned int total_bytes
)
294 total_bytes
-= sl
[i
].length
;
297 } while (total_bytes
> 0);
302 /* \fn void lq_sg_init(struct scatterlist *src,
303 * struct scatterlist *dst)
304 * \ingroup IFX_AES_FUNCTIONS
305 * \brief Maps the scatterlists into a source/destination page.
306 * \param *src Pointer to the source scatterlist
307 * \param *dst Pointer to the destination scatterlist
310 static void lq_sg_init(struct aes_container
*aes_con
,struct scatterlist
*src
,
311 struct scatterlist
*dst
)
314 struct page
*dst_page
, *src_page
;
316 src_page
= sg_virt(src
);
317 aes_con
->src_buf
= (char *) src_page
;
319 dst_page
= sg_virt(dst
);
320 aes_con
->dst_buf
= (char *) dst_page
;
325 /* \fn static void lq_sg_complete(struct aes_container *aes_con)
326 * \ingroup IFX_AES_FUNCTIONS
327 * \brief Free the used up memory after encryt/decrypt.
330 static void lq_sg_complete(struct aes_container
*aes_con
)
332 unsigned long queue_flag
;
334 spin_lock_irqsave(&aes_queue
->lock
, queue_flag
);
336 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
339 /* \fn static inline struct aes_container *aes_container_cast (
340 * struct scatterlist *dst)
341 * \ingroup IFX_AES_FUNCTIONS
342 * \brief Locate the structure aes_container in memory.
343 * \param *areq Pointer to memory location where ablkcipher_request is located
344 * \return *aes_cointainer The function pointer to aes_container
346 static inline struct aes_container
*aes_container_cast (
347 struct ablkcipher_request
*areq
)
349 return container_of(areq
, struct aes_container
, arequest
);
353 /* \fn static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
355 * \ingroup IFX_AES_FUNCTIONS
356 * \brief Process next packet to be encrypt/decrypt
357 * \param *aes_con AES container structure
358 * \param *areq Pointer to memory location where ablkcipher_request is located
359 * \param state The state of the current packet (part of scatterlist or new packet)
360 * \return -EINVAL: error, -EINPROGRESS: Crypto still running, 1: no more scatterlist
363 static int process_next_packet(struct aes_container
*aes_con
, struct ablkcipher_request
*areq
,
367 int mode
, dir
, err
= -EINVAL
;
368 unsigned long queue_flag
;
369 u32 inc
, nbytes
, remain
, chunk_size
;
370 struct scatterlist
*src
= NULL
;
371 struct scatterlist
*dst
= NULL
;
372 struct crypto_ablkcipher
*cipher
;
375 spin_lock_irqsave(&aes_queue
->lock
, queue_flag
);
377 dir
= aes_con
->encdec
;
378 mode
= aes_con
->mode
;
381 if (state
& PROCESS_SCATTER
) {
382 src
= scatterwalk_sg_next(areq
->src
);
383 dst
= scatterwalk_sg_next(areq
->dst
);
386 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
390 else if (state
& PROCESS_NEW_PACKET
) {
395 remain
= aes_con
->bytes_processed
;
396 chunk_size
= src
->length
;
398 if (remain
> DEU_MAX_PACKET_SIZE
)
399 inc
= DEU_MAX_PACKET_SIZE
;
400 else if (remain
> chunk_size
)
406 aes_con
->nbytes
= inc
;
408 if (state
& PROCESS_SCATTER
) {
409 aes_con
->src_buf
+= aes_con
->nbytes
;
410 aes_con
->dst_buf
+= aes_con
->nbytes
;
413 lq_sg_init(aes_con
, src
, dst
);
415 nbytes
= aes_con
->nbytes
;
417 //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
418 // __LINE__, __func__, nbytes, chunk_size);
420 cipher
= crypto_ablkcipher_reqtfm(areq
);
421 ctx
= crypto_ablkcipher_ctx(cipher
);
424 if (aes_queue
->hw_status
== AES_IDLE
)
425 aes_queue
->hw_status
= AES_STARTED
;
427 aes_con
->bytes_processed
-= aes_con
->nbytes
;
428 err
= ablkcipher_enqueue_request(&aes_queue
->list
, &aes_con
->arequest
);
430 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
431 printk("Failed to enqueue request, ln: %d, err: %d\n",
436 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
438 err
= lq_deu_aes_core(ctx
, aes_con
->dst_buf
, aes_con
->src_buf
, iv
, nbytes
, dir
, mode
);
443 /* \fn static void process_queue (unsigned long data)
444 * \ingroup IFX_AES_FUNCTIONS
445 * \brief tasklet to signal the dequeuing of the next packet to be processed
446 * \param unsigned long data Not used
450 static void process_queue(unsigned long data
)
453 DEU_WAKEUP_EVENT(deu_dma_priv
.deu_thread_wait
, AES_ASYNC_EVENT
,
454 deu_dma_priv
.aes_event_flags
);
458 /* \fn static int aes_crypto_thread (void *data)
459 * \ingroup IFX_AES_FUNCTIONS
460 * \brief AES thread that handles crypto requests from upper layer & DMA
461 * \param *data Not used
462 * \return -EINVAL: DEU failure, -EBUSY: DEU HW busy, 0: exit thread
464 static int aes_crypto_thread (void *data
)
466 struct aes_container
*aes_con
= NULL
;
467 struct ablkcipher_request
*areq
= NULL
;
469 unsigned long queue_flag
;
471 daemonize("lq_aes_thread");
472 printk("AES Queue Manager Starting\n");
476 DEU_WAIT_EVENT(deu_dma_priv
.deu_thread_wait
, AES_ASYNC_EVENT
,
477 deu_dma_priv
.aes_event_flags
);
479 spin_lock_irqsave(&aes_queue
->lock
, queue_flag
);
481 /* wait to prevent starting a crypto session before
482 * exiting the dma interrupt thread.
484 if (aes_queue
->hw_status
== AES_STARTED
) {
485 areq
= ablkcipher_dequeue_request(&aes_queue
->list
);
486 aes_con
= aes_container_cast(areq
);
487 aes_queue
->hw_status
= AES_BUSY
;
489 else if (aes_queue
->hw_status
== AES_IDLE
) {
490 areq
= ablkcipher_dequeue_request(&aes_queue
->list
);
491 aes_con
= aes_container_cast(areq
);
492 aes_queue
->hw_status
= AES_STARTED
;
494 else if (aes_queue
->hw_status
== AES_BUSY
) {
495 areq
= ablkcipher_dequeue_request(&aes_queue
->list
);
496 aes_con
= aes_container_cast(areq
);
498 else if (aes_queue
->hw_status
== AES_COMPLETED
) {
499 lq_sg_complete(aes_con
);
500 aes_queue
->hw_status
= AES_IDLE
;
501 areq
->base
.complete(&areq
->base
, 0);
502 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
505 //printk("debug ln: %d, bytes proc: %d\n", __LINE__, aes_con->bytes_processed);
506 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
509 printk("AES_CON return null\n");
513 if (aes_con
->bytes_processed
== 0) {
517 /* Process new packet or the next packet in a scatterlist */
518 if (aes_con
->flag
& PROCESS_NEW_PACKET
) {
519 aes_con
->flag
= PROCESS_SCATTER
;
520 err
= process_next_packet(aes_con
, areq
, PROCESS_NEW_PACKET
);
523 err
= process_next_packet(aes_con
, areq
, PROCESS_SCATTER
);
525 if (err
== -EINVAL
) {
526 areq
->base
.complete(&areq
->base
, err
);
527 lq_sg_complete(aes_con
);
528 printk("src/dst returned -EINVAL in func: %s\n", __func__
);
531 printk("src/dst returned zero in func: %s\n", __func__
);
538 //printk("debug line - %d, func: %s, qlen: %d\n", __LINE__, __func__, aes_queue->list.qlen);
539 areq
->base
.complete(&areq
->base
, 0);
540 lq_sg_complete(aes_con
);
542 spin_lock_irqsave(&aes_queue
->lock
, queue_flag
);
543 if (aes_queue
->list
.qlen
> 0) {
544 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
545 tasklet_schedule(&aes_queue
->aes_task
);
548 aes_queue
->hw_status
= AES_IDLE
;
549 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
556 /* \fn static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
557 u8 *iv, int dir, int mode)
558 * \ingroup IFX_AES_FUNCTIONS
559 * \brief starts the process of queuing DEU requests
560 * \param *ctx crypto algo context
561 * \param *areq Pointer to the ablkcipher requests
562 * \param *iv Pointer to input vector location
563 * \param dir Encrypt/Decrypt
564 * \mode The mode AES algo is running
565 * \return 0 if success
568 static int lq_aes_queue_mgr(struct aes_ctx
*ctx
, struct ablkcipher_request
*areq
,
569 u8
*iv
, int dir
, int mode
)
572 unsigned long queue_flag
;
573 struct scatterlist
*src
= areq
->src
;
574 struct scatterlist
*dst
= areq
->dst
;
575 struct aes_container
*aes_con
= NULL
;
576 u32 remain
, inc
, nbytes
= areq
->nbytes
;
577 u32 chunk_bytes
= src
->length
;
580 aes_con
= (struct aes_container
*)kmalloc(sizeof(struct aes_container
),
584 printk("Cannot allocate memory for AES container, fn %s, ln %d\n",
589 /* AES encrypt/decrypt mode */
591 nbytes
= AES_BLOCK_SIZE
;
592 chunk_bytes
= AES_BLOCK_SIZE
;
596 aes_con
->bytes_processed
= nbytes
;
597 aes_con
->arequest
= *(areq
);
600 //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
601 // __LINE__, __func__, nbytes, chunk_bytes);
603 if (remain
> DEU_MAX_PACKET_SIZE
)
604 inc
= DEU_MAX_PACKET_SIZE
;
605 else if (remain
> chunk_bytes
)
611 lq_sg_init(aes_con
, src
, dst
);
614 aes_con
->complete
= 1;
616 aes_con
->complete
= 0;
618 aes_con
->nbytes
= inc
;
620 aes_con
->mode
= mode
;
621 aes_con
->encdec
= dir
;
623 spin_lock_irqsave(&aes_queue
->lock
, queue_flag
);
625 if (aes_queue
->hw_status
== AES_STARTED
|| aes_queue
->hw_status
== AES_BUSY
||
626 aes_queue
->list
.qlen
> 0) {
628 aes_con
->flag
= PROCESS_NEW_PACKET
;
629 err
= ablkcipher_enqueue_request(&aes_queue
->list
, &aes_con
->arequest
);
631 /* max queue length reached */
633 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
634 printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__
, err
);
638 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
641 else if (aes_queue
->hw_status
== AES_IDLE
)
642 aes_queue
->hw_status
= AES_STARTED
;
644 aes_con
->flag
= PROCESS_SCATTER
;
645 aes_con
->bytes_processed
-= aes_con
->nbytes
;
646 /* or enqueue the whole structure so as to get back the info
647 * at the moment that it's queued. nbytes might be different */
648 err
= ablkcipher_enqueue_request(&aes_queue
->list
, &aes_con
->arequest
);
651 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
652 printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__
, err
);
656 spin_unlock_irqrestore(&aes_queue
->lock
, queue_flag
);
657 return lq_deu_aes_core(ctx
, aes_con
->dst_buf
, aes_con
->src_buf
, iv
, inc
, dir
, mode
);
661 /* \fn static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
662 * unsigned int keylen)
663 * \ingroup IFX_AES_FUNCTIONS
664 * \brief Sets AES key
665 * \param *tfm Pointer to the ablkcipher transform
666 * \param *in_key Pointer to input keys
667 * \param key_len Length of the AES keys
668 * \return 0 is success, -EINVAL if bad key length
671 static int aes_setkey(struct crypto_ablkcipher
*tfm
, const u8
*in_key
,
674 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
675 unsigned long *flags
= (unsigned long *) &tfm
->base
.crt_flags
;
677 DPRINTF(2, "set_key in %s\n", __FILE__
);
679 if (keylen
!= 16 && keylen
!= 24 && keylen
!= 32) {
680 *flags
|= CRYPTO_TFM_RES_BAD_KEY_LEN
;
684 ctx
->key_length
= keylen
;
685 DPRINTF(0, "ctx @%p, keylen %d, ctx->key_length %d\n", ctx
, keylen
, ctx
->key_length
);
686 memcpy ((u8
*) (ctx
->buf
), in_key
, keylen
);
692 /* \fn static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
693 * unsigned int keylen)
694 * \ingroup IFX_AES_FUNCTIONS
695 * \brief Sets AES key
696 * \param *tfm Pointer to the ablkcipher transform
697 * \param *key Pointer to input keys
698 * \param keylen Length of AES keys
699 * \return 0 is success, -EINVAL if bad key length
702 static int aes_generic_setkey(struct crypto_ablkcipher
*tfm
, const u8
*key
,
705 return aes_setkey(tfm
, key
, keylen
);
708 /* \fn static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
709 * unsigned int keylen)
710 * \ingroup IFX_AES_FUNCTIONS
711 * \brief Sets AES key
712 * \param *tfm Pointer to the ablkcipher transform
713 * \param *in_key Pointer to input keys
714 * \param key_len Length of the AES keys
715 * \return 0 is success, -EINVAL if bad key length
718 static int rfc3686_aes_setkey(struct crypto_ablkcipher
*tfm
,
719 const u8
*in_key
, unsigned int keylen
)
721 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
722 unsigned long *flags
= (unsigned long *)&tfm
->base
.crt_flags
;
724 DPRINTF(2, "ctr_rfc3686_aes_set_key in %s\n", __FILE__
);
726 memcpy(ctx
->nonce
, in_key
+ (keylen
- CTR_RFC3686_NONCE_SIZE
),
727 CTR_RFC3686_NONCE_SIZE
);
729 keylen
-= CTR_RFC3686_NONCE_SIZE
; // remove 4 bytes of nonce
731 if (keylen
!= 16 && keylen
!= 24 && keylen
!= 32) {
732 *flags
|= CRYPTO_TFM_RES_BAD_KEY_LEN
;
736 ctx
->key_length
= keylen
;
738 memcpy ((u8
*) (ctx
->buf
), in_key
, keylen
);
743 /* \fn static int aes_encrypt(struct ablkcipher_request *areq)
744 * \ingroup IFX_AES_FUNCTIONS
745 * \brief Encrypt function for AES algo
746 * \param *areq Pointer to ablkcipher request in memory
747 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
750 static int aes_encrypt (struct ablkcipher_request
*areq
)
752 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
753 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
755 return lq_aes_queue_mgr(ctx
, areq
, NULL
, CRYPTO_DIR_ENCRYPT
, 5);
759 /* \fn static int aes_decrypt(struct ablkcipher_request *areq)
760 * \ingroup IFX_AES_FUNCTIONS
761 * \brief Decrypt function for AES algo
762 * \param *areq Pointer to ablkcipher request in memory
763 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
765 static int aes_decrypt (struct ablkcipher_request
*areq
)
767 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
768 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
770 return lq_aes_queue_mgr(ctx
, areq
, NULL
, CRYPTO_DIR_DECRYPT
, 5);
773 /* \fn static int ecb_aes_decrypt(struct ablkcipher_request *areq)
774 * \ingroup IFX_AES_FUNCTIONS
775 * \brief Encrypt function for AES algo
776 * \param *areq Pointer to ablkcipher request in memory
777 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
780 static int ecb_aes_encrypt (struct ablkcipher_request
*areq
)
782 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
783 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
785 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_ENCRYPT
, 0);
788 /* \fn static int ecb_aes_decrypt(struct ablkcipher_request *areq)
789 * \ingroup IFX_AES_FUNCTIONS
790 * \brief Decrypt function for AES algo
791 * \param *areq Pointer to ablkcipher request in memory
792 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
794 static int ecb_aes_decrypt(struct ablkcipher_request
*areq
)
797 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
798 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
800 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_DECRYPT
, 0);
803 /* \fn static int cbc_aes_encrypt(struct ablkcipher_request *areq)
804 * \ingroup IFX_AES_FUNCTIONS
805 * \brief Encrypt function for AES algo
806 * \param *areq Pointer to ablkcipher request in memory
807 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
810 static int cbc_aes_encrypt (struct ablkcipher_request
*areq
)
812 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
813 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
815 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_ENCRYPT
, 1);
819 /* \fn static int cbc_aes_decrypt(struct ablkcipher_request *areq)
820 * \ingroup IFX_AES_FUNCTIONS
821 * \brief Decrypt function for AES algo
822 * \param *areq Pointer to ablkcipher request in memory
823 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
826 static int cbc_aes_decrypt(struct ablkcipher_request
*areq
)
828 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
829 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
831 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_DECRYPT
, 1);
834 static int ofb_aes_encrypt (struct ablkcipher_request
*areq
)
836 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
837 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
839 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_ENCRYPT
, 2);
843 static int ofb_aes_decrypt(struct ablkcipher_request
*areq
)
845 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
846 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
848 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_DECRYPT
, 2);
851 static int cfb_aes_encrypt (struct ablkcipher_request
*areq
)
853 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
854 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
856 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_ENCRYPT
, 3);
860 static int cfb_aes_decrypt(struct ablkcipher_request
*areq
)
862 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
863 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
865 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_DECRYPT
, 3);
869 /* \fn static int ctr_aes_encrypt(struct ablkcipher_request *areq)
870 * \ingroup IFX_AES_FUNCTIONS
871 * \brief Encrypt function for AES algo
872 * \param *areq Pointer to ablkcipher request in memory
873 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
876 static int ctr_aes_encrypt (struct ablkcipher_request
*areq
)
878 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
879 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
881 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_ENCRYPT
, 4);
885 /* \fn static int ctr_aes_decrypt(struct ablkcipher_request *areq)
886 * \ingroup IFX_AES_FUNCTIONS
887 * \brief Decrypt function for AES algo
888 * \param *areq Pointer to ablkcipher request in memory
889 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
892 static int ctr_aes_decrypt(struct ablkcipher_request
*areq
)
894 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
895 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
897 return lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_DECRYPT
, 4);
900 /* \fn static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
901 * \ingroup IFX_AES_FUNCTIONS
902 * \brief Encrypt function for AES algo
903 * \param *areq Pointer to ablkcipher request in memory
904 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
907 static int rfc3686_aes_encrypt(struct ablkcipher_request
*areq
)
909 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
910 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
912 u8
*info
= areq
->info
;
915 memcpy(rfc3686_iv
, ctx
->nonce
, CTR_RFC3686_NONCE_SIZE
);
916 memcpy(rfc3686_iv
+ CTR_RFC3686_NONCE_SIZE
, info
, CTR_RFC3686_IV_SIZE
);
918 /* initialize counter portion of counter block */
919 *(__be32
*)(rfc3686_iv
+ CTR_RFC3686_NONCE_SIZE
+ CTR_RFC3686_IV_SIZE
) =
922 areq
->info
= rfc3686_iv
;
923 ret
= lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_ENCRYPT
, 4);
928 /* \fn static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
929 * \ingroup IFX_AES_FUNCTIONS
930 * \brief Decrypt function for AES algo
931 * \param *areq Pointer to ablkcipher request in memory
932 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
935 static int rfc3686_aes_decrypt(struct ablkcipher_request
*areq
)
937 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
938 struct aes_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
940 u8
*info
= areq
->info
;
943 /* set up counter block */
944 memcpy(rfc3686_iv
, ctx
->nonce
, CTR_RFC3686_NONCE_SIZE
);
945 memcpy(rfc3686_iv
+ CTR_RFC3686_NONCE_SIZE
, info
, CTR_RFC3686_IV_SIZE
);
947 /* initialize counter portion of counter block */
948 *(__be32
*)(rfc3686_iv
+ CTR_RFC3686_NONCE_SIZE
+ CTR_RFC3686_IV_SIZE
) =
951 areq
->info
= rfc3686_iv
;
952 ret
= lq_aes_queue_mgr(ctx
, areq
, areq
->info
, CRYPTO_DIR_DECRYPT
, 4);
958 struct crypto_alg alg
;
961 /* AES supported algo array */
962 static struct lq_aes_alg aes_drivers_alg
[] = {
966 .cra_driver_name
= "ifxdeu-aes",
967 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
968 .cra_blocksize
= AES_BLOCK_SIZE
,
969 .cra_ctxsize
= sizeof(struct aes_ctx
),
970 .cra_type
= &crypto_ablkcipher_type
,
972 .cra_module
= THIS_MODULE
,
974 .setkey
= aes_setkey
,
975 .encrypt
= aes_encrypt
,
976 .decrypt
= aes_decrypt
,
978 .min_keysize
= AES_MIN_KEY_SIZE
,
979 .max_keysize
= AES_MAX_KEY_SIZE
,
980 .ivsize
= AES_BLOCK_SIZE
,
985 .cra_name
= "ecb(aes)",
986 .cra_driver_name
= "ifxdeu-ecb(aes)",
987 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
988 .cra_blocksize
= AES_BLOCK_SIZE
,
989 .cra_ctxsize
= sizeof(struct aes_ctx
),
990 .cra_type
= &crypto_ablkcipher_type
,
992 .cra_module
= THIS_MODULE
,
994 .setkey
= aes_generic_setkey
,
995 .encrypt
= ecb_aes_encrypt
,
996 .decrypt
= ecb_aes_decrypt
,
998 .min_keysize
= AES_MIN_KEY_SIZE
,
999 .max_keysize
= AES_MAX_KEY_SIZE
,
1000 .ivsize
= AES_BLOCK_SIZE
,
1005 .cra_name
= "cbc(aes)",
1006 .cra_driver_name
= "ifxdeu-cbc(aes)",
1007 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1008 .cra_blocksize
= AES_BLOCK_SIZE
,
1009 .cra_ctxsize
= sizeof(struct aes_ctx
),
1010 .cra_type
= &crypto_ablkcipher_type
,
1011 .cra_priority
= 400,
1012 .cra_module
= THIS_MODULE
,
1014 .setkey
= aes_generic_setkey
,
1015 .encrypt
= cbc_aes_encrypt
,
1016 .decrypt
= cbc_aes_decrypt
,
1018 .min_keysize
= AES_MIN_KEY_SIZE
,
1019 .max_keysize
= AES_MAX_KEY_SIZE
,
1020 .ivsize
= AES_BLOCK_SIZE
,
1025 .cra_name
= "ctr(aes)",
1026 .cra_driver_name
= "ifxdeu-ctr(aes)",
1027 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1028 .cra_blocksize
= AES_BLOCK_SIZE
,
1029 .cra_ctxsize
= sizeof(struct aes_ctx
),
1030 .cra_type
= &crypto_ablkcipher_type
,
1031 .cra_priority
= 400,
1032 .cra_module
= THIS_MODULE
,
1034 .setkey
= aes_generic_setkey
,
1035 .encrypt
= ctr_aes_encrypt
,
1036 .decrypt
= ctr_aes_decrypt
,
1038 .min_keysize
= AES_MIN_KEY_SIZE
,
1039 .max_keysize
= AES_MAX_KEY_SIZE
,
1040 .ivsize
= AES_BLOCK_SIZE
,
1045 .cra_name
= "rfc3686(ctr(aes))",
1046 .cra_driver_name
= "ifxdeu-rfc3686(ctr(aes))",
1047 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1048 .cra_blocksize
= AES_BLOCK_SIZE
,
1049 .cra_ctxsize
= sizeof(struct aes_ctx
),
1050 .cra_type
= &crypto_ablkcipher_type
,
1051 .cra_priority
= 400,
1052 .cra_module
= THIS_MODULE
,
1054 .setkey
= rfc3686_aes_setkey
,
1055 .encrypt
= rfc3686_aes_encrypt
,
1056 .decrypt
= rfc3686_aes_decrypt
,
1058 .min_keysize
= AES_MIN_KEY_SIZE
,
1059 .max_keysize
= CTR_RFC3686_MAX_KEY_SIZE
,
1060 //.max_keysize = AES_MAX_KEY_SIZE,
1061 //.ivsize = CTR_RFC3686_IV_SIZE,
1062 .ivsize
= AES_BLOCK_SIZE
, // else cannot reg
1068 /* \fn int __init lqdeu_async_aes_init (void)
1069 * \ingroup IFX_AES_FUNCTIONS
1070 * \brief Initializes the Async. AES driver
1071 * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
1074 int __init
lqdeu_async_aes_init (void)
1076 int i
, j
, ret
= -EINVAL
;
1078 #define IFX_DEU_DRV_VERSION "2.0.0"
1079 printk(KERN_INFO
"Lantiq Technologies DEU Driver version %s\n", IFX_DEU_DRV_VERSION
);
1081 for (i
= 0; i
< ARRAY_SIZE(aes_drivers_alg
); i
++) {
1082 ret
= crypto_register_alg(&aes_drivers_alg
[i
].alg
);
1083 printk("driver: %s\n", aes_drivers_alg
[i
].alg
.cra_name
);
1093 printk (KERN_NOTICE
"Lantiq DEU AES initialized %s %s.\n",
1094 disable_multiblock
? "" : " (multiblock)", disable_deudma
? "" : " (DMA)");
1100 for (j
= 0; j
< i
; j
++)
1101 crypto_unregister_alg(&aes_drivers_alg
[j
].alg
);
1103 printk(KERN_ERR
"Lantiq %s driver initialization failed!\n", (char *)&aes_drivers_alg
[i
].alg
.cra_driver_name
);
1106 ctr_rfc3686_aes_err
:
1107 for (i
= 0; i
< ARRAY_SIZE(aes_drivers_alg
); i
++) {
1108 if (!strcmp((char *)&aes_drivers_alg
[i
].alg
.cra_name
, "rfc3686(ctr(aes))"))
1109 crypto_unregister_alg(&aes_drivers_alg
[j
].alg
);
1111 printk (KERN_ERR
"Lantiq ctr_rfc3686_aes initialization failed!\n");
1115 /*! \fn void __exit ifxdeu_fini_aes (void)
1116 * \ingroup IFX_AES_FUNCTIONS
1117 * \brief unregister aes driver
1119 void __exit
lqdeu_fini_async_aes (void)
1123 for (i
= 0; i
< ARRAY_SIZE(aes_drivers_alg
); i
++)
1124 crypto_unregister_alg(&aes_drivers_alg
[i
].alg
);
1126 aes_queue
->hw_status
= AES_COMPLETED
;
1128 DEU_WAKEUP_EVENT(deu_dma_priv
.deu_thread_wait
, AES_ASYNC_EVENT
,
1129 deu_dma_priv
.aes_event_flags
);