/******************************************************************************
**
** FILE NAME    : ifxmips_async_aes.c
** PROJECT      : IFX UEIP
** MODULES      : DEU Module
**
** DATE         : October 11, 2010
** AUTHOR       : Mohammad Firdaus
** DESCRIPTION  : Data Encryption Unit Driver for AES Algorithm
** COPYRIGHT    : Copyright (c) 2010
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
**    This program is free software; you can redistribute it and/or modify
**    it under the terms of the GNU General Public License as published by
**    the Free Software Foundation; either version 2 of the License, or
**    (at your option) any later version.
**
** HISTORY
** $Date        $Author             $Comment
** 08,Sept 2009 Mohammad Firdaus    Initial UEIP release
** 11, Oct 2010 Mohammad Firdaus    Kernel Port incl. Async. Ablkcipher mode
** 21,March 2011 Mohammad Firdaus   Changes for Kernel 2.6.32 and IPSec integration
*******************************************************************************/
/*!
  \defgroup IFX_DEU IFX_DEU_DRIVERS
  \ingroup API
  \brief ifx DEU driver module
*/

/*!
  \file ifxmips_async_aes.c
  \ingroup IFX_DEU
  \brief AES Encryption Driver main file
*/

/*!
  \defgroup IFX_AES_FUNCTIONS IFX_AES_FUNCTIONS
  \ingroup IFX_DEU
  \brief IFX AES driver Functions
*/


#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <asm/ifx/ifx_regs.h>
#include <asm/ifx/ifx_types.h>
#include <asm/ifx/common_routines.h>
#include <asm/ifx/irq.h>
#include <asm/ifx/ifx_pmu.h>
#include <asm/ifx/ifx_gpio.h>
#include <asm/kmap_types.h>

#include "ifxmips_deu.h"

#if defined(CONFIG_DANUBE)
#include "ifxmips_deu_danube.h"
extern int ifx_danube_pre_1_4;
#elif defined(CONFIG_AR9)
#include "ifxmips_deu_ar9.h"
#elif defined(CONFIG_VR9) || defined(CONFIG_AR10)
#include "ifxmips_deu_vr9.h"
#else
#error "Unknown platform"
#endif

/* DMA related header and variables */

spinlock_t aes_lock;
#define CRTCL_SECT_INIT        spin_lock_init(&aes_lock)
#define CRTCL_SECT_START       spin_lock_irqsave(&aes_lock, flag)
#define CRTCL_SECT_END         spin_unlock_irqrestore(&aes_lock, flag)

/* Definition of constants */
//#define AES_START   IFX_AES_CON
#define AES_MIN_KEY_SIZE         16
#define AES_MAX_KEY_SIZE         32
#define AES_BLOCK_SIZE           16
#define CTR_RFC3686_NONCE_SIZE   4
#define CTR_RFC3686_IV_SIZE      8
#define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
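
/*
 * For rfc3686(ctr(aes)) the key material passed to setkey() is the AES key
 * followed by a 4-byte nonce (the RFC 3686 "KEYMAT" layout), hence the
 * larger CTR_RFC3686_MAX_KEY_SIZE above.
 */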

#ifdef CRYPTO_DEBUG
extern char debug_level;
#define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
#else
#define DPRINTF(level, format, args...)
#endif /* CRYPTO_DEBUG */


static int disable_multiblock = 0;
module_param(disable_multiblock, int, 0);

static int disable_deudma = 1;

/* Function declarations */
int aes_chip_init(void);
u32 endian_swap(u32 input);
u32 input_swap(u32 input);
u32 *memory_alignment(const u8 *arg, u32 *buff_alloc, int in_out, int nbytes);
void aes_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
int aes_memory_allocate(int value);
int des_memory_allocate(int value);
void memory_release(u32 *addr);


struct aes_ctx {
    int key_length;                    /* AES key length in bytes */
    u32 buf[AES_MAX_KEY_SIZE];         /* AES key material */
    u8 nonce[CTR_RFC3686_NONCE_SIZE];  /* nonce for rfc3686(ctr(aes)) */
};

struct aes_container {
    u8 *iv;                 /* initialization vector */
    u8 *src_buf;            /* current source buffer (from scatterlist) */
    u8 *dst_buf;            /* current destination buffer (from scatterlist) */

    int mode;               /* cipher mode, see lq_deu_aes_core() */
    int encdec;             /* CRYPTO_DIR_ENCRYPT / CRYPTO_DIR_DECRYPT */
    int complete;
    int flag;               /* PROCESS_NEW_PACKET or PROCESS_SCATTER */

    u32 bytes_processed;    /* bytes still to be processed */
    u32 nbytes;             /* size of the current chunk */

    struct ablkcipher_request arequest;
};

aes_priv_t *aes_queue;
extern deu_drv_priv_t deu_dma_priv;

void hexdump(unsigned char *buf, unsigned int len)
{
    print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
                   16, 1,
                   buf, len, false);
}

/*! \fn int lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg,
                             size_t nbytes, int encdec, int mode)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief main interface to the AES hardware
 *  \param ctx_arg crypto algo context
 *  \param out_arg output bytestream
 *  \param in_arg input bytestream
 *  \param iv_arg initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param mode operation mode such as ecb, cbc, ctr
 *
*/

static int lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
                            u8 *iv_arg, size_t nbytes, int encdec, int mode)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    u32 *in_key = ctx->buf;
    unsigned long flag;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    int key_len = ctx->key_length;

    volatile struct deu_dma_t *dma = (struct deu_dma_t *) IFX_DEU_DMA_CON;
    struct dma_device_info *dma_device = ifx_deu[0].dma_device;
    deu_drv_priv_t *deu_priv = (deu_drv_priv_t *)dma_device->priv;
    int wlen = 0;
    //u32 *outcopy = NULL;
    u32 *dword_mem_aligned_in = NULL;

    CRTCL_SECT_START;

    /* 128, 192 or 256 bit key length */
    aes->controlr.K = key_len / 8 - 2;
    if (key_len == 128 / 8) {
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
    }
    else if (key_len == 192 / 8) {
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
    }
    else if (key_len == 256 / 8) {
        aes->K7R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K6R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 6));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 7));
    }
    else {
        printk (KERN_ERR "[%s %s %d]: Invalid key_len : %d\n", __FILE__, __func__, __LINE__, key_len);
        CRTCL_SECT_END;
        return -EINVAL;
    }

    /* let HW pre-process DEcryption key in any case (even if
       ENcryption is used). Key Valid (KV) bit is then only
       checked in decryption routine! */
    aes->controlr.PNK = 1;

    while (aes->controlr.BUS) {
        // this will not take long
    }
    AES_DMA_MISC_CONFIG();

    aes->controlr.E_D = !encdec;    // E_D = 0 selects encryption
    aes->controlr.O = mode;         // 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR

    //aes->controlr.F = 128;        // default; only for CFB and OFB modes; change only for customer-specific apps
    if (mode > 0) {
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
    }


    /* Prepare Rx buf length used in dma pseudo interrupt */
    deu_priv->deu_rx_buf = (u32 *)out_arg;
    deu_priv->deu_rx_len = nbytes;

    /* memory alignment issue */
    dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, aes_buff_in, BUFFER_IN, nbytes);

    dma->controlr.ALGO = 1;    // AES
    dma->controlr.BS = 0;
    aes->controlr.DAU = 0;
    dma->controlr.EN = 1;

    while (aes->controlr.BUS) {
        // wait for AES to be ready
    }

    deu_priv->outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, aes_buff_out, BUFFER_OUT, nbytes);
    deu_priv->event_src = AES_ASYNC_EVENT;

    wlen = dma_device_write (dma_device, (u8 *)dword_mem_aligned_in, nbytes, NULL);
    if (wlen != nbytes) {
        dma->controlr.EN = 0;
        CRTCL_SECT_END;
        printk (KERN_ERR "[%s %s %d]: dma_device_write fail!\n", __FILE__, __func__, __LINE__);
        return -EINVAL;
    }

    // WAIT_AES_DMA_READY();

    CRTCL_SECT_END;

    if (mode > 0) {
        *((u32 *) iv_arg) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg));
        *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        *((u32 *) iv_arg + 2) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        *((u32 *) iv_arg + 3) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
    }

    return -EINPROGRESS;
}
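
/*
 * Note the asynchronous contract: a successful submission returns
 * -EINPROGRESS. Completion is signalled from the DMA side (the "pseudo
 * interrupt" for which deu_rx_buf/deu_rx_len were prepared above), which
 * eventually wakes aes_crypto_thread through DEU_WAKEUP_EVENT.
 */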

/* \fn static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Counts and returns the number of scatterlist entries
 * \param *sl Pointer to the scatterlist
 * \param total_bytes The total number of bytes that need to be encrypted/decrypted
 * \return The number of scatterlist entries
*/

static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
    int i = 0;

    do {
        total_bytes -= sl[i].length;
        i++;
    } while (total_bytes > 0);

    return i;
}

/* \fn static void lq_sg_init(struct aes_container *aes_con, struct scatterlist *src,
 *                            struct scatterlist *dst)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Maps the current source and destination scatterlist entries into the container
 * \param *aes_con Pointer to the AES container structure
 * \param *src Pointer to the source scatterlist
 * \param *dst Pointer to the destination scatterlist
*/

static void lq_sg_init(struct aes_container *aes_con, struct scatterlist *src,
                       struct scatterlist *dst)
{
    /* sg_virt() yields the kernel virtual address of an sg entry */
    aes_con->src_buf = (char *) sg_virt(src);
    aes_con->dst_buf = (char *) sg_virt(dst);
}


/* \fn static void lq_sg_complete(struct aes_container *aes_con)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Frees the container memory after encrypt/decrypt.
*/

static void lq_sg_complete(struct aes_container *aes_con)
{
    unsigned long queue_flag;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);
    kfree(aes_con);
    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
}

/* \fn static inline struct aes_container *aes_container_cast (
 *                    struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Locates the aes_container structure in memory.
 * \param *areq Pointer to memory location where the ablkcipher_request is located
 * \return *aes_container Pointer to the enclosing aes_container
*/
static inline struct aes_container *aes_container_cast (
                 struct ablkcipher_request *areq)
{
    return container_of(areq, struct aes_container, arequest);
}
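
/*
 * This relies on struct aes_container embedding its ablkcipher_request by
 * value (the arequest member), so container_of() can map the request pointer
 * returned by the crypto queue back to the enclosing container.
 */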


/* \fn static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
 *                                    int state)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Processes the next packet to be encrypted/decrypted
 * \param *aes_con AES container structure
 * \param *areq Pointer to memory location where the ablkcipher_request is located
 * \param state The state of the current packet (part of a scatterlist or a new packet)
 * \return -EINVAL: error, -EINPROGRESS: crypto still running, 1: no more scatterlists
*/

static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
                               int state)
{
    u8 *iv;
    int mode, dir, err = -EINVAL;
    unsigned long queue_flag;
    u32 inc, nbytes, remain, chunk_size;
    struct scatterlist *src = NULL;
    struct scatterlist *dst = NULL;
    struct crypto_ablkcipher *cipher;
    struct aes_ctx *ctx;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);

    dir = aes_con->encdec;
    mode = aes_con->mode;
    iv = aes_con->iv;

    if (state & PROCESS_SCATTER) {
        src = scatterwalk_sg_next(areq->src);
        dst = scatterwalk_sg_next(areq->dst);

        if (!src || !dst) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            return 1;
        }
    }
    else if (state & PROCESS_NEW_PACKET) {
        src = areq->src;
        dst = areq->dst;
    }

    remain = aes_con->bytes_processed;
    chunk_size = src->length;

    if (remain > DEU_MAX_PACKET_SIZE)
        inc = DEU_MAX_PACKET_SIZE;
    else if (remain > chunk_size)
        inc = chunk_size;
    else
        inc = remain;

    remain -= inc;
    aes_con->nbytes = inc;

    if (state & PROCESS_SCATTER) {
        aes_con->src_buf += aes_con->nbytes;
        aes_con->dst_buf += aes_con->nbytes;
    }

    lq_sg_init(aes_con, src, dst);

    nbytes = aes_con->nbytes;

    //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
    //        __LINE__, __func__, nbytes, chunk_size);

    cipher = crypto_ablkcipher_reqtfm(areq);
    ctx = crypto_ablkcipher_ctx(cipher);

    if (aes_queue->hw_status == AES_IDLE)
        aes_queue->hw_status = AES_STARTED;

    aes_con->bytes_processed -= aes_con->nbytes;
    err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);
    if (err == -EBUSY) {
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        printk("Failed to enqueue request, ln: %d, err: %d\n",
               __LINE__, err);
        return -EINVAL;
    }

    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

    err = lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, nbytes, dir, mode);
    return err;
}

/* \fn static void process_queue (unsigned long data)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief tasklet to signal the dequeuing of the next packet to be processed
 * \param data Not used
 * \return void
*/

static void process_queue(unsigned long data)
{
    DEU_WAKEUP_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
                     deu_dma_priv.aes_event_flags);
}


/* \fn static int aes_crypto_thread (void *data)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief AES thread that handles crypto requests from upper layer & DMA
 * \param *data Not used
 * \return -EINVAL: DEU failure, -EBUSY: DEU HW busy, 0: exit thread
*/
static int aes_crypto_thread (void *data)
{
    struct aes_container *aes_con = NULL;
    struct ablkcipher_request *areq = NULL;
    int err;
    unsigned long queue_flag;

    daemonize("lq_aes_thread");
    printk("AES Queue Manager Starting\n");

    while (1)
    {
        DEU_WAIT_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
                       deu_dma_priv.aes_event_flags);

        spin_lock_irqsave(&aes_queue->lock, queue_flag);

        /* wait to prevent starting a crypto session before
         * exiting the dma interrupt thread.
         */
        if (aes_queue->hw_status == AES_STARTED) {
            areq = ablkcipher_dequeue_request(&aes_queue->list);
            aes_con = aes_container_cast(areq);
            aes_queue->hw_status = AES_BUSY;
        }
        else if (aes_queue->hw_status == AES_IDLE) {
            areq = ablkcipher_dequeue_request(&aes_queue->list);
            aes_con = aes_container_cast(areq);
            aes_queue->hw_status = AES_STARTED;
        }
        else if (aes_queue->hw_status == AES_BUSY) {
            areq = ablkcipher_dequeue_request(&aes_queue->list);
            aes_con = aes_container_cast(areq);
        }
        else if (aes_queue->hw_status == AES_COMPLETED) {
            lq_sg_complete(aes_con);
            aes_queue->hw_status = AES_IDLE;
            areq->base.complete(&areq->base, 0);
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            return 0;
        }
        //printk("debug ln: %d, bytes proc: %d\n", __LINE__, aes_con->bytes_processed);
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

        if (!aes_con) {
            printk("AES_CON return null\n");
            goto aes_done;
        }

        if (aes_con->bytes_processed == 0) {
            goto aes_done;
        }

        /* Process new packet or the next packet in a scatterlist */
        if (aes_con->flag & PROCESS_NEW_PACKET) {
            aes_con->flag = PROCESS_SCATTER;
            err = process_next_packet(aes_con, areq, PROCESS_NEW_PACKET);
        }
        else
            err = process_next_packet(aes_con, areq, PROCESS_SCATTER);

        if (err == -EINVAL) {
            areq->base.complete(&areq->base, err);
            lq_sg_complete(aes_con);
            printk("src/dst returned -EINVAL in func: %s\n", __func__);
        }
        else if (err > 0) {
            printk("src/dst returned zero in func: %s\n", __func__);
            goto aes_done;
        }

        continue;

aes_done:
        //printk("debug line - %d, func: %s, qlen: %d\n", __LINE__, __func__, aes_queue->list.qlen);
        areq->base.complete(&areq->base, 0);
        lq_sg_complete(aes_con);

        spin_lock_irqsave(&aes_queue->lock, queue_flag);
        if (aes_queue->list.qlen > 0) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            tasklet_schedule(&aes_queue->aes_task);
        }
        else {
            aes_queue->hw_status = AES_IDLE;
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        }
    } //while(1)

    return 0;
}
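
/*
 * hw_status transitions used by the queue manager and the thread above:
 *   AES_IDLE      -> AES_STARTED  first request handed to the hardware
 *   AES_STARTED   -> AES_BUSY     follow-up chunks in flight
 *   queue drained -> AES_IDLE     set in the aes_done path
 *   AES_COMPLETED                 set at module exit; makes the thread return
 */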

/* \fn static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
 *                                 u8 *iv, int dir, int mode)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief starts the process of queuing DEU requests
 * \param *ctx crypto algo context
 * \param *areq Pointer to the ablkcipher request
 * \param *iv Pointer to the input vector location
 * \param dir Encrypt/Decrypt
 * \param mode The mode the AES algo is run in
 * \return 0 if success
*/

static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
                            u8 *iv, int dir, int mode)
{
    int err = -EINVAL;
    unsigned long queue_flag;
    struct scatterlist *src = areq->src;
    struct scatterlist *dst = areq->dst;
    struct aes_container *aes_con = NULL;
    u32 remain, inc, nbytes = areq->nbytes;
    u32 chunk_bytes = src->length;


    aes_con = (struct aes_container *)kmalloc(sizeof(struct aes_container),
                                              GFP_KERNEL);

    if (!(aes_con)) {
        printk("Cannot allocate memory for AES container, fn %s, ln %d\n",
               __func__, __LINE__);
        return -ENOMEM;
    }

    /* mode 5 is the plain "aes" algo: a single AES block run in ECB mode */
    if (mode == 5) {
        nbytes = AES_BLOCK_SIZE;
        chunk_bytes = AES_BLOCK_SIZE;
        mode = 0;
    }

    aes_con->bytes_processed = nbytes;
    aes_con->arequest = *(areq);
    remain = nbytes;

    //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
    //        __LINE__, __func__, nbytes, chunk_bytes);

    if (remain > DEU_MAX_PACKET_SIZE)
        inc = DEU_MAX_PACKET_SIZE;
    else if (remain > chunk_bytes)
        inc = chunk_bytes;
    else
        inc = remain;

    remain -= inc;
    lq_sg_init(aes_con, src, dst);

    if (remain <= 0)
        aes_con->complete = 1;
    else
        aes_con->complete = 0;

    aes_con->nbytes = inc;
    aes_con->iv = iv;
    aes_con->mode = mode;
    aes_con->encdec = dir;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);

    if (aes_queue->hw_status == AES_STARTED || aes_queue->hw_status == AES_BUSY ||
        aes_queue->list.qlen > 0) {

        aes_con->flag = PROCESS_NEW_PACKET;
        err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

        /* max queue length reached */
        if (err == -EBUSY) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__, err);
            return err;
        }

        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        return -EINPROGRESS;
    }
    else if (aes_queue->hw_status == AES_IDLE)
        aes_queue->hw_status = AES_STARTED;

    aes_con->flag = PROCESS_SCATTER;
    aes_con->bytes_processed -= aes_con->nbytes;
    /* or enqueue the whole structure so as to get back the info
     * at the moment that it's queued. nbytes might be different */
    err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

    if (err == -EBUSY) {
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__, err);
        return err;
    }

    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
    return lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, inc, dir, mode);
}
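
/*
 * Dispatch summary: if the hardware is already started/busy or requests are
 * queued, the new request is only enqueued (PROCESS_NEW_PACKET) and later
 * picked up by aes_crypto_thread; otherwise the first chunk is pushed to the
 * DEU right away and the container is queued so the thread can continue with
 * the remaining bytes.
 */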

/* \fn static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
 *                           unsigned int keylen)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Sets the AES key
 * \param *tfm Pointer to the ablkcipher transform
 * \param *in_key Pointer to the input key
 * \param keylen Length of the AES key
 * \return 0 on success, -EINVAL if bad key length
*/

static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
                      unsigned int keylen)
{
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    unsigned long *flags = (unsigned long *) &tfm->base.crt_flags;

    DPRINTF(2, "set_key in %s\n", __FILE__);

    if (keylen != 16 && keylen != 24 && keylen != 32) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    ctx->key_length = keylen;
    DPRINTF(0, "ctx @%p, keylen %d, ctx->key_length %d\n", ctx, keylen, ctx->key_length);
    memcpy ((u8 *) (ctx->buf), in_key, keylen);

    return 0;
}

/* \fn static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 *                                   unsigned int keylen)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Sets the AES key
 * \param *tfm Pointer to the ablkcipher transform
 * \param *key Pointer to the input key
 * \param keylen Length of the AES key
 * \return 0 on success, -EINVAL if bad key length
*/

static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
    return aes_setkey(tfm, key, keylen);
}

/* \fn static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
 *                                   unsigned int keylen)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Sets the AES key and nonce for rfc3686(ctr(aes))
 * \param *tfm Pointer to the ablkcipher transform
 * \param *in_key Pointer to the input key (AES key followed by the nonce)
 * \param keylen Length of the combined key material
 * \return 0 on success, -EINVAL if bad key length
*/

static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm,
                              const u8 *in_key, unsigned int keylen)
{
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    unsigned long *flags = (unsigned long *)&tfm->base.crt_flags;

    DPRINTF(2, "ctr_rfc3686_aes_set_key in %s\n", __FILE__);

    memcpy(ctx->nonce, in_key + (keylen - CTR_RFC3686_NONCE_SIZE),
           CTR_RFC3686_NONCE_SIZE);

    keylen -= CTR_RFC3686_NONCE_SIZE;    // remove the 4 bytes of nonce

    if (keylen != 16 && keylen != 24 && keylen != 32) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    ctx->key_length = keylen;

    memcpy ((u8 *) (ctx->buf), in_key, keylen);

    return 0;
}
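
/*
 * The rfc3686 wrappers below assemble the 16-byte counter block as
 * specified by RFC 3686:
 *
 *   | nonce (4 bytes) | IV from the request (8 bytes) | counter = 1 (4 bytes, big endian) |
 */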

/* \fn static int aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
*/

static int aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_ENCRYPT, 5);
}

/* \fn static int aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
*/
static int aes_decrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_DECRYPT, 5);
}

/* \fn static int ecb_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the ECB AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
*/

static int ecb_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 0);
}

/* \fn static int ecb_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the ECB AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
*/
static int ecb_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 0);
}

/* \fn static int cbc_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the CBC AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
*/

static int cbc_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 1);
}

/* \fn static int cbc_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the CBC AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
*/

static int cbc_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 1);
}
#if 0
static int ofb_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 2);
}

static int ofb_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 2);
}

static int cfb_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 3);
}

static int cfb_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 3);
}
#endif

/* \fn static int ctr_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the CTR AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
*/

static int ctr_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4);
}

/* \fn static int ctr_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the CTR AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
*/

static int ctr_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4);
}

/* \fn static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the RFC 3686 AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
*/

static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
    int ret;
    u8 *info = areq->info;
    u8 rfc3686_iv[16];

    /* set up counter block */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    areq->info = rfc3686_iv;
    ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4);
    areq->info = info;
    return ret;
}

/* \fn static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the RFC 3686 AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
*/

static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
    int ret;
    u8 *info = areq->info;
    u8 rfc3686_iv[16];

    /* set up counter block */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    areq->info = rfc3686_iv;
    ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4);
    areq->info = info;
    return ret;
}

struct lq_aes_alg {
    struct crypto_alg alg;
};

/* AES supported algo array */
static struct lq_aes_alg aes_drivers_alg[] = {
    {
        .alg = {
            .cra_name = "aes",
            .cra_driver_name = "ifxdeu-aes",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 300,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_setkey,
                .encrypt = aes_encrypt,
                .decrypt = aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "ecb(aes)",
            .cra_driver_name = "ifxdeu-ecb(aes)",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 400,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_generic_setkey,
                .encrypt = ecb_aes_encrypt,
                .decrypt = ecb_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "cbc(aes)",
            .cra_driver_name = "ifxdeu-cbc(aes)",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 400,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_generic_setkey,
                .encrypt = cbc_aes_encrypt,
                .decrypt = cbc_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "ctr(aes)",
            .cra_driver_name = "ifxdeu-ctr(aes)",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 400,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_generic_setkey,
                .encrypt = ctr_aes_encrypt,
                .decrypt = ctr_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "rfc3686(ctr(aes))",
            .cra_driver_name = "ifxdeu-rfc3686(ctr(aes))",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 400,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = rfc3686_aes_setkey,
                .encrypt = rfc3686_aes_encrypt,
                .decrypt = rfc3686_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
                //.max_keysize = AES_MAX_KEY_SIZE,
                //.ivsize = CTR_RFC3686_IV_SIZE,
                .ivsize = AES_BLOCK_SIZE,    // else it cannot register
            }
        }
    }
};
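
/*
 * Illustrative sketch only (not part of this driver): a kernel-side consumer
 * of the old ablkcipher API from this kernel generation could reach the
 * "cbc(aes)" implementation registered above roughly like this. The names
 * my_key, my_iv, my_src_sg, my_dst_sg and my_complete_cb are hypothetical
 * placeholders.
 *
 *   struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *   struct ablkcipher_request *req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *   crypto_ablkcipher_setkey(tfm, my_key, 16);
 *   ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                   my_complete_cb, NULL);
 *   ablkcipher_request_set_crypt(req, my_src_sg, my_dst_sg, nbytes, my_iv);
 *
 *   if (crypto_ablkcipher_encrypt(req) == -EINPROGRESS)
 *       ;  // my_complete_cb fires once the DEU DMA finishes
 *
 *   ablkcipher_request_free(req);
 *   crypto_free_ablkcipher(tfm);
 */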

/* \fn int __init lqdeu_async_aes_init (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Initializes the async AES driver
 * \return 0 on success, negative error code on failure
*/

int __init lqdeu_async_aes_init (void)
{
    int i, j, ret = -EINVAL;

#define IFX_DEU_DRV_VERSION "2.0.0"
    printk(KERN_INFO "Lantiq Technologies DEU Driver version %s\n", IFX_DEU_DRV_VERSION);

    for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++) {
        ret = crypto_register_alg(&aes_drivers_alg[i].alg);
        printk("driver: %s\n", aes_drivers_alg[i].alg.cra_name);
        if (ret)
            goto aes_err;
    }

    aes_chip_init();

    CRTCL_SECT_INIT;


    printk (KERN_NOTICE "Lantiq DEU AES initialized %s %s.\n",
            disable_multiblock ? "" : " (multiblock)", disable_deudma ? "" : " (DMA)");

    return ret;

aes_err:

    for (j = 0; j < i; j++)
        crypto_unregister_alg(&aes_drivers_alg[j].alg);

    printk(KERN_ERR "Lantiq %s driver initialization failed!\n", (char *)&aes_drivers_alg[i].alg.cra_driver_name);
    return ret;

ctr_rfc3686_aes_err:
    for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++) {
        if (!strcmp((char *)&aes_drivers_alg[i].alg.cra_name, "rfc3686(ctr(aes))"))
            crypto_unregister_alg(&aes_drivers_alg[i].alg);
    }
    printk (KERN_ERR "Lantiq ctr_rfc3686_aes initialization failed!\n");
    return ret;
}

/*! \fn void __exit lqdeu_fini_async_aes (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief unregisters the AES driver
*/
void __exit lqdeu_fini_async_aes (void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++)
        crypto_unregister_alg(&aes_drivers_alg[i].alg);

    aes_queue->hw_status = AES_COMPLETED;

    DEU_WAKEUP_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
                     deu_dma_priv.aes_event_flags);

    kfree(aes_queue);
}