drivers/crypto/atmel-aes.c
1 /*
2  * Cryptographic API.
3  *
4  * Support for ATMEL AES HW acceleration.
5  *
6  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7  * Author: Nicolas Royer <nicolas@eukrea.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as published
11  * by the Free Software Foundation.
12  *
13  * Some ideas are from omap-aes.c driver.
14  */
15
16
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/io.h>
23 #include <linux/hw_random.h>
24 #include <linux/platform_device.h>
25
26 #include <linux/device.h>
27 #include <linux/init.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/scatterlist.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/of_device.h>
34 #include <linux/delay.h>
35 #include <linux/crypto.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/algapi.h>
38 #include <crypto/aes.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/platform_data/crypto-atmel.h>
41 #include <dt-bindings/dma/at91.h>
42 #include "atmel-aes-regs.h"
43
44 #define ATMEL_AES_PRIORITY      300
45
46 #define ATMEL_AES_BUFFER_ORDER  2
47 #define ATMEL_AES_BUFFER_SIZE   (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
48
49 #define CFB8_BLOCK_SIZE         1
50 #define CFB16_BLOCK_SIZE        2
51 #define CFB32_BLOCK_SIZE        4
52 #define CFB64_BLOCK_SIZE        8
53
54 #define SIZE_IN_WORDS(x)        ((x) >> 2)
55
56 /* AES flags */
57 /* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
58 #define AES_FLAGS_ENCRYPT       AES_MR_CYPHER_ENC
59 #define AES_FLAGS_GTAGEN        AES_MR_GTAGEN
60 #define AES_FLAGS_OPMODE_MASK   (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
61 #define AES_FLAGS_ECB           AES_MR_OPMOD_ECB
62 #define AES_FLAGS_CBC           AES_MR_OPMOD_CBC
63 #define AES_FLAGS_OFB           AES_MR_OPMOD_OFB
64 #define AES_FLAGS_CFB128        (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
65 #define AES_FLAGS_CFB64         (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
66 #define AES_FLAGS_CFB32         (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
67 #define AES_FLAGS_CFB16         (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
68 #define AES_FLAGS_CFB8          (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
69 #define AES_FLAGS_CTR           AES_MR_OPMOD_CTR
70 #define AES_FLAGS_GCM           AES_MR_OPMOD_GCM
71
72 #define AES_FLAGS_MODE_MASK     (AES_FLAGS_OPMODE_MASK |        \
73                                  AES_FLAGS_ENCRYPT |            \
74                                  AES_FLAGS_GTAGEN)
75
76 #define AES_FLAGS_INIT          BIT(2)
77 #define AES_FLAGS_BUSY          BIT(3)
78 #define AES_FLAGS_DUMP_REG      BIT(4)
79
80 #define AES_FLAGS_PERSISTENT    (AES_FLAGS_INIT | AES_FLAGS_BUSY)
81
82 #define ATMEL_AES_QUEUE_LENGTH  50
83
84 #define ATMEL_AES_DMA_THRESHOLD         256
85
86
87 struct atmel_aes_caps {
88         bool                    has_dualbuff;
89         bool                    has_cfb64;
90         bool                    has_ctr32;
91         bool                    has_gcm;
92         u32                     max_burst_size;
93 };
94
95 struct atmel_aes_dev;
96
97
98 typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
99
100
101 struct atmel_aes_base_ctx {
102         struct atmel_aes_dev    *dd;
103         atmel_aes_fn_t          start;
104         int                     keylen;
105         u32                     key[AES_KEYSIZE_256 / sizeof(u32)];
106         u16                     block_size;
107 };
108
109 struct atmel_aes_ctx {
110         struct atmel_aes_base_ctx       base;
111 };
112
113 struct atmel_aes_ctr_ctx {
114         struct atmel_aes_base_ctx       base;
115
116         u32                     iv[AES_BLOCK_SIZE / sizeof(u32)];
117         size_t                  offset;
118         struct scatterlist      src[2];
119         struct scatterlist      dst[2];
120 };
121
122 struct atmel_aes_gcm_ctx {
123         struct atmel_aes_base_ctx       base;
124
125         struct scatterlist      src[2];
126         struct scatterlist      dst[2];
127
128         u32                     j0[AES_BLOCK_SIZE / sizeof(u32)];
129         u32                     tag[AES_BLOCK_SIZE / sizeof(u32)];
130         u32                     ghash[AES_BLOCK_SIZE / sizeof(u32)];
131         size_t                  textlen;
132
133         const u32               *ghash_in;
134         u32                     *ghash_out;
135         atmel_aes_fn_t          ghash_resume;
136 };
137
138 struct atmel_aes_reqctx {
139         unsigned long           mode;
140 };
141
142 struct atmel_aes_dma {
143         struct dma_chan         *chan;
144         struct scatterlist      *sg;
145         int                     nents;
146         unsigned int            remainder;
147         unsigned int            sg_len;
148 };
149
150 struct atmel_aes_dev {
151         struct list_head        list;
152         unsigned long           phys_base;
153         void __iomem            *io_base;
154
155         struct crypto_async_request     *areq;
156         struct atmel_aes_base_ctx       *ctx;
157
158         bool                    is_async;
159         atmel_aes_fn_t          resume;
160         atmel_aes_fn_t          cpu_transfer_complete;
161
162         struct device           *dev;
163         struct clk              *iclk;
164         int                     irq;
165
166         unsigned long           flags;
167
168         spinlock_t              lock;
169         struct crypto_queue     queue;
170
171         struct tasklet_struct   done_task;
172         struct tasklet_struct   queue_task;
173
174         size_t                  total;
175         size_t                  datalen;
176         u32                     *data;
177
178         struct atmel_aes_dma    src;
179         struct atmel_aes_dma    dst;
180
181         size_t                  buflen;
182         void                    *buf;
183         struct scatterlist      aligned_sg;
184         struct scatterlist      *real_dst;
185
186         struct atmel_aes_caps   caps;
187
188         u32                     hw_version;
189 };
190
191 struct atmel_aes_drv {
192         struct list_head        dev_list;
193         spinlock_t              lock;
194 };
195
196 static struct atmel_aes_drv atmel_aes = {
197         .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
198         .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
199 };
200
201 #ifdef VERBOSE_DEBUG
202 static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
203 {
204         switch (offset) {
205         case AES_CR:
206                 return "CR";
207
208         case AES_MR:
209                 return "MR";
210
211         case AES_ISR:
212                 return "ISR";
213
214         case AES_IMR:
215                 return "IMR";
216
217         case AES_IER:
218                 return "IER";
219
220         case AES_IDR:
221                 return "IDR";
222
223         case AES_KEYWR(0):
224         case AES_KEYWR(1):
225         case AES_KEYWR(2):
226         case AES_KEYWR(3):
227         case AES_KEYWR(4):
228         case AES_KEYWR(5):
229         case AES_KEYWR(6):
230         case AES_KEYWR(7):
231                 snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
232                 break;
233
234         case AES_IDATAR(0):
235         case AES_IDATAR(1):
236         case AES_IDATAR(2):
237         case AES_IDATAR(3):
238                 snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
239                 break;
240
241         case AES_ODATAR(0):
242         case AES_ODATAR(1):
243         case AES_ODATAR(2):
244         case AES_ODATAR(3):
245                 snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
246                 break;
247
248         case AES_IVR(0):
249         case AES_IVR(1):
250         case AES_IVR(2):
251         case AES_IVR(3):
252                 snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
253                 break;
254
255         case AES_AADLENR:
256                 return "AADLENR";
257
258         case AES_CLENR:
259                 return "CLENR";
260
261         case AES_GHASHR(0):
262         case AES_GHASHR(1):
263         case AES_GHASHR(2):
264         case AES_GHASHR(3):
265                 snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
266                 break;
267
268         case AES_TAGR(0):
269         case AES_TAGR(1):
270         case AES_TAGR(2):
271         case AES_TAGR(3):
272                 snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
273                 break;
274
275         case AES_CTRR:
276                 return "CTRR";
277
278         case AES_GCMHR(0):
279         case AES_GCMHR(1):
280         case AES_GCMHR(2):
281         case AES_GCMHR(3):
282                 snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
283                 break;
284         default:
285                 snprintf(tmp, sz, "0x%02x", offset);
286                 break;
287         }
288
289         return tmp;
290 }
291 #endif /* VERBOSE_DEBUG */
292
293 /* Shared functions */
294
295 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
296 {
297         u32 value = readl_relaxed(dd->io_base + offset);
298
299 #ifdef VERBOSE_DEBUG
300         if (dd->flags & AES_FLAGS_DUMP_REG) {
301                 char tmp[16];
302
303                 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
304                          atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
305         }
306 #endif /* VERBOSE_DEBUG */
307
308         return value;
309 }
310
311 static inline void atmel_aes_write(struct atmel_aes_dev *dd,
312                                         u32 offset, u32 value)
313 {
314 #ifdef VERBOSE_DEBUG
315         if (dd->flags & AES_FLAGS_DUMP_REG) {
316                 char tmp[16];
317
318                 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
319                          atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
320         }
321 #endif /* VERBOSE_DEBUG */
322
323         writel_relaxed(value, dd->io_base + offset);
324 }
325
326 static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
327                                         u32 *value, int count)
328 {
329         for (; count--; value++, offset += 4)
330                 *value = atmel_aes_read(dd, offset);
331 }
332
333 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
334                               const u32 *value, int count)
335 {
336         for (; count--; value++, offset += 4)
337                 atmel_aes_write(dd, offset, *value);
338 }
339
340 static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
341                                         u32 *value)
342 {
343         atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
344 }
345
346 static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
347                                          const u32 *value)
348 {
349         atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
350 }
351
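/*
 * Either call 'resume' right away when the DATARDY bit is already set,
 * or record it in dd->resume, enable the DATARDY interrupt and return
 * -EINPROGRESS so the request completes asynchronously from the
 * interrupt/tasklet path.
 */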
352 static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
353                                                 atmel_aes_fn_t resume)
354 {
355         u32 isr = atmel_aes_read(dd, AES_ISR);
356
357         if (unlikely(isr & AES_INT_DATARDY))
358                 return resume(dd);
359
360         dd->resume = resume;
361         atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
362         return -EINPROGRESS;
363 }
364
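/*
 * Number of padding bytes needed to round 'len' up to a multiple of
 * 'block_size' (a power of two). For example, with the 16-byte AES
 * block size a 20-byte request needs 12 bytes of padding, while an
 * already aligned length needs none.
 */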
365 static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
366 {
367         len &= block_size - 1;
368         return len ? block_size - len : 0;
369 }
370
371 static inline struct aead_request *
372 aead_request_cast(struct crypto_async_request *req)
373 {
374         return container_of(req, struct aead_request, base);
375 }
376
377 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
378 {
379         struct atmel_aes_dev *aes_dd = NULL;
380         struct atmel_aes_dev *tmp;
381
382         spin_lock_bh(&atmel_aes.lock);
383         if (!ctx->dd) {
384                 list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
385                         aes_dd = tmp;
386                         break;
387                 }
388                 ctx->dd = aes_dd;
389         } else {
390                 aes_dd = ctx->dd;
391         }
392
393         spin_unlock_bh(&atmel_aes.lock);
394
395         return aes_dd;
396 }
397
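/*
 * Enable the peripheral clock and, on first use, reset the IP with
 * AES_CR_SWRST and program AES_MR with CKEY = 0xE. The 0xE value is
 * what the hardware expects in the CKEY field when writing the Mode
 * Register (datasheet detail, stated here as an assumption).
 */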
398 static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
399 {
400         int err;
401
402         err = clk_prepare_enable(dd->iclk);
403         if (err)
404                 return err;
405
406         if (!(dd->flags & AES_FLAGS_INIT)) {
407                 atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
408                 atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
409                 dd->flags |= AES_FLAGS_INIT;
410         }
411
412         return 0;
413 }
414
415 static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
416 {
417         return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
418 }
419
420 static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
421 {
422         int err;
423
424         err = atmel_aes_hw_init(dd);
425         if (err)
426                 return err;
427
428         dd->hw_version = atmel_aes_get_version(dd);
429
430         dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
431
432         clk_disable_unprepare(dd->iclk);
433         return 0;
434 }
435
436 static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
437                                       const struct atmel_aes_reqctx *rctx)
438 {
439         /* Clear all but persistent flags and set request flags. */
440         dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
441 }
442
443 static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
444 {
445         return (dd->flags & AES_FLAGS_ENCRYPT);
446 }
447
448 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
449 {
450         clk_disable_unprepare(dd->iclk);
451         dd->flags &= ~AES_FLAGS_BUSY;
452
453         if (dd->is_async)
454                 dd->areq->complete(dd->areq, err);
455
456         tasklet_schedule(&dd->queue_task);
457
458         return err;
459 }
460
461 static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
462                                  const u32 *iv)
463 {
464         u32 valmr = 0;
465
466         /* MR register must be set before IV registers */
467         if (dd->ctx->keylen == AES_KEYSIZE_128)
468                 valmr |= AES_MR_KEYSIZE_128;
469         else if (dd->ctx->keylen == AES_KEYSIZE_192)
470                 valmr |= AES_MR_KEYSIZE_192;
471         else
472                 valmr |= AES_MR_KEYSIZE_256;
473
474         valmr |= dd->flags & AES_FLAGS_MODE_MASK;
475
476         if (use_dma) {
477                 valmr |= AES_MR_SMOD_IDATAR0;
478                 if (dd->caps.has_dualbuff)
479                         valmr |= AES_MR_DUALBUFF;
480         } else {
481                 valmr |= AES_MR_SMOD_AUTO;
482         }
483
484         atmel_aes_write(dd, AES_MR, valmr);
485
486         atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
487                           SIZE_IN_WORDS(dd->ctx->keylen));
488
489         if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
490                 atmel_aes_write_block(dd, AES_IVR(0), iv);
491 }
492
493
494 /* CPU transfer */
495
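/*
 * PIO loop: the first input block was already written by
 * atmel_aes_cpu_start(). Read one output block from ODATAR, then feed
 * the next input block to IDATAR; if DATARDY is not yet set, arm the
 * interrupt and resume here later. Once the whole bounce buffer has
 * been processed, copy the result back into the caller's destination
 * scatterlist.
 */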
496 static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
497 {
498         int err = 0;
499         u32 isr;
500
501         for (;;) {
502                 atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
503                 dd->data += 4;
504                 dd->datalen -= AES_BLOCK_SIZE;
505
506                 if (dd->datalen < AES_BLOCK_SIZE)
507                         break;
508
509                 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
510
511                 isr = atmel_aes_read(dd, AES_ISR);
512                 if (!(isr & AES_INT_DATARDY)) {
513                         dd->resume = atmel_aes_cpu_transfer;
514                         atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
515                         return -EINPROGRESS;
516                 }
517         }
518
519         if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
520                                  dd->buf, dd->total))
521                 err = -EINVAL;
522
523         if (err)
524                 return atmel_aes_complete(dd, err);
525
526         return dd->cpu_transfer_complete(dd);
527 }
528
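/*
 * CPU (PIO) transfer entry point: the source scatterlist is copied
 * into the pre-allocated bounce buffer, the length is padded up to a
 * block multiple, the first block is written and the rest is pushed
 * block by block from atmel_aes_cpu_transfer().
 */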
529 static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
530                                struct scatterlist *src,
531                                struct scatterlist *dst,
532                                size_t len,
533                                atmel_aes_fn_t resume)
534 {
535         size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
536
537         if (unlikely(len == 0))
538                 return -EINVAL;
539
540         sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
541
542         dd->total = len;
543         dd->real_dst = dst;
544         dd->cpu_transfer_complete = resume;
545         dd->datalen = len + padlen;
546         dd->data = (u32 *)dd->buf;
547         atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
548         return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
549 }
550
551
552 /* DMA transfer */
553
554 static void atmel_aes_dma_callback(void *data);
555
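/*
 * Check whether a scatterlist can be handed to the DMA engine as-is:
 * the total length and every intermediate entry must be a multiple of
 * the cipher block size, and every entry must start 32-bit aligned.
 * On success the last entry may be trimmed down to 'len'; the trimmed
 * remainder is saved so atmel_aes_restore_sg() can undo the change
 * once the transfer is over.
 */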
556 static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
557                                     struct scatterlist *sg,
558                                     size_t len,
559                                     struct atmel_aes_dma *dma)
560 {
561         int nents;
562
563         if (!IS_ALIGNED(len, dd->ctx->block_size))
564                 return false;
565
566         for (nents = 0; sg; sg = sg_next(sg), ++nents) {
567                 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
568                         return false;
569
570                 if (len <= sg->length) {
571                         if (!IS_ALIGNED(len, dd->ctx->block_size))
572                                 return false;
573
574                         dma->nents = nents+1;
575                         dma->remainder = sg->length - len;
576                         sg->length = len;
577                         return true;
578                 }
579
580                 if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
581                         return false;
582
583                 len -= sg->length;
584         }
585
586         return false;
587 }
588
589 static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
590 {
591         struct scatterlist *sg = dma->sg;
592         int nents = dma->nents;
593
594         if (!dma->remainder)
595                 return;
596
597         while (--nents > 0 && sg)
598                 sg = sg_next(sg);
599
600         if (!sg)
601                 return;
602
603         sg->length += dma->remainder;
604 }
605
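/*
 * Map src/dst for DMA. When either list fails the alignment check, the
 * request is bounced through dd->buf: the source data is copied in and
 * a single-entry scatterlist covering the (padded) buffer replaces the
 * unaligned side; atmel_aes_unmap() copies the result back to the real
 * destination afterwards.
 */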
606 static int atmel_aes_map(struct atmel_aes_dev *dd,
607                          struct scatterlist *src,
608                          struct scatterlist *dst,
609                          size_t len)
610 {
611         bool src_aligned, dst_aligned;
612         size_t padlen;
613
614         dd->total = len;
615         dd->src.sg = src;
616         dd->dst.sg = dst;
617         dd->real_dst = dst;
618
619         src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
620         if (src == dst)
621                 dst_aligned = src_aligned;
622         else
623                 dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
624         if (!src_aligned || !dst_aligned) {
625                 padlen = atmel_aes_padlen(len, dd->ctx->block_size);
626
627                 if (dd->buflen < len + padlen)
628                         return -ENOMEM;
629
630                 if (!src_aligned) {
631                         sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
632                         dd->src.sg = &dd->aligned_sg;
633                         dd->src.nents = 1;
634                         dd->src.remainder = 0;
635                 }
636
637                 if (!dst_aligned) {
638                         dd->dst.sg = &dd->aligned_sg;
639                         dd->dst.nents = 1;
640                         dd->dst.remainder = 0;
641                 }
642
643                 sg_init_table(&dd->aligned_sg, 1);
644                 sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
645         }
646
647         if (dd->src.sg == dd->dst.sg) {
648                 dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
649                                             DMA_BIDIRECTIONAL);
650                 dd->dst.sg_len = dd->src.sg_len;
651                 if (!dd->src.sg_len)
652                         return -EFAULT;
653         } else {
654                 dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
655                                             DMA_TO_DEVICE);
656                 if (!dd->src.sg_len)
657                         return -EFAULT;
658
659                 dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
660                                             DMA_FROM_DEVICE);
661                 if (!dd->dst.sg_len) {
662                         dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
663                                      DMA_TO_DEVICE);
664                         return -EFAULT;
665                 }
666         }
667
668         return 0;
669 }
670
671 static void atmel_aes_unmap(struct atmel_aes_dev *dd)
672 {
673         if (dd->src.sg == dd->dst.sg) {
674                 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
675                              DMA_BIDIRECTIONAL);
676
677                 if (dd->src.sg != &dd->aligned_sg)
678                         atmel_aes_restore_sg(&dd->src);
679         } else {
680                 dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
681                              DMA_FROM_DEVICE);
682
683                 if (dd->dst.sg != &dd->aligned_sg)
684                         atmel_aes_restore_sg(&dd->dst);
685
686                 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
687                              DMA_TO_DEVICE);
688
689                 if (dd->src.sg != &dd->aligned_sg)
690                         atmel_aes_restore_sg(&dd->src);
691         }
692
693         if (dd->dst.sg == &dd->aligned_sg)
694                 sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
695                                     dd->buf, dd->total);
696 }
697
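/*
 * Configure one dmaengine slave channel and submit the scatterlist
 * prepared by atmel_aes_map(): memory-to-device transfers feed
 * AES_IDATAR(0) and need no callback, device-to-memory transfers drain
 * AES_ODATAR(0) and use atmel_aes_dma_callback() to resume the state
 * machine when the data is out.
 */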
698 static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
699                                         enum dma_slave_buswidth addr_width,
700                                         enum dma_transfer_direction dir,
701                                         u32 maxburst)
702 {
703         struct dma_async_tx_descriptor *desc;
704         struct dma_slave_config config;
705         dma_async_tx_callback callback;
706         struct atmel_aes_dma *dma;
707         int err;
708
709         memset(&config, 0, sizeof(config));
710         config.direction = dir;
711         config.src_addr_width = addr_width;
712         config.dst_addr_width = addr_width;
713         config.src_maxburst = maxburst;
714         config.dst_maxburst = maxburst;
715
716         switch (dir) {
717         case DMA_MEM_TO_DEV:
718                 dma = &dd->src;
719                 callback = NULL;
720                 config.dst_addr = dd->phys_base + AES_IDATAR(0);
721                 break;
722
723         case DMA_DEV_TO_MEM:
724                 dma = &dd->dst;
725                 callback = atmel_aes_dma_callback;
726                 config.src_addr = dd->phys_base + AES_ODATAR(0);
727                 break;
728
729         default:
730                 return -EINVAL;
731         }
732
733         err = dmaengine_slave_config(dma->chan, &config);
734         if (err)
735                 return err;
736
737         desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
738                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
739         if (!desc)
740                 return -ENOMEM;
741
742         desc->callback = callback;
743         desc->callback_param = dd;
744         dmaengine_submit(desc);
745         dma_async_issue_pending(dma->chan);
746
747         return 0;
748 }
749
750 static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
751                                         enum dma_transfer_direction dir)
752 {
753         struct atmel_aes_dma *dma;
754
755         switch (dir) {
756         case DMA_MEM_TO_DEV:
757                 dma = &dd->src;
758                 break;
759
760         case DMA_DEV_TO_MEM:
761                 dma = &dd->dst;
762                 break;
763
764         default:
765                 return;
766         }
767
768         dmaengine_terminate_all(dma->chan);
769 }
770
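/*
 * Start a full DMA transfer: pick the slave bus width and burst length
 * from the cipher block size (byte accesses for CFB8, halfword for
 * CFB16, word accesses otherwise, with bursts only for full 16-byte
 * AES blocks), map the buffers, then arm the output channel before
 * feeding the input channel.
 */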
771 static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
772                                struct scatterlist *src,
773                                struct scatterlist *dst,
774                                size_t len,
775                                atmel_aes_fn_t resume)
776 {
777         enum dma_slave_buswidth addr_width;
778         u32 maxburst;
779         int err;
780
781         switch (dd->ctx->block_size) {
782         case CFB8_BLOCK_SIZE:
783                 addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
784                 maxburst = 1;
785                 break;
786
787         case CFB16_BLOCK_SIZE:
788                 addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
789                 maxburst = 1;
790                 break;
791
792         case CFB32_BLOCK_SIZE:
793         case CFB64_BLOCK_SIZE:
794                 addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
795                 maxburst = 1;
796                 break;
797
798         case AES_BLOCK_SIZE:
799                 addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
800                 maxburst = dd->caps.max_burst_size;
801                 break;
802
803         default:
804                 err = -EINVAL;
805                 goto exit;
806         }
807
808         err = atmel_aes_map(dd, src, dst, len);
809         if (err)
810                 goto exit;
811
812         dd->resume = resume;
813
814         /* Set output DMA transfer first */
815         err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
816                                            maxburst);
817         if (err)
818                 goto unmap;
819
820         /* Then set input DMA transfer */
821         err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
822                                            maxburst);
823         if (err)
824                 goto output_transfer_stop;
825
826         return -EINPROGRESS;
827
828 output_transfer_stop:
829         atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
830 unmap:
831         atmel_aes_unmap(dd);
832 exit:
833         return atmel_aes_complete(dd, err);
834 }
835
836 static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
837 {
838         atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
839         atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
840         atmel_aes_unmap(dd);
841 }
842
843 static void atmel_aes_dma_callback(void *data)
844 {
845         struct atmel_aes_dev *dd = data;
846
847         atmel_aes_dma_stop(dd);
848         dd->is_async = true;
849         (void)dd->resume(dd);
850 }
851
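/*
 * Enqueue the new request (if any) and, when the device is idle,
 * dequeue the next one and run its ctx->start() handler. dd->is_async
 * records whether completion must be reported through areq->complete()
 * (request taken from the queue, or finished later from interrupt
 * context) or can simply be returned to the submitter.
 */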
852 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
853                                   struct crypto_async_request *new_areq)
854 {
855         struct crypto_async_request *areq, *backlog;
856         struct atmel_aes_base_ctx *ctx;
857         unsigned long flags;
858         int err, ret = 0;
859
860         spin_lock_irqsave(&dd->lock, flags);
861         if (new_areq)
862                 ret = crypto_enqueue_request(&dd->queue, new_areq);
863         if (dd->flags & AES_FLAGS_BUSY) {
864                 spin_unlock_irqrestore(&dd->lock, flags);
865                 return ret;
866         }
867         backlog = crypto_get_backlog(&dd->queue);
868         areq = crypto_dequeue_request(&dd->queue);
869         if (areq)
870                 dd->flags |= AES_FLAGS_BUSY;
871         spin_unlock_irqrestore(&dd->lock, flags);
872
873         if (!areq)
874                 return ret;
875
876         if (backlog)
877                 backlog->complete(backlog, -EINPROGRESS);
878
879         ctx = crypto_tfm_ctx(areq->tfm);
880
881         dd->areq = areq;
882         dd->ctx = ctx;
883         dd->is_async = (areq != new_areq);
884
885         err = ctx->start(dd);
886         return (dd->is_async) ? ret : err;
887 }
888
889
890 /* AES async block ciphers */
891
892 static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
893 {
894         return atmel_aes_complete(dd, 0);
895 }
896
897 static int atmel_aes_start(struct atmel_aes_dev *dd)
898 {
899         struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
900         struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
901         bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
902                         dd->ctx->block_size != AES_BLOCK_SIZE);
903         int err;
904
905         atmel_aes_set_mode(dd, rctx);
906
907         err = atmel_aes_hw_init(dd);
908         if (err)
909                 return atmel_aes_complete(dd, err);
910
911         atmel_aes_write_ctrl(dd, use_dma, req->info);
912         if (use_dma)
913                 return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
914                                            atmel_aes_transfer_complete);
915
916         return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
917                                    atmel_aes_transfer_complete);
918 }
919
920 static inline struct atmel_aes_ctr_ctx *
921 atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
922 {
923         return container_of(ctx, struct atmel_aes_ctr_ctx, base);
924 }
925
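/*
 * Process the next chunk of a CTR request. The hardware only
 * increments the lowest 16 bits of the IV counter (32 bits on versions
 * with the has_ctr32 capability), so a request whose block count would
 * wrap the counter is split: the first chunk runs up to the wrap
 * point, the IV is then incremented in software and this function is
 * re-entered for the remainder. For example, with a 16-bit counter
 * starting at 0xfffe, only two blocks are processed before the IV is
 * bumped manually.
 */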
926 static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
927 {
928         struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
929         struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
930         struct scatterlist *src, *dst;
931         u32 ctr, blocks;
932         size_t datalen;
933         bool use_dma, fragmented = false;
934
935         /* Check for transfer completion. */
936         ctx->offset += dd->total;
937         if (ctx->offset >= req->nbytes)
938                 return atmel_aes_transfer_complete(dd);
939
940         /* Compute data length. */
941         datalen = req->nbytes - ctx->offset;
942         blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
943         ctr = be32_to_cpu(ctx->iv[3]);
944         if (dd->caps.has_ctr32) {
945                 /* Check 32bit counter overflow. */
946                 u32 start = ctr;
947                 u32 end = start + blocks - 1;
948
949                 if (end < start) {
950                         ctr |= 0xffffffff;
951                         datalen = AES_BLOCK_SIZE * -start;
952                         fragmented = true;
953                 }
954         } else {
955                 /* Check 16bit counter overflow. */
956                 u16 start = ctr & 0xffff;
957                 u16 end = start + (u16)blocks - 1;
958
959                 if (blocks >> 16 || end < start) {
960                         ctr |= 0xffff;
961                         datalen = AES_BLOCK_SIZE * (0x10000-start);
962                         fragmented = true;
963                 }
964         }
965         use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
966
967         /* Jump to offset. */
968         src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
969         dst = ((req->src == req->dst) ? src :
970                scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
971
972         /* Configure hardware. */
973         atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
974         if (unlikely(fragmented)) {
975                 /*
976                  * Increment the counter manually to cope with the hardware
977                  * counter overflow.
978                  */
979                 ctx->iv[3] = cpu_to_be32(ctr);
980                 crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
981         }
982
983         if (use_dma)
984                 return atmel_aes_dma_start(dd, src, dst, datalen,
985                                            atmel_aes_ctr_transfer);
986
987         return atmel_aes_cpu_start(dd, src, dst, datalen,
988                                    atmel_aes_ctr_transfer);
989 }
990
991 static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
992 {
993         struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
994         struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
995         struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
996         int err;
997
998         atmel_aes_set_mode(dd, rctx);
999
1000         err = atmel_aes_hw_init(dd);
1001         if (err)
1002                 return atmel_aes_complete(dd, err);
1003
1004         memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
1005         ctx->offset = 0;
1006         dd->total = 0;
1007         return atmel_aes_ctr_transfer(dd);
1008 }
1009
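/*
 * Common entry point for all block cipher requests: record the block
 * size matching the requested CFB segment size (16 bytes for every
 * other mode), stash the mode flags in the request context and queue
 * the request on the first available AES device.
 */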
1010 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
1011 {
1012         struct atmel_aes_base_ctx *ctx;
1013         struct atmel_aes_reqctx *rctx;
1014         struct atmel_aes_dev *dd;
1015
1016         ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1017         switch (mode & AES_FLAGS_OPMODE_MASK) {
1018         case AES_FLAGS_CFB8:
1019                 ctx->block_size = CFB8_BLOCK_SIZE;
1020                 break;
1021
1022         case AES_FLAGS_CFB16:
1023                 ctx->block_size = CFB16_BLOCK_SIZE;
1024                 break;
1025
1026         case AES_FLAGS_CFB32:
1027                 ctx->block_size = CFB32_BLOCK_SIZE;
1028                 break;
1029
1030         case AES_FLAGS_CFB64:
1031                 ctx->block_size = CFB64_BLOCK_SIZE;
1032                 break;
1033
1034         default:
1035                 ctx->block_size = AES_BLOCK_SIZE;
1036                 break;
1037         }
1038
1039         dd = atmel_aes_find_dev(ctx);
1040         if (!dd)
1041                 return -ENODEV;
1042
1043         rctx = ablkcipher_request_ctx(req);
1044         rctx->mode = mode;
1045
1046         return atmel_aes_handle_queue(dd, &req->base);
1047 }
1048
1049 static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1050                            unsigned int keylen)
1051 {
1052         struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
1053
1054         if (keylen != AES_KEYSIZE_128 &&
1055             keylen != AES_KEYSIZE_192 &&
1056             keylen != AES_KEYSIZE_256) {
1057                 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1058                 return -EINVAL;
1059         }
1060
1061         memcpy(ctx->key, key, keylen);
1062         ctx->keylen = keylen;
1063
1064         return 0;
1065 }
1066
1067 static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
1068 {
1069         return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1070 }
1071
1072 static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
1073 {
1074         return atmel_aes_crypt(req, AES_FLAGS_ECB);
1075 }
1076
1077 static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
1078 {
1079         return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1080 }
1081
1082 static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
1083 {
1084         return atmel_aes_crypt(req, AES_FLAGS_CBC);
1085 }
1086
1087 static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
1088 {
1089         return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
1090 }
1091
1092 static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
1093 {
1094         return atmel_aes_crypt(req, AES_FLAGS_OFB);
1095 }
1096
1097 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
1098 {
1099         return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
1100 }
1101
1102 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
1103 {
1104         return atmel_aes_crypt(req, AES_FLAGS_CFB128);
1105 }
1106
1107 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
1108 {
1109         return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
1110 }
1111
1112 static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
1113 {
1114         return atmel_aes_crypt(req, AES_FLAGS_CFB64);
1115 }
1116
1117 static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
1118 {
1119         return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
1120 }
1121
1122 static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
1123 {
1124         return atmel_aes_crypt(req, AES_FLAGS_CFB32);
1125 }
1126
1127 static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
1128 {
1129         return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
1130 }
1131
1132 static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
1133 {
1134         return atmel_aes_crypt(req, AES_FLAGS_CFB16);
1135 }
1136
1137 static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
1138 {
1139         return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
1140 }
1141
1142 static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
1143 {
1144         return atmel_aes_crypt(req, AES_FLAGS_CFB8);
1145 }
1146
1147 static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
1148 {
1149         return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1150 }
1151
1152 static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
1153 {
1154         return atmel_aes_crypt(req, AES_FLAGS_CTR);
1155 }
1156
1157 static int atmel_aes_cra_init(struct crypto_tfm *tfm)
1158 {
1159         struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1160
1161         tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
1162         ctx->base.start = atmel_aes_start;
1163
1164         return 0;
1165 }
1166
1167 static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
1168 {
1169         struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1170
1171         tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
1172         ctx->base.start = atmel_aes_ctr_start;
1173
1174         return 0;
1175 }
1176
1177 static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
1178 {
1179 }
1180
1181 static struct crypto_alg aes_algs[] = {
1182 {
1183         .cra_name               = "ecb(aes)",
1184         .cra_driver_name        = "atmel-ecb-aes",
1185         .cra_priority           = ATMEL_AES_PRIORITY,
1186         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1187         .cra_blocksize          = AES_BLOCK_SIZE,
1188         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1189         .cra_alignmask          = 0xf,
1190         .cra_type               = &crypto_ablkcipher_type,
1191         .cra_module             = THIS_MODULE,
1192         .cra_init               = atmel_aes_cra_init,
1193         .cra_exit               = atmel_aes_cra_exit,
1194         .cra_u.ablkcipher = {
1195                 .min_keysize    = AES_MIN_KEY_SIZE,
1196                 .max_keysize    = AES_MAX_KEY_SIZE,
1197                 .setkey         = atmel_aes_setkey,
1198                 .encrypt        = atmel_aes_ecb_encrypt,
1199                 .decrypt        = atmel_aes_ecb_decrypt,
1200         }
1201 },
1202 {
1203         .cra_name               = "cbc(aes)",
1204         .cra_driver_name        = "atmel-cbc-aes",
1205         .cra_priority           = ATMEL_AES_PRIORITY,
1206         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1207         .cra_blocksize          = AES_BLOCK_SIZE,
1208         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1209         .cra_alignmask          = 0xf,
1210         .cra_type               = &crypto_ablkcipher_type,
1211         .cra_module             = THIS_MODULE,
1212         .cra_init               = atmel_aes_cra_init,
1213         .cra_exit               = atmel_aes_cra_exit,
1214         .cra_u.ablkcipher = {
1215                 .min_keysize    = AES_MIN_KEY_SIZE,
1216                 .max_keysize    = AES_MAX_KEY_SIZE,
1217                 .ivsize         = AES_BLOCK_SIZE,
1218                 .setkey         = atmel_aes_setkey,
1219                 .encrypt        = atmel_aes_cbc_encrypt,
1220                 .decrypt        = atmel_aes_cbc_decrypt,
1221         }
1222 },
1223 {
1224         .cra_name               = "ofb(aes)",
1225         .cra_driver_name        = "atmel-ofb-aes",
1226         .cra_priority           = ATMEL_AES_PRIORITY,
1227         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1228         .cra_blocksize          = AES_BLOCK_SIZE,
1229         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1230         .cra_alignmask          = 0xf,
1231         .cra_type               = &crypto_ablkcipher_type,
1232         .cra_module             = THIS_MODULE,
1233         .cra_init               = atmel_aes_cra_init,
1234         .cra_exit               = atmel_aes_cra_exit,
1235         .cra_u.ablkcipher = {
1236                 .min_keysize    = AES_MIN_KEY_SIZE,
1237                 .max_keysize    = AES_MAX_KEY_SIZE,
1238                 .ivsize         = AES_BLOCK_SIZE,
1239                 .setkey         = atmel_aes_setkey,
1240                 .encrypt        = atmel_aes_ofb_encrypt,
1241                 .decrypt        = atmel_aes_ofb_decrypt,
1242         }
1243 },
1244 {
1245         .cra_name               = "cfb(aes)",
1246         .cra_driver_name        = "atmel-cfb-aes",
1247         .cra_priority           = ATMEL_AES_PRIORITY,
1248         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1249         .cra_blocksize          = AES_BLOCK_SIZE,
1250         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1251         .cra_alignmask          = 0xf,
1252         .cra_type               = &crypto_ablkcipher_type,
1253         .cra_module             = THIS_MODULE,
1254         .cra_init               = atmel_aes_cra_init,
1255         .cra_exit               = atmel_aes_cra_exit,
1256         .cra_u.ablkcipher = {
1257                 .min_keysize    = AES_MIN_KEY_SIZE,
1258                 .max_keysize    = AES_MAX_KEY_SIZE,
1259                 .ivsize         = AES_BLOCK_SIZE,
1260                 .setkey         = atmel_aes_setkey,
1261                 .encrypt        = atmel_aes_cfb_encrypt,
1262                 .decrypt        = atmel_aes_cfb_decrypt,
1263         }
1264 },
1265 {
1266         .cra_name               = "cfb32(aes)",
1267         .cra_driver_name        = "atmel-cfb32-aes",
1268         .cra_priority           = ATMEL_AES_PRIORITY,
1269         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1270         .cra_blocksize          = CFB32_BLOCK_SIZE,
1271         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1272         .cra_alignmask          = 0x3,
1273         .cra_type               = &crypto_ablkcipher_type,
1274         .cra_module             = THIS_MODULE,
1275         .cra_init               = atmel_aes_cra_init,
1276         .cra_exit               = atmel_aes_cra_exit,
1277         .cra_u.ablkcipher = {
1278                 .min_keysize    = AES_MIN_KEY_SIZE,
1279                 .max_keysize    = AES_MAX_KEY_SIZE,
1280                 .ivsize         = AES_BLOCK_SIZE,
1281                 .setkey         = atmel_aes_setkey,
1282                 .encrypt        = atmel_aes_cfb32_encrypt,
1283                 .decrypt        = atmel_aes_cfb32_decrypt,
1284         }
1285 },
1286 {
1287         .cra_name               = "cfb16(aes)",
1288         .cra_driver_name        = "atmel-cfb16-aes",
1289         .cra_priority           = ATMEL_AES_PRIORITY,
1290         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1291         .cra_blocksize          = CFB16_BLOCK_SIZE,
1292         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1293         .cra_alignmask          = 0x1,
1294         .cra_type               = &crypto_ablkcipher_type,
1295         .cra_module             = THIS_MODULE,
1296         .cra_init               = atmel_aes_cra_init,
1297         .cra_exit               = atmel_aes_cra_exit,
1298         .cra_u.ablkcipher = {
1299                 .min_keysize    = AES_MIN_KEY_SIZE,
1300                 .max_keysize    = AES_MAX_KEY_SIZE,
1301                 .ivsize         = AES_BLOCK_SIZE,
1302                 .setkey         = atmel_aes_setkey,
1303                 .encrypt        = atmel_aes_cfb16_encrypt,
1304                 .decrypt        = atmel_aes_cfb16_decrypt,
1305         }
1306 },
1307 {
1308         .cra_name               = "cfb8(aes)",
1309         .cra_driver_name        = "atmel-cfb8-aes",
1310         .cra_priority           = ATMEL_AES_PRIORITY,
1311         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1312         .cra_blocksize          = CFB8_BLOCK_SIZE,
1313         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1314         .cra_alignmask          = 0x0,
1315         .cra_type               = &crypto_ablkcipher_type,
1316         .cra_module             = THIS_MODULE,
1317         .cra_init               = atmel_aes_cra_init,
1318         .cra_exit               = atmel_aes_cra_exit,
1319         .cra_u.ablkcipher = {
1320                 .min_keysize    = AES_MIN_KEY_SIZE,
1321                 .max_keysize    = AES_MAX_KEY_SIZE,
1322                 .ivsize         = AES_BLOCK_SIZE,
1323                 .setkey         = atmel_aes_setkey,
1324                 .encrypt        = atmel_aes_cfb8_encrypt,
1325                 .decrypt        = atmel_aes_cfb8_decrypt,
1326         }
1327 },
1328 {
1329         .cra_name               = "ctr(aes)",
1330         .cra_driver_name        = "atmel-ctr-aes",
1331         .cra_priority           = ATMEL_AES_PRIORITY,
1332         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1333         .cra_blocksize          = 1,
1334         .cra_ctxsize            = sizeof(struct atmel_aes_ctr_ctx),
1335         .cra_alignmask          = 0xf,
1336         .cra_type               = &crypto_ablkcipher_type,
1337         .cra_module             = THIS_MODULE,
1338         .cra_init               = atmel_aes_ctr_cra_init,
1339         .cra_exit               = atmel_aes_cra_exit,
1340         .cra_u.ablkcipher = {
1341                 .min_keysize    = AES_MIN_KEY_SIZE,
1342                 .max_keysize    = AES_MAX_KEY_SIZE,
1343                 .ivsize         = AES_BLOCK_SIZE,
1344                 .setkey         = atmel_aes_setkey,
1345                 .encrypt        = atmel_aes_ctr_encrypt,
1346                 .decrypt        = atmel_aes_ctr_decrypt,
1347         }
1348 },
1349 };
1350
1351 static struct crypto_alg aes_cfb64_alg = {
1352         .cra_name               = "cfb64(aes)",
1353         .cra_driver_name        = "atmel-cfb64-aes",
1354         .cra_priority           = ATMEL_AES_PRIORITY,
1355         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1356         .cra_blocksize          = CFB64_BLOCK_SIZE,
1357         .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1358         .cra_alignmask          = 0x7,
1359         .cra_type               = &crypto_ablkcipher_type,
1360         .cra_module             = THIS_MODULE,
1361         .cra_init               = atmel_aes_cra_init,
1362         .cra_exit               = atmel_aes_cra_exit,
1363         .cra_u.ablkcipher = {
1364                 .min_keysize    = AES_MIN_KEY_SIZE,
1365                 .max_keysize    = AES_MAX_KEY_SIZE,
1366                 .ivsize         = AES_BLOCK_SIZE,
1367                 .setkey         = atmel_aes_setkey,
1368                 .encrypt        = atmel_aes_cfb64_encrypt,
1369                 .decrypt        = atmel_aes_cfb64_decrypt,
1370         }
1371 };
1372
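/*
 * Users reach these algorithms through the generic crypto API once the
 * driver has registered them. A minimal sketch (error handling and
 * async completion setup omitted, not part of this driver):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * The "atmel-*-aes" implementations are preferred over the generic
 * software AES because of the higher ATMEL_AES_PRIORITY.
 */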
1373
1374 /* gcm aead functions */
1375
1376 static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1377                                const u32 *data, size_t datalen,
1378                                const u32 *ghash_in, u32 *ghash_out,
1379                                atmel_aes_fn_t resume);
1380 static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1381 static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1382
1383 static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1384 static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1385 static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1386 static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1387 static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1388 static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1389 static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1390
1391 static inline struct atmel_aes_gcm_ctx *
1392 atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1393 {
1394         return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1395 }
1396
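/*
 * Run a standalone GHASH over 'data' (datalen must be a multiple of
 * the AES block size), optionally seeding the intermediate hash
 * registers with 'ghash_in', and call 'resume' once 'ghash_out' has
 * been read back from the hardware.
 */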
1397 static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1398                                const u32 *data, size_t datalen,
1399                                const u32 *ghash_in, u32 *ghash_out,
1400                                atmel_aes_fn_t resume)
1401 {
1402         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1403
1404         dd->data = (u32 *)data;
1405         dd->datalen = datalen;
1406         ctx->ghash_in = ghash_in;
1407         ctx->ghash_out = ghash_out;
1408         ctx->ghash_resume = resume;
1409
1410         atmel_aes_write_ctrl(dd, false, NULL);
1411         return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1412 }
1413
1414 static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1415 {
1416         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1417
1418         /* Set the data length. */
1419         atmel_aes_write(dd, AES_AADLENR, dd->total);
1420         atmel_aes_write(dd, AES_CLENR, 0);
1421
1422         /* If needed, overwrite the GCM Intermediate Hash Word Registers */
1423         if (ctx->ghash_in)
1424                 atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1425
1426         return atmel_aes_gcm_ghash_finalize(dd);
1427 }
1428
1429 static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1430 {
1431         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1432         u32 isr;
1433
1434         /* Write data into the Input Data Registers. */
1435         while (dd->datalen > 0) {
1436                 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1437                 dd->data += 4;
1438                 dd->datalen -= AES_BLOCK_SIZE;
1439
1440                 isr = atmel_aes_read(dd, AES_ISR);
1441                 if (!(isr & AES_INT_DATARDY)) {
1442                         dd->resume = atmel_aes_gcm_ghash_finalize;
1443                         atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1444                         return -EINPROGRESS;
1445                 }
1446         }
1447
1448         /* Read the computed hash from GHASHRx. */
1449         atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1450
1451         return ctx->ghash_resume(dd);
1452 }
1453
1454
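/*
 * Build J0 as specified for GCM: for the usual 96-bit IV, J0 is simply
 * IV || 0^31 || 1; for any other IV length, J0 is the GHASH of the
 * zero-padded IV followed by its 64-bit big-endian bit length,
 * computed here with the engine's GHASH mode before processing the
 * request itself.
 */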
1455 static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1456 {
1457         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1458         struct aead_request *req = aead_request_cast(dd->areq);
1459         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1460         struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1461         size_t ivsize = crypto_aead_ivsize(tfm);
1462         size_t datalen, padlen;
1463         const void *iv = req->iv;
1464         u8 *data = dd->buf;
1465         int err;
1466
1467         atmel_aes_set_mode(dd, rctx);
1468
1469         err = atmel_aes_hw_init(dd);
1470         if (err)
1471                 return atmel_aes_complete(dd, err);
1472
1473         if (likely(ivsize == 12)) {
1474                 memcpy(ctx->j0, iv, ivsize);
1475                 ctx->j0[3] = cpu_to_be32(1);
1476                 return atmel_aes_gcm_process(dd);
1477         }
1478
1479         padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1480         datalen = ivsize + padlen + AES_BLOCK_SIZE;
1481         if (datalen > dd->buflen)
1482                 return atmel_aes_complete(dd, -EINVAL);
1483
1484         memcpy(data, iv, ivsize);
1485         memset(data + ivsize, 0, padlen + sizeof(u64));
1486         ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1487
1488         return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1489                                    NULL, ctx->j0, atmel_aes_gcm_process);
1490 }
1491
1492 static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1493 {
1494         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1495         struct aead_request *req = aead_request_cast(dd->areq);
1496         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1497         bool enc = atmel_aes_is_encrypt(dd);
1498         u32 authsize;
1499
1500         /* Compute text length. */
1501         authsize = crypto_aead_authsize(tfm);
1502         ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1503
1504         /*
1505          * According to the tcrypt test suite, the GCM Automatic Tag Generation
1506          * fails when both the message and its associated data are empty.
1507          */
1508         if (likely(req->assoclen != 0 || ctx->textlen != 0))
1509                 dd->flags |= AES_FLAGS_GTAGEN;
1510
1511         atmel_aes_write_ctrl(dd, false, NULL);
1512         return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1513 }
1514
1515 static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1516 {
1517         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1518         struct aead_request *req = aead_request_cast(dd->areq);
1519         u32 j0_lsw, *j0 = ctx->j0;
1520         size_t padlen;
1521
1522         /* Write incr32(J0) into IV. */
1523         j0_lsw = j0[3];
1524         j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
1525         atmel_aes_write_block(dd, AES_IVR(0), j0);
1526         j0[3] = j0_lsw;
1527
1528         /* Set aad and text lengths. */
1529         atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1530         atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1531
1532         /* Check whether AAD are present. */
1533         if (unlikely(req->assoclen == 0)) {
1534                 dd->datalen = 0;
1535                 return atmel_aes_gcm_data(dd);
1536         }
1537
1538         /* Copy assoc data and add padding. */
1539         padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1540         if (unlikely(req->assoclen + padlen > dd->buflen))
1541                 return atmel_aes_complete(dd, -EINVAL);
1542         sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1543
1544         /* Write assoc data into the Input Data register. */
1545         dd->data = (u32 *)dd->buf;
1546         dd->datalen = req->assoclen + padlen;
1547         return atmel_aes_gcm_data(dd);
1548 }
1549
1550 static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
1551 {
1552         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1553         struct aead_request *req = aead_request_cast(dd->areq);
1554         bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
1555         struct scatterlist *src, *dst;
1556         u32 isr, mr;
1557
1558         /* Write AAD first. */
1559         while (dd->datalen > 0) {
1560                 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1561                 dd->data += 4;
1562                 dd->datalen -= AES_BLOCK_SIZE;
1563
1564                 isr = atmel_aes_read(dd, AES_ISR);
1565                 if (!(isr & AES_INT_DATARDY)) {
1566                         dd->resume = atmel_aes_gcm_data;
1567                         atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1568                         return -EINPROGRESS;
1569                 }
1570         }
1571
1572         /* GMAC only. */
1573         if (unlikely(ctx->textlen == 0))
1574                 return atmel_aes_gcm_tag_init(dd);
1575
1576         /* Prepare src and dst scatter lists to transfer cipher/plain texts */
1577         src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
1578         dst = ((req->src == req->dst) ? src :
1579                scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
1580
1581         if (use_dma) {
1582                 /* Update the Mode Register for DMA transfers. */
1583                 mr = atmel_aes_read(dd, AES_MR);
1584                 mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
1585                 mr |= AES_MR_SMOD_IDATAR0;
1586                 if (dd->caps.has_dualbuff)
1587                         mr |= AES_MR_DUALBUFF;
1588                 atmel_aes_write(dd, AES_MR, mr);
1589
1590                 return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
1591                                            atmel_aes_gcm_tag_init);
1592         }
1593
1594         return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
1595                                    atmel_aes_gcm_tag_init);
1596 }
1597
1598 static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
1599 {
1600         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1601         struct aead_request *req = aead_request_cast(dd->areq);
1602         u64 *data = dd->buf;
1603
1604         if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
1605                 if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
1606                         dd->resume = atmel_aes_gcm_tag_init;
1607                         atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
1608                         return -EINPROGRESS;
1609                 }
1610
1611                 return atmel_aes_gcm_finalize(dd);
1612         }
1613
1614         /* Read the GCM Intermediate Hash Word Registers. */
1615         atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
1616
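             /*
              * Hardware tag generation is disabled: finish GHASH by hashing
              * the final [len(A)]64 || [len(C)]64 block over the intermediate
              * hash read back above, then encrypt the result in
              * atmel_aes_gcm_tag().
              */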
1617         data[0] = cpu_to_be64(req->assoclen * 8);
1618         data[1] = cpu_to_be64(ctx->textlen * 8);
1619
1620         return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
1621                                    ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
1622 }
1623
1624 static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
1625 {
1626         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1627         unsigned long flags;
1628
1629         /*
1630          * Change mode to CTR to complete the tag generation.
1631          * Use J0 as Initialization Vector.
1632          */
1633         flags = dd->flags;
1634         dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
1635         dd->flags |= AES_FLAGS_CTR;
1636         atmel_aes_write_ctrl(dd, false, ctx->j0);
1637         dd->flags = flags;
1638
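             /*
              * T = E_K(J0) xor GHASH: push the GHASH value through the engine
              * in CTR mode with J0 as the counter block.
              */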
1639         atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
1640         return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
1641 }
1642
1643 static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
1644 {
1645         struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1646         struct aead_request *req = aead_request_cast(dd->areq);
1647         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1648         bool enc = atmel_aes_is_encrypt(dd);
1649         u32 offset, authsize, itag[4], *otag = ctx->tag;
1650         int err;
1651
1652         /* Read the computed tag. */
1653         if (likely(dd->flags & AES_FLAGS_GTAGEN))
1654                 atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
1655         else
1656                 atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
1657
1658         offset = req->assoclen + ctx->textlen;
1659         authsize = crypto_aead_authsize(tfm);
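             /*
              * Encryption appends the computed tag after the ciphertext;
              * decryption reads the expected tag from the source and compares
              * it against the computed one in constant time (crypto_memneq).
              */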
1660         if (enc) {
1661                 scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
1662                 err = 0;
1663         } else {
1664                 scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
1665                 err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
1666         }
1667
1668         return atmel_aes_complete(dd, err);
1669 }
1670
1671 static int atmel_aes_gcm_crypt(struct aead_request *req,
1672                                unsigned long mode)
1673 {
1674         struct atmel_aes_base_ctx *ctx;
1675         struct atmel_aes_reqctx *rctx;
1676         struct atmel_aes_dev *dd;
1677
1678         ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1679         ctx->block_size = AES_BLOCK_SIZE;
1680
1681         dd = atmel_aes_find_dev(ctx);
1682         if (!dd)
1683                 return -ENODEV;
1684
1685         rctx = aead_request_ctx(req);
1686         rctx->mode = AES_FLAGS_GCM | mode;
1687
1688         return atmel_aes_handle_queue(dd, &req->base);
1689 }
1690
1691 static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
1692                                 unsigned int keylen)
1693 {
1694         struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1695
1696         if (keylen != AES_KEYSIZE_256 &&
1697             keylen != AES_KEYSIZE_192 &&
1698             keylen != AES_KEYSIZE_128) {
1699                 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1700                 return -EINVAL;
1701         }
1702
1703         memcpy(ctx->key, key, keylen);
1704         ctx->keylen = keylen;
1705
1706         return 0;
1707 }
1708
1709 static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
1710                                      unsigned int authsize)
1711 {
1712         /* Same as crypto_gcm_authsize() from crypto/gcm.c */
1713         switch (authsize) {
1714         case 4:
1715         case 8:
1716         case 12:
1717         case 13:
1718         case 14:
1719         case 15:
1720         case 16:
1721                 break;
1722         default:
1723                 return -EINVAL;
1724         }
1725
1726         return 0;
1727 }
1728
1729 static int atmel_aes_gcm_encrypt(struct aead_request *req)
1730 {
1731         return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1732 }
1733
1734 static int atmel_aes_gcm_decrypt(struct aead_request *req)
1735 {
1736         return atmel_aes_gcm_crypt(req, 0);
1737 }
1738
1739 static int atmel_aes_gcm_init(struct crypto_aead *tfm)
1740 {
1741         struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
1742
1743         crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1744         ctx->base.start = atmel_aes_gcm_start;
1745
1746         return 0;
1747 }
1748
1749 static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
1750 {
1751
1752 }
1753
1754 static struct aead_alg aes_gcm_alg = {
1755         .setkey         = atmel_aes_gcm_setkey,
1756         .setauthsize    = atmel_aes_gcm_setauthsize,
1757         .encrypt        = atmel_aes_gcm_encrypt,
1758         .decrypt        = atmel_aes_gcm_decrypt,
1759         .init           = atmel_aes_gcm_init,
1760         .exit           = atmel_aes_gcm_exit,
1761         .ivsize         = 12,
1762         .maxauthsize    = AES_BLOCK_SIZE,
1763
1764         .base = {
1765                 .cra_name               = "gcm(aes)",
1766                 .cra_driver_name        = "atmel-gcm-aes",
1767                 .cra_priority           = ATMEL_AES_PRIORITY,
1768                 .cra_flags              = CRYPTO_ALG_ASYNC,
1769                 .cra_blocksize          = 1,
1770                 .cra_ctxsize            = sizeof(struct atmel_aes_gcm_ctx),
1771                 .cra_alignmask          = 0xf,
1772                 .cra_module             = THIS_MODULE,
1773         },
1774 };
1775
1776
1777 /* Probe functions */
1778
1779 static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
1780 {
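             /*
              * dd->buf is the bounce buffer used for CPU-driven transfers and
              * for the GCM pre-computations (J0, length block); keep its
              * usable length a multiple of the AES block size.
              */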
1781         dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
1782         dd->buflen = ATMEL_AES_BUFFER_SIZE;
1783         dd->buflen &= ~(AES_BLOCK_SIZE - 1);
1784
1785         if (!dd->buf) {
1786                 dev_err(dd->dev, "unable to alloc pages.\n");
1787                 return -ENOMEM;
1788         }
1789
1790         return 0;
1791 }
1792
1793 static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
1794 {
1795         free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
1796 }
1797
1798 static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
1799 {
1800         struct at_dma_slave     *sl = slave;
1801
1802         if (sl && sl->dma_dev == chan->device->dev) {
1803                 chan->private = sl;
1804                 return true;
1805         } else {
1806                 return false;
1807         }
1808 }
1809
1810 static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
1811                               struct crypto_platform_data *pdata)
1812 {
1813         struct at_dma_slave *slave;
1814         int err = -ENOMEM;
1815         dma_cap_mask_t mask;
1816
1817         dma_cap_zero(mask);
1818         dma_cap_set(DMA_SLAVE, mask);
1819
1820         /* Try to grab 2 DMA channels */
1821         slave = &pdata->dma_slave->rxdata;
1822         dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1823                                                         slave, dd->dev, "tx");
1824         if (!dd->src.chan)
1825                 goto err_dma_in;
1826
1827         slave = &pdata->dma_slave->txdata;
1828         dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1829                                                         slave, dd->dev, "rx");
1830         if (!dd->dst.chan)
1831                 goto err_dma_out;
1832
1833         return 0;
1834
1835 err_dma_out:
1836         dma_release_channel(dd->src.chan);
1837 err_dma_in:
1838         dev_warn(dd->dev, "no DMA channel available\n");
1839         return err;
1840 }
1841
1842 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
1843 {
1844         dma_release_channel(dd->dst.chan);
1845         dma_release_channel(dd->src.chan);
1846 }
1847
1848 static void atmel_aes_queue_task(unsigned long data)
1849 {
1850         struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1851
1852         atmel_aes_handle_queue(dd, NULL);
1853 }
1854
1855 static void atmel_aes_done_task(unsigned long data)
1856 {
1857         struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1858
1859         dd->is_async = true;
1860         (void)dd->resume(dd);
1861 }
1862
1863 static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
1864 {
1865         struct atmel_aes_dev *aes_dd = dev_id;
1866         u32 reg;
1867
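             /*
              * Handle only sources that are both pending and enabled; mask
              * them via IDR and let the done tasklet resume the driver state
              * machine.
              */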
1868         reg = atmel_aes_read(aes_dd, AES_ISR);
1869         if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
1870                 atmel_aes_write(aes_dd, AES_IDR, reg);
1871                 if (AES_FLAGS_BUSY & aes_dd->flags)
1872                         tasklet_schedule(&aes_dd->done_task);
1873                 else
1874                         dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
1875                 return IRQ_HANDLED;
1876         }
1877
1878         return IRQ_NONE;
1879 }
1880
1881 static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
1882 {
1883         int i;
1884
1885         if (dd->caps.has_gcm)
1886                 crypto_unregister_aead(&aes_gcm_alg);
1887
1888         if (dd->caps.has_cfb64)
1889                 crypto_unregister_alg(&aes_cfb64_alg);
1890
1891         for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1892                 crypto_unregister_alg(&aes_algs[i]);
1893 }
1894
1895 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1896 {
1897         int err, i, j;
1898
1899         for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1900                 err = crypto_register_alg(&aes_algs[i]);
1901                 if (err)
1902                         goto err_aes_algs;
1903         }
1904
1905         if (dd->caps.has_cfb64) {
1906                 err = crypto_register_alg(&aes_cfb64_alg);
1907                 if (err)
1908                         goto err_aes_cfb64_alg;
1909         }
1910
1911         if (dd->caps.has_gcm) {
1912                 err = crypto_register_aead(&aes_gcm_alg);
1913                 if (err)
1914                         goto err_aes_gcm_alg;
1915         }
1916
1917         return 0;
1918
1919 err_aes_gcm_alg:
1920         crypto_unregister_alg(&aes_cfb64_alg);
1921 err_aes_cfb64_alg:
1922         i = ARRAY_SIZE(aes_algs);
1923 err_aes_algs:
1924         for (j = 0; j < i; j++)
1925                 crypto_unregister_alg(&aes_algs[j]);
1926
1927         return err;
1928 }
1929
1930 static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
1931 {
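             /* Start from the most conservative feature set, then upgrade it from the IP version. */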
1932         dd->caps.has_dualbuff = 0;
1933         dd->caps.has_cfb64 = 0;
1934         dd->caps.has_ctr32 = 0;
1935         dd->caps.has_gcm = 0;
1936         dd->caps.max_burst_size = 1;
1937
1938         /* keep only major version number */
1939         switch (dd->hw_version & 0xff0) {
1940         case 0x500:
1941                 dd->caps.has_dualbuff = 1;
1942                 dd->caps.has_cfb64 = 1;
1943                 dd->caps.has_ctr32 = 1;
1944                 dd->caps.has_gcm = 1;
1945                 dd->caps.max_burst_size = 4;
1946                 break;
1947         case 0x200:
1948                 dd->caps.has_dualbuff = 1;
1949                 dd->caps.has_cfb64 = 1;
1950                 dd->caps.has_ctr32 = 1;
1951                 dd->caps.has_gcm = 1;
1952                 dd->caps.max_burst_size = 4;
1953                 break;
1954         case 0x130:
1955                 dd->caps.has_dualbuff = 1;
1956                 dd->caps.has_cfb64 = 1;
1957                 dd->caps.max_burst_size = 4;
1958                 break;
1959         case 0x120:
1960                 break;
1961         default:
1962                 dev_warn(dd->dev,
1963                                 "Unmanaged aes version, set minimum capabilities\n");
1964                 break;
1965         }
1966 }
1967
1968 #if defined(CONFIG_OF)
1969 static const struct of_device_id atmel_aes_dt_ids[] = {
1970         { .compatible = "atmel,at91sam9g46-aes" },
1971         { /* sentinel */ }
1972 };
1973 MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
1974
1975 static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
1976 {
1977         struct device_node *np = pdev->dev.of_node;
1978         struct crypto_platform_data *pdata;
1979
1980         if (!np) {
1981                 dev_err(&pdev->dev, "device node not found\n");
1982                 return ERR_PTR(-EINVAL);
1983         }
1984
1985         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1986         if (!pdata) {
1987                 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1988                 return ERR_PTR(-ENOMEM);
1989         }
1990
1991         pdata->dma_slave = devm_kzalloc(&pdev->dev,
1992                                         sizeof(*(pdata->dma_slave)),
1993                                         GFP_KERNEL);
1994         if (!pdata->dma_slave) {
1995                 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
1996                 devm_kfree(&pdev->dev, pdata);
1997                 return ERR_PTR(-ENOMEM);
1998         }
1999
2000         return pdata;
2001 }
2002 #else
2003 static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
2004 {
2005         return ERR_PTR(-EINVAL);
2006 }
2007 #endif
2008
2009 static int atmel_aes_probe(struct platform_device *pdev)
2010 {
2011         struct atmel_aes_dev *aes_dd;
2012         struct crypto_platform_data *pdata;
2013         struct device *dev = &pdev->dev;
2014         struct resource *aes_res;
2015         int err;
2016
2017         pdata = pdev->dev.platform_data;
2018         if (!pdata) {
2019                 pdata = atmel_aes_of_init(pdev);
2020                 if (IS_ERR(pdata)) {
2021                         err = PTR_ERR(pdata);
2022                         goto aes_dd_err;
2023                 }
2024         }
2025
2026         if (!pdata->dma_slave) {
2027                 err = -ENXIO;
2028                 goto aes_dd_err;
2029         }
2030
2031         aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
2032         if (aes_dd == NULL) {
2033                 dev_err(dev, "unable to alloc data struct.\n");
2034                 err = -ENOMEM;
2035                 goto aes_dd_err;
2036         }
2037
2038         aes_dd->dev = dev;
2039
2040         platform_set_drvdata(pdev, aes_dd);
2041
2042         INIT_LIST_HEAD(&aes_dd->list);
2043         spin_lock_init(&aes_dd->lock);
2044
2045         tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
2046                                         (unsigned long)aes_dd);
2047         tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
2048                                         (unsigned long)aes_dd);
2049
2050         crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
2051
2052         aes_dd->irq = -1;
2053
2054         /* Get the base address */
2055         aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2056         if (!aes_res) {
2057                 dev_err(dev, "no MEM resource info\n");
2058                 err = -ENODEV;
2059                 goto res_err;
2060         }
2061         aes_dd->phys_base = aes_res->start;
2062
2063         /* Get the IRQ */
2064         aes_dd->irq = platform_get_irq(pdev, 0);
2065         if (aes_dd->irq < 0) {
2066                 dev_err(dev, "no IRQ resource info\n");
2067                 err = aes_dd->irq;
2068                 goto res_err;
2069         }
2070
2071         err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
2072                                IRQF_SHARED, "atmel-aes", aes_dd);
2073         if (err) {
2074                 dev_err(dev, "unable to request aes irq.\n");
2075                 goto res_err;
2076         }
2077
2078         /* Initializing the clock */
2079         aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
2080         if (IS_ERR(aes_dd->iclk)) {
2081                 dev_err(dev, "clock initialization failed.\n");
2082                 err = PTR_ERR(aes_dd->iclk);
2083                 goto res_err;
2084         }
2085
2086         aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
2087         if (IS_ERR(aes_dd->io_base)) {
2088                 dev_err(dev, "can't ioremap\n");
2089                 err = PTR_ERR(aes_dd->io_base);
2090                 goto res_err;
2091         }
2092
2093         err = atmel_aes_hw_version_init(aes_dd);
2094         if (err)
2095                 goto res_err;
2096
2097         atmel_aes_get_cap(aes_dd);
2098
2099         err = atmel_aes_buff_init(aes_dd);
2100         if (err)
2101                 goto err_aes_buff;
2102
2103         err = atmel_aes_dma_init(aes_dd, pdata);
2104         if (err)
2105                 goto err_aes_dma;
2106
2107         spin_lock(&atmel_aes.lock);
2108         list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
2109         spin_unlock(&atmel_aes.lock);
2110
2111         err = atmel_aes_register_algs(aes_dd);
2112         if (err)
2113                 goto err_algs;
2114
2115         dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
2116                         dma_chan_name(aes_dd->src.chan),
2117                         dma_chan_name(aes_dd->dst.chan));
2118
2119         return 0;
2120
2121 err_algs:
2122         spin_lock(&atmel_aes.lock);
2123         list_del(&aes_dd->list);
2124         spin_unlock(&atmel_aes.lock);
2125         atmel_aes_dma_cleanup(aes_dd);
2126 err_aes_dma:
2127         atmel_aes_buff_cleanup(aes_dd);
2128 err_aes_buff:
2129 res_err:
2130         tasklet_kill(&aes_dd->done_task);
2131         tasklet_kill(&aes_dd->queue_task);
2132 aes_dd_err:
2133         dev_err(dev, "initialization failed.\n");
2134
2135         return err;
2136 }
2137
2138 static int atmel_aes_remove(struct platform_device *pdev)
2139 {
2140         struct atmel_aes_dev *aes_dd;
2141
2142         aes_dd = platform_get_drvdata(pdev);
2143         if (!aes_dd)
2144                 return -ENODEV;
2145         spin_lock(&atmel_aes.lock);
2146         list_del(&aes_dd->list);
2147         spin_unlock(&atmel_aes.lock);
2148
2149         atmel_aes_unregister_algs(aes_dd);
2150
2151         tasklet_kill(&aes_dd->done_task);
2152         tasklet_kill(&aes_dd->queue_task);
2153
2154         atmel_aes_dma_cleanup(aes_dd);
2155         atmel_aes_buff_cleanup(aes_dd);
2156
2157         return 0;
2158 }
2159
2160 static struct platform_driver atmel_aes_driver = {
2161         .probe          = atmel_aes_probe,
2162         .remove         = atmel_aes_remove,
2163         .driver         = {
2164                 .name   = "atmel_aes",
2165                 .of_match_table = of_match_ptr(atmel_aes_dt_ids),
2166         },
2167 };
2168
2169 module_platform_driver(atmel_aes_driver);
2170
2171 MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
2172 MODULE_LICENSE("GPL v2");
2173 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");