/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
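/*
 * For example, _calc_walked(in) expands to
 * (dd->in_walk.offset - dd->in_sg->offset): the number of bytes the PIO
 * scatter walk has consumed within the current in_sg entry. The IRQ
 * handler compares it against in_sg->length to decide when to step to
 * the next scatterlist entry.
 */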
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
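/*
 * Worked example: FLD_MASK(4, 3) = ((1 << 2) - 1) << 3 = 0x18, the
 * KEY_SIZE field of the CTRL register, and FLD_VAL(2, 4, 3) = 0x10 puts
 * the value 2 into bits 4:3. This is how omap_aes_write_ctrl() encodes
 * a 192-bit key, since (24 >> 3) - 1 == 2.
 */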
#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
#define AES_REG_CTRL_CTR_WIDTH_32	0
#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
#define AES_REG_CTRL_CTR		BIT(6)
#define AES_REG_CTRL_CBC		BIT(5)
#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
#define AES_REG_CTRL_DIRECTION		BIT(2)
#define AES_REG_CTRL_INPUT_READY	BIT(1)
#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
#define AES_REG_CTRL_MASK		GENMASK(24, 2)
#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		BIT(6)
#define AES_REG_MASK_START		BIT(5)
#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
#define AES_REG_MASK_DMA_IN_EN		BIT(2)
#define AES_REG_MASK_SOFTRESET		BIT(1)
#define AES_REG_AUTOIDLE		BIT(0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define AES_REG_IRQ_STATUS(dd)		((dd)->pdata->irq_status_ofs)
#define AES_REG_IRQ_ENABLE(dd)		((dd)->pdata->irq_enable_ofs)
#define AES_REG_IRQ_DATA_IN		BIT(1)
#define AES_REG_IRQ_DATA_OUT		BIT(2)
#define DEFAULT_TIMEOUT			(5 * HZ)

#define DEFAULT_AUTOSUSPEND_DELAY	1000
#define FLAGS_MODE_MASK			0x000f
#define FLAGS_ENCRYPT			BIT(0)
#define FLAGS_CBC			BIT(1)
#define FLAGS_GIV			BIT(2)
#define FLAGS_CTR			BIT(3)

#define FLAGS_INIT			BIT(4)
#define FLAGS_FAST			BIT(5)
#define FLAGS_BUSY			BIT(6)

#define AES_BLOCK_WORDS			(AES_BLOCK_SIZE >> 2)
struct omap_aes_ctx {
	struct omap_aes_dev	*dd;

	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long		flags;
};
struct omap_aes_reqctx {
	unsigned long	mode;
};
#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0
struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};
struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int			algs_info_size;

	void	(*trigger)(struct omap_aes_dev *dd, int length);

	u32	key_ofs;
	u32	iv_ofs;
	u32	ctrl_ofs;
	u32	data_ofs;
	u32	rev_ofs;
	u32	mask_ofs;
	u32	irq_enable_ofs;
	u32	irq_status_ofs;

	u32	dma_enable_in;
	u32	dma_enable_out;
	u32	dma_start;

	u32	major_mask;
	u32	major_shift;
	u32	minor_mask;
	u32	minor_shift;
};
struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	struct tasklet_struct	done_task;

	struct ablkcipher_request	*req;
	struct crypto_engine		*engine;

	/*
	 * total is used by PIO mode for bookkeeping, so introduce
	 * total_save, which is needed to calculate page_order
	 */
	size_t				total;
	size_t				total_save;

	struct scatterlist		*in_sg;
	struct scatterlist		*out_sg;

	/* Buffers for copying for unaligned cases */
	struct scatterlist		in_sgl;
	struct scatterlist		out_sgl;
	struct scatterlist		*orig_out;
	int				sgs_copied;

	struct scatter_walk		in_walk;
	struct scatter_walk		out_walk;
	struct dma_chan			*dma_lch_in;
	struct dma_chan			*dma_lch_out;
	int				in_sg_len;
	int				out_sg_len;
	int				pio_only;
	const struct omap_aes_pdata	*pdata;
};
/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);
#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif
#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif
static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
				       u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}
static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}
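/*
 * omap_aes_write_n() bursts 'count' consecutive words into the register
 * file; omap_aes_write_ctrl() below uses it to load the four-word
 * (128-bit) IV into AES_REG_IV(dd, 0..3).
 */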
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	int err;

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	return 0;
}
static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* the key must always be written, even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR)
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}
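/*
 * Illustration: for AES-128 CBC encryption the value written above is
 * FLD_VAL(1, 4, 3) | AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION
 * = 0x08 | 0x20 | 0x04 = 0x2c. Updating via omap_aes_write_mask() with
 * AES_REG_CTRL_MASK (bits 24:2) leaves the read-only INPUT_READY and
 * OUTPUT_READY status bits (1:0) untouched.
 */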
static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}
static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}
static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd;

	spin_lock_bh(&list_lock);
	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
	list_move_tail(&dd->list, &dev_list);
	ctx->dd = dd;
	spin_unlock_bh(&list_lock);

	return dd;
}
static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_in)) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		return PTR_ERR(dd->dma_lch_in);
	}

	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_out)) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		err = PTR_ERR(dd->dma_lch_out);
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);

	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
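/*
 * Direction of the 'out' flag: sg_copy_buf(buf, sg, 0, n, 0) copies n
 * bytes from the scatterlist into buf, while out == 1 copies buf back
 * into the scatterlist; see omap_aes_copy_sgs() and omap_aes_done_task()
 * below for both uses.
 */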
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg,
		int in_sg_len, int out_sg_len)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		scatterwalk_start(&dd->out_walk, dd->out_sg);

		/* Enable DATAIN interrupt and let it take
		   care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}
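/*
 * Design note: cfg.src_addr and cfg.dst_addr above both point at
 * AES_REG_DATA_N(dd, 0) because the engine exposes a single
 * data-register window: the IN channel streams plaintext into it
 * (mem-to-dev) while the OUT channel drains the result (dev-to-mem),
 * so only the OUT completion callback is needed to learn that the
 * whole transfer has finished.
 */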
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err;

	pr_debug("total: %zu\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}
	}

	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
	}

	return err;
}
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	crypto_finalize_cipher_request(dd->engine, req, err);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	return 0;
}
static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
	int len = 0;

	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
		return -EINVAL;

	while (sg) {
		if (!IS_ALIGNED(sg->offset, 4))
			return -EINVAL;
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return -EINVAL;

		len += sg->length;
		sg = sg_next(sg);
	}

	if (len != total)
		return -EINVAL;

	return 0;
}
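/*
 * A request can go straight to DMA only if the total length is a whole
 * number of AES blocks and every scatterlist entry is word-aligned with
 * a block-multiple length; anything else is bounced through the
 * contiguous buffers built in omap_aes_copy_sgs() below.
 */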
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
	void *buf_in, *buf_out;
	int pages, total;

	total = ALIGN(dd->total, AES_BLOCK_SIZE);
	pages = get_order(total);

	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf_in || !buf_out) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		/* free_pages() ignores a zero address, so this is safe
		 * when only one of the two allocations succeeded */
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
		return -1;
	}

	dd->orig_out = dd->out_sg;

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);

	sg_init_table(&dd->in_sgl, 1);
	sg_set_buf(&dd->in_sgl, buf_in, total);
	dd->in_sg = &dd->in_sgl;
	dd->in_sg_len = 1;

	sg_init_table(&dd->out_sgl, 1);
	sg_set_buf(&dd->out_sgl, buf_out, total);
	dd->out_sg = &dd->out_sgl;
	dd->out_sg_len = 1;

	return 0;
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct ablkcipher_request *req)
{
	if (req)
		return crypto_transfer_cipher_request_to_engine(dd->engine,
								req);

	return 0;
}
static int omap_aes_prepare_req(struct crypto_engine *engine,
				struct ablkcipher_request *req)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_dev *dd = ctx->dd;
	struct omap_aes_reqctx *rctx;

	if (!dd)
		return -ENODEV;

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->total_save = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
	if (dd->in_sg_len < 0)
		return dd->in_sg_len;

	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
	if (dd->out_sg_len < 0)
		return dd->out_sg_len;

	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
		if (omap_aes_copy_sgs(dd))
			pr_err("Failed to copy SGs for unaligned cases\n");
		dd->sgs_copied = 1;
	} else {
		dd->sgs_copied = 0;
	}

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	return omap_aes_write_ctrl(dd);
}
static int omap_aes_crypt_req(struct crypto_engine *engine,
			      struct ablkcipher_request *req)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_dev *dd = ctx->dd;

	if (!dd)
		return -ENODEV;

	return omap_aes_crypt_dma_start(dd);
}
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	void *buf_in, *buf_out;
	int pages, len;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	if (dd->sgs_copied) {
		buf_in = sg_virt(&dd->in_sgl);
		buf_out = sg_virt(&dd->out_sgl);

		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
		pages = get_order(len);
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
	}

	omap_aes_finish_req(dd, 0);

	pr_debug("exit\n");
}
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}
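/*
 * Request flow: omap_aes_crypt() records the mode flags in the
 * per-request context and queues the request to the crypto engine;
 * omap_aes_prepare_req() then validates or copies the scatterlists and
 * programs key, IV and CTRL; omap_aes_crypt_req() starts the DMA (or
 * PIO) transfer; and omap_aes_done_task() finalizes the request.
 */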
/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
}

/* ********************** ALGS ************************************ */
static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
},
};
static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};
static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};
static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
};
static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.irq_status_ofs	= 0x8c,
	.irq_enable_ofs	= 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
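/*
 * The OMAP4-class IP differs from OMAP2/3: it has dedicated IRQ
 * status/enable registers (0x8c/0x90) that make the PIO fallback
 * possible, takes its DMA enables at bits 5/6 instead of 2/3, carries
 * wider revision fields (0x0700/0x003f), and needs the LENGTH registers
 * programmed via omap_aes_dma_trigger_omap4() before each transfer.
 */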
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}
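/*
 * PIO mode ping-pongs between the two interrupt sources: a DATA_IN IRQ
 * feeds one 4-word block into the data registers and then arms DATA_OUT
 * (0x4); the matching DATA_OUT IRQ drains the result, decrements
 * dd->total and re-arms DATA_IN (0x2), until no bytes remain and
 * done_task finalizes the request.
 */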
#if defined(CONFIG_OF)
static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);
static int omap_aes_get_res_of(struct omap_aes_dev *dd,
			       struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
			       struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}
static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_res;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err == -EPROBE_DEFER) {
		goto err_irq;
	} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "can't get IRQ resource\n");
			err = irq;
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				       dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (!dd->pdata->algs_info[i].registered) {
			for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
				algp = &dd->pdata->algs_info[i].algs_list[j];

				pr_debug("reg alg: %s\n", algp->cra_name);
				INIT_LIST_HEAD(&algp->cra_list);

				err = crypto_register_alg(algp);
				if (err)
					goto err_algs;

				dd->pdata->algs_info[i].registered++;
			}
		}
	}

	/* Initialize crypto engine */
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	dd->engine->prepare_cipher_request = omap_aes_prepare_req;
	dd->engine->cipher_one_request = omap_aes_crypt_req;
	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine;

	return 0;
err_engine:
	crypto_engine_exit(dd->engine);
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}
static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	crypto_engine_exit(dd->engine);
	tasklet_kill(&dd->done_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	dd = NULL;

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
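/*
 * Note: the system-sleep hooks only balance the runtime PM reference
 * (put on suspend, get on resume); no hardware context is saved because
 * omap_aes_write_ctrl() reprograms key, IV and CTRL for every request.
 */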
static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name		= "omap-aes",
		.pm		= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);
MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");