/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))
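/*
 * Most register offsets differ between the OMAP2/3, OMAP4 and OMAP5
 * revisions of the SHA/MD5 IP, so the macros above resolve them through
 * dd->pdata at run time. Only registers that exist on a single IP family
 * keep fixed offsets: SHA_REG_CTRL below is used only on the OMAP2-style
 * IP, while SHA_REG_IRQSTATUS/SHA_REG_IRQENA further down are used only
 * on the OMAP4/5-style IP.
 */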
#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000
/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22
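/*
 * Layout of the flags words (device flags live in dd->flags, context flags
 * in ctx->flags): bits 0-8 track device state, bits 16-17 per-request
 * state, and bits 18-20 hold the algorithm encoding copied verbatim from
 * SHA_REG_MODE_ALGO_MASK (7 << 0) shifted up by FLAGS_MODE_SHIFT, which is
 * why FLAGS_HMAC starts at bit 21, just above the 3-bit mode field.
 */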
#define OP_UPDATE		1
#define OP_FINAL		2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			PAGE_SIZE

struct omap_sham_dev;
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};
#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};
struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	int			err;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	const struct omap_sham_pdata	*pdata;
};
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}
static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}
static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpu(in[i]);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpu(in[i]);
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	int err;

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}
static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}
static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
			      u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}
static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
					(dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}
static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zu, length: %zu, final: %d\n",
						ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++)
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
		len32 -= min(len32, bs32);
	}

	return -EINPROGRESS;
}
static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final, int is_sg)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int len32, ret, dma_min = get_block_size(ctx);

	dev_dbg(dd->dev, "xmit_dma: digcnt: %zu, length: %zu, final: %d\n",
						ctx->digcnt, length, final);

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	len32 = DIV_ROUND_UP(length, dma_min) * dma_min;

	if (is_sg) {
		/*
		 * The SG entry passed in may not have the 'length' member
		 * set correctly so use a local SG entry (sgl) with the
		 * proper value for 'length' instead. If this is not done,
		 * the dmaengine may try to DMA the incorrect amount of data.
		 */
		sg_init_table(&ctx->sgl, 1);
		sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
		ctx->sgl.offset = ctx->sg->offset;
		sg_dma_len(&ctx->sgl) = len32;
		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);

		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}
static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				      const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;
	const u8 *vaddr;

	while (ctx->sg) {
		vaddr = kmap_atomic(sg_page(ctx->sg));
		vaddr += ctx->sg->offset;

		count = omap_sham_append_buffer(ctx,
				vaddr + ctx->offset,
				ctx->sg->length - ctx->offset);

		kunmap_atomic((void *)vaddr);

		if (!count)
			break;

		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
				  struct omap_sham_reqctx *ctx,
				  size_t length, int final)
{
	int ret;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~BIT(FLAGS_SG);

	ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
	if (ret != -EINPROGRESS)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	return ret;
}
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: %zu, final: %d\n",
					ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}
/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* Block size alignment */
#define SG_SA(sg, bs)	(IS_ALIGNED(sg->length, bs))
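/*
 * Worked example: with SHA-256 the block size bs is 64 bytes, so an sg
 * entry starting on a 4-byte boundary passes SG_AA(), but a 96-byte entry
 * fails SG_SA(sg, 64) (96 is not a multiple of 64) and the request falls
 * back to omap_sham_update_dma_slow(), which linearizes the data through
 * ctx->buffer instead of DMA-ing the scatterlist entry directly.
 */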
static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	int ret, bs;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	/*
	 * Don't use the sg interface when the transfer size is less
	 * than the number of elements in a DMA frame. Otherwise,
	 * the dmaengine infrastructure will calculate that it needs
	 * to transfer 0 frames which ultimately fails.
	 */
	if (ctx->total < get_block_size(ctx))
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %zu, bufcnt: %zu, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;
	bs = get_block_size(ctx);

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg, bs))
		/* size is not BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be BLOCK_SIZE aligned */
			tail = length & (bs - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = bs;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= BIT(FLAGS_SG);

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
	if (ret != -EINPROGRESS)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return ret;
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;
	int final;

	if (!ctx->total)
		return 0;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "cpu: bufcnt: %zu, digcnt: %zu, final: %d\n",
		ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		bufcnt = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
	}

	return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;
	int bs = 0;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;
	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zu, finup: %d\n",
		ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %zu\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}
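/*
 * On hardware without AUTO_XOR support the outer HMAC pass is finished in
 * software: the inner hash already sitting in req->result is re-hashed as
 * H(opad || inner), per the HMAC construction, using the synchronous
 * shash transform allocated in omap_sham_cra_init_alg().
 */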
static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
		    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %zu, bufcnt: %zu\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	dev_dbg(dd->dev, "exit, err: %d\n", err);

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int bs = get_block_size(ctx);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & BIT(FLAGS_FINUP)) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
			/*
			 * OMAP HW accel works only with buffers >= 9;
			 * will switch to bypass in final();
			 * final has the same request and data
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if ((ctx->bufcnt + ctx->total <= bs) ||
			   dd->polling_mode) {
			/*
			 * faster to use CPU for short transfers or
			 * use cpu when dma is not present.
			 */
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		omap_sham_append_sg(ctx);
		return 0;
	}

	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}
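/*
 * update() thus has three outcomes: data that still fits in ctx->buffer
 * (or, with finup() pending, stays under the 240-byte hardware-bypass
 * threshold) is only buffered; transfers of at most one block, or any
 * transfer in polling mode, are queued for the PIO (CPU) path; everything
 * else is queued for DMA.
 */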
static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad in the beginning of the buffer if we are going for
	 * software fallback algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer + offset,
				      ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If buffersize is less than 240, we use fallback SW encoding,
	 * as using DMA + HW in this case doesn't provide any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < 240)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct omap_sham_dev *dd = NULL, *tmp;
	int err, i;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= 0x36;
			bctx->opad[i] ^= 0x5c;
		}
	}

	return err;
}
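/*
 * 0x36 and 0x5c are the standard HMAC ipad/opad bytes from RFC 2104. On
 * hardware with AUTO_XOR the accelerator performs this XOR itself, so only
 * the zero-padded key is kept in bctx->ipad; otherwise both padded copies
 * are precomputed here, once per setkey().
 */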
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha224");
}

static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha256");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha384");
}

static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha512");
}
static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		crypto_free_shash(bctx->shash);
	}
}
static int omap_sham_export(struct ahash_request *req, void *out)
{
	return -ENOTSUPP;
}

static int omap_sham_import(struct ahash_request *req, const void *in)
{
	return -ENOTSUPP;
}
static struct ahash_alg algs_sha1_md5[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			err = omap_sham_update_cpu(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);

	/* If we are not busy, process next req */
	if (!test_bit(FLAGS_BUSY, &dd->flags))
		omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
	} else {
		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
		tasklet_schedule(&dd->done_task);
	}

	return IRQ_HANDLED;
}
static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
			     SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}
static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
#ifdef CONFIG_OF
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err;

	match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		return -EINVAL;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		return -EINVAL;
	}

	dd->pdata = match->data;

	return 0;
}
#else
static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		return -ENODEV;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		return dd->irq;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

	return 0;
}
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_alg *alg;

			alg = &dd->pdata->algs_info[i].algs_list[j];
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx);
			err = crypto_register_ahash(alg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		return err;
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");
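/*
 * Usage note (illustrative sketch, not part of this driver): the ahash
 * algorithms registered above are reached through the generic kernel
 * crypto API. A minimal caller might look like the following, where buf,
 * len and digest are placeholder names and asynchronous completion
 * handling via ahash_request_set_callback() is omitted for brevity
 * (crypto_ahash_digest() may return -EINPROGRESS for an async transform):
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */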