/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

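/*
 * The DMA iterator pair walks a request in SRAM-sized operations: @base
 * tracks per-operation lengths while @src walks the source scatterlist.
 * The first operation starts at op_offset = cache_ptr so that room is
 * left for the bytes already sitting in the cache.
 */
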
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;

	creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				     &dreq->cache_dma);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}

static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}

static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int ret;

	if (creq->cache)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
	else
		ret = mv_cesa_ahash_std_alloc_cache(creq, flags);

	return ret;
}

static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
{
	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
		      creq->req.dma.cache_dma);
}

static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
	kfree(creq->cache);
}

static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
{
	if (!creq->cache)
		return;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_free_cache(creq);
	else
		mv_cesa_ahash_std_free_cache(creq);

	creq->cache = NULL;
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_dma_cleanup(&creq->req.dma.base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_free_cache(creq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

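/*
 * Worked example for mv_cesa_ahash_pad_len() above: for a 100-byte
 * message, index = 100 & 63 = 36, so padlen = 56 - 36 = 20 (the 0x80
 * marker plus 19 zero bytes). With the 8-byte length field appended,
 * the trailer occupies 28 bytes and the padded message ends on a
 * 128-byte, i.e. block-aligned, boundary.
 */
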
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

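/*
 * One CPU-driven ("standard") step: copy the cached left-over bytes and
 * as much new source data as fits into the engine SRAM, pick the
 * fragment mode (not/first/mid/last fragmented), generate the padding
 * in SRAM when the engine cannot finish the hash by itself, then kick
 * accelerator 0 and wait for the ACCEL0_DONE interrupt.
 */
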
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

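/*
 * A standard request is complete only once every source byte that is
 * not deliberately left in the cache has been pushed to the engine;
 * until then, returning -EINPROGRESS makes the core run another
 * mv_cesa_ahash_std_step() round.
 */
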
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma.base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma.base, status);
	else
		ret = mv_cesa_ahash_std_process(ahashreq, status);

	if (ret == -EINPROGRESS)
		return ret;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	return ret;
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
	    !creq->last_req) {
		ret = mv_cesa_ahash_alloc_cache(req);
		if (ret)
			return ret;
	}

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}

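/*
 * Worked example of the caching rule implemented above: two 20-byte
 * updates are both absorbed by the cache (40 < 64, no engine
 * round-trip). A following 100-byte update brings the total to 140
 * bytes; a non-final request only processes full 64-byte blocks, so
 * 128 bytes are fed to the engine and the remaining 140 - 128 = 12
 * bytes are re-cached for the next call.
 */
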
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;

	if (!creq->cache_ptr)
		return 0;

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
					  frag_len + len, flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

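/*
 * Illustrative chain layout (a sketch, not normative): for a final
 * request carrying cached bytes plus fresh scatterlist data, the chain
 * built below looks roughly like
 *
 *	[cache -> SRAM] [data -> SRAM] [op + dummy launch] ...
 *	[padding -> SRAM] [last op + dummy launch] [dummy end]
 *
 * with one op/launch pair inserted per full SRAM payload of input.
 */
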
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&dreq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (cesa_dev->caps->has_tdma)
		creq->req.base.type = CESA_DMA_REQ;
	else
		creq->req.base.type = CESA_STD_REQ;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	if (creq->cache)
		memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

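/*
 * Usage sketch (illustrative only, async completion handling elided): a
 * caller of the generic ahash API can checkpoint and resume a partial
 * hash through the two helpers above:
 *
 *	char state[sizeof(struct md5_state)];	// the alg's statesize
 *
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);	// may return -EINPROGRESS
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req, state);
 *	crypto_ahash_final(req);
 */
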
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};

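/*
 * Illustrative userspace sketch (not part of this driver): once
 * registered, the "md5" implementation above can be exercised through
 * AF_ALG, error handling elided:
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "hash",
 *		.salg_name   = "md5",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	u8 digest[MD5_DIGEST_SIZE];
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	int opfd = accept(tfmfd, NULL, 0);
 *	write(opfd, "abc", 3);
 *	read(opfd, digest, sizeof(digest));
 */
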
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}

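/*
 * Standard HMAC key schedule (RFC 2104): the key, pre-hashed above when
 * it exceeds a block, is XORed with 0x36 into ipad and with 0x5c into
 * opad. The partial hash states of those two one-block pads are then
 * exported below and later loaded as IVs, which is what lets the engine
 * produce an HMAC in a single pass.
 */
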
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

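/*
 * ctx->iv stores the inner-hash state in words 0..7 and the outer-hash
 * state from word 8 on; the offset is fixed at 8 words (the SHA256
 * state size) so the same layout serves every supported digest. The
 * SHA1/SHA256 setkey helpers below follow the same pattern.
 */
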
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};