/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

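/*
 *	is_ofld_imm - check whether a WR can carry its payload inline
 *	@skb: the work request
 *	Small work requests whose total length fits within
 *	CRYPTO_MAX_IMM_TX_PKT_LEN are sent as immediate data inside the
 *	WR itself instead of through a gather list.
 */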
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
}

/*
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
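 *
 *	A sketch of the arithmetic, assuming one 8-byte flit per 64-bit
 *	word: the leading 2 flits cover the SGL header word and the first
 *	address, each following pair of entries shares 3 flits, and an odd
 *	trailing entry takes 2 more.  For example, n = 4 gives
 *	3 * 3 / 2 + (3 & 1) + 2 = 7 flits.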
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 *	chcr_handle_resp - process a completion and unmap the DMA buffers
 *	associated with the request
 *	@req: crypto request
 *	@input: hardware response (a struct cpl_fw6_pld payload)
 *	@error_status: completion status reported by the hardware
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
		break;
	}
	return 0;
}

/*
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

static struct shash_desc *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct shash_desc *desc;

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
		break;
	}
	if (IS_ERR(base_hash)) {
		pr_err("Cannot allocate sha-generic algo.\n");
		return ERR_CAST(base_hash);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(base_hash);
		return ERR_PTR(-ENOMEM);
	}
	desc->tfm = base_hash;
	desc->flags = crypto_shash_get_flags(base_hash);
	return desc;
}

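/*
 *	chcr_compute_partial_hash - hash one block and export the raw state
 *	Runs the software shash over a single ipad/opad block and exports
 *	the transform's internal state words; that exported state is the
 *	partial hash the hardware later resumes from.
 */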
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

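/*
 *	chcr_change_order - byte-swap a partial hash into big-endian order
 *	The exported shash state is an array of host-endian words; this
 *	converts it so the state can be handed to the hardware in
 *	big-endian form, as 64-bit words for SHA-384/512 and 32-bit words
 *	otherwise.
 */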
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
	    CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
		return 1;
	return 0;
}

static inline unsigned int ch_nents(struct scatterlist *sg,
				    unsigned int *total_size)
{
	unsigned int nents;

	for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
		nents++;
		*total_size += sg->length;
	}
	return nents;
}

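/*
 *	write_phys_cpl - build a CPL_RX_PHYS_DSGL message
 *	Fills in the DSGL header, then a len/addr pair for each mapped
 *	scatterlist entry, eight entries per phys_sge_pairs block.  When
 *	an output buffer size is given, the last entry is trimmed so the
 *	total never exceeds it.
 */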
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
			   struct scatterlist *sg,
			   struct phys_sge_parm *sg_param)
{
	struct phys_sge_pairs *to;
	unsigned int out_buf_size = sg_param->obsize;
	unsigned int nents = sg_param->nents, i, j, tot_len = 0;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
				       sizeof(struct cpl_rx_phys_dsgl));

	for (i = 0; nents; to++) {
		for (j = i; (nents && (j < (8 + i))); j++, nents--) {
			to->len[j] = htons(sg->length);
			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
			if (out_buf_size) {
				if (tot_len + sg_dma_len(sg) >= out_buf_size) {
					to->len[j] = htons(out_buf_size -
							   tot_len);
					return;
				}
				tot_len += sg_dma_len(sg);
			}
			sg = sg_next(sg);
		}
	}
}

static inline int
map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
		     struct scatterlist *sg, struct phys_sge_parm *sg_param)
{
	if (!sg || !sg_param->nents)
		return 0;

	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
	if (sg_param->nents == 0) {
		pr_err("CHCR : DMA mapping failed\n");
		return -EINVAL;
	}
	write_phys_cpl(phys_cpl, sg, sg_param);
	return 0;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline void
write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
			struct scatterlist *sg, unsigned int count)
{
	struct page *spage;
	unsigned int page_len;

	skb->len += count;
	skb->data_len += count;
	skb->truesize += count;
	while (count > 0) {
		if (sg && (!(sg->length)))
			break;
		spage = sg_page(sg);
		get_page(spage);
		page_len = min(sg->length, count);
		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
		(*frags)++;
		count -= page_len;
		sg = sg_next(sg);
	}
}

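/*
 *	generate_copy_rrkey - prepare the key material for decryption
 *	A reading of the code below (the exact hardware expectation is not
 *	spelled out in this file): for AES-CBC the reverse-round
 *	(decryption) key schedule is derived from the encryption key; for
 *	the two-half XTS key, the tweak half is copied first and the
 *	reverse-round schedule is derived for the data half.
 */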
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		get_aes_decrypt_key(key_ctx->key, ablkctx->key,
				    ablkctx->enckey_len << 3);
		memset(key_ctx->key + ablkctx->enckey_len, 0,
		       CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
				    ablkctx->key, ablkctx->enckey_len << 2);
	}
	return 0;
}

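/*
 *	create_wreq - fill the common FW_CRYPTO_LOOKASIDE_WR header
 *	Populates the work request, ULP_TXPKT and immediate-data headers
 *	shared by the cipher and hash paths: lengths in 16-byte units, the
 *	response queue id, and where the IV travels (in the DSGL, as
 *	immediate data, or not at all for hash requests).
 */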
static inline void create_wreq(struct chcr_context *ctx,
			       struct fw_crypto_lookaside_wr *wreq,
			       void *req, struct sk_buff *skb,
			       int kctx_len, int hash_sz,
			       unsigned int phys_dsgl)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
	struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
	int iv_loc = IV_DSGL;
	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
	unsigned int immdatalen = 0, nr_frags = 0;

	if (is_ofld_imm(skb)) {
		immdatalen = skb->data_len;
		iv_loc = IV_IMMEDIATE;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
	}

	wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
						     (kctx_len >> 4));
	wreq->pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
				    (calc_tx_flits_ofld(skb) * 8), 16)));
	wreq->cookie = cpu_to_be64((uintptr_t)req);
	wreq->rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
				(hash_sz) ? IV_NOP : iv_loc);

	ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
	ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
					 16) - ((sizeof(*wreq)) >> 4)));

	sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
	sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
				  ((hash_sz) ? DUMMY_BYTES :
				  (sizeof(struct cpl_rx_phys_dsgl) +
				   phys_dsgl)) + immdatalen);
}

/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@req_base: cipher request, as a crypto_async_request
 *	@ctx: crypto driver context of the request.
 *	@qid: ingress qid where response of this WR should be received.
 *	@op_type:	encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct crypto_async_request *req_base,
					struct chcr_context *ctx,
					unsigned short qid,
					unsigned short op_type)
{
	struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct _key_ctx *key_ctx;
	struct fw_crypto_lookaside_wr *wreq;
	struct cpl_tx_sec_pdu *sec_cpl;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct phys_sge_parm sg_param;
	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;

	if (!req->info)
		return ERR_PTR(-EINVAL);
	ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
	ablkctx->enc = op_type;

	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
		return ERR_PTR(-EINVAL);

	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);

	kctx_len = sizeof(*key_ctx) +
		(DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
			GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);

	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
	sec_cpl->op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);

	sec_cpl->pldlen = htonl(ivsize + req->nbytes);
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
								ivsize + 1, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
								    0, 0);
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
							 ablkctx->ciph_mode,
							 0, 0, ivsize >> 1, 1);
	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, phys_dsgl);

	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
	key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
	if (op_type == CHCR_DECRYPT_OP) {
		if (generate_copy_rrkey(ablkctx, key_ctx))
			goto map_fail1;
	} else {
		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
			memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
		} else {
			memcpy(key_ctx->key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(key_ctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);

	memcpy(ablkctx->iv, req->info, ivsize);
	sg_init_table(&ablkctx->iv_sg, 1);
	sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
	sg_param.nents = ablkctx->dst_nents;
	sg_param.obsize = dst_bufsize;
	sg_param.qid = qid;
	sg_param.align = 1;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
				 &sg_param))
		goto map_fail1;

	skb_set_transport_header(skb, transhdr_len);
	write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
	write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
	create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
map_fail1:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
	unsigned int ck_size, context_size;
	u16 alignment = 0;

	if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
		goto badkey_err;

	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		alignment = 8;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		goto badkey_err;
	}

	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;
	return -EINVAL;
}

int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	int ret = 0;
	struct sge_ofld_txq *q;
	struct adapter *adap = netdev2adap(dev);

	local_bh_disable();
	q = &adap->sge.ofldtxq[idx];
	spin_lock(&q->sendq.lock);
	if (q->full)
		ret = -1;
	spin_unlock(&q->sendq.lock);
	local_bh_enable();
	return ret;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_async_request *req_base = &req->base;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req_base, ctx,
			       u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
			       CHCR_ENCRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : failed to form WR\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_async_request *req_base = &req->base;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
			       CHCR_DECRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : failed to form WR\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

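/*
 *	chcr_device_init - bind the context to a device and rx queue
 *	Assigns a chcr device on first use and picks a receive queue for
 *	this tfm, spreading contexts across the per-channel queues using
 *	the current CPU id.
 */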
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx;
	unsigned int id;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		err = assign_chcr_device(&ctx->dev);
		if (err) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		u_ctx = ULD_CTX(ctx);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		ctx->dev->tx_channel_id = 0;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_channel_id = rxq_idx;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline int
write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
			    struct sk_buff *skb, unsigned int *frags, char *bfr,
			    u8 bfr_len)
{
	void *page_ptr = NULL;

	skb->len += bfr_len;
	skb->data_len += bfr_len;
	skb->truesize += bfr_len;
	page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
	if (!page_ptr)
		return -ENOMEM;
	get_page(virt_to_page(page_ptr));
	req_ctx->dummy_payload_ptr = page_ptr;
	memcpy(page_ptr, bfr, bfr_len);
	skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
			   offset_in_page(page_ptr), bfr_len);
	(*frags)++;
	return 0;
}

/**
 *	create_final_hash_wr - Create hash work request
 *	@req: hash request
 *	@param: parameters describing this leg of the hash
 */
static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
					    struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct _key_ctx *key_ctx;
	struct fw_crypto_lookaside_wr *wreq;
	struct cpl_tx_sec_pdu *sec_cpl;
	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int kctx_len = sizeof(*key_ctx);
	u8 hash_size_in_response = 0;

	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
	kctx_len += param->alg_prm.result_size + iopad_alignment;
	if (param->opad_needed)
		kctx_len += param->alg_prm.result_size + iopad_alignment;

	if (req_ctx->result)
		hash_size_in_response = digestsize;
	else
		hash_size_in_response = param->alg_prm.result_size;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
			GFP_ATOMIC);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
	memset(wreq, 0, transhdr_len);

	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
	sec_cpl->op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
	sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);

	sec_cpl->aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	sec_cpl->cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	sec_cpl->seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0, 0);

	sec_cpl->ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
	memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
				       CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    (kctx_len >> 4));
	sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);

	skb_set_transport_header(skb, transhdr_len);
	if (param->bfr_len != 0)
		write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
					    param->bfr_len);
	if (param->sg_len != 0)
		write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);

	create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response, 0);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (nbytes + req_ctx->bfr_len >= bs) {
		remainder = (nbytes + req_ctx->bfr_len) % bs;
		nbytes = nbytes + req_ctx->bfr_len - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
				   req_ctx->bfr_len, nbytes, 0);
		req_ctx->bfr_len += nbytes;
		return 0;
	}

	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.sg_len = nbytes - req_ctx->bfr_len;
	params.bfr_len = req_ctx->bfr_len;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 0;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_final_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	req_ctx->bfr_len = remainder;
	if (remainder)
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->bfr, remainder, req->nbytes -
				   remainder);
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

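/*
 *	create_last_hash_block - write an MD-style final padding block
 *	Lays out the 0x80 terminator, zero padding, and the 64-bit message
 *	length in bits (hence the << 3) at the end of the block: offset 56
 *	for the 64-byte SHA-1/SHA-2-256 family blocks and offset 120 for
 *	the 128-byte SHA-384/512 blocks.
 */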
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	params.bfr_len = req_ctx->bfr_len;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	skb = create_final_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8  bs;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.sg_len = req->nbytes;
	params.bfr_len = req_ctx->bfr_len;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->result = 1;
	if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}

	skb = create_final_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8  bs;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.last = 0;
	params.more = 0;
	params.sg_len = req->nbytes;
	params.bfr_len = 0;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req_ctx->bfr && req->nbytes == 0) {
		create_last_hash_block(req_ctx->bfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_final_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->bfr_len = req_ctx->bfr_len;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->bfr_len = state->bfr_len;
	req_ctx->data_len = state->data_len;
	req_ctx->dummy_payload_ptr = NULL;
	memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	/*
	 * Use the key to calculate the ipad and opad.  The ipad will be
	 * sent with the first request's data and the opad with the final
	 * hash result; they are kept in hmacctx->ipad and hmacctx->opad.
	 */
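	/*
	 * IPAD_DATA and OPAD_DATA are assumed here to be the usual
	 * RFC 2104 HMAC constants, 0x36 and 0x5c replicated across a
	 * word, XORed over the zero-padded key below.
	 */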
	if (!hmacctx->desc)
		return -EINVAL;
	if (keylen > bs) {
		err = crypto_shash_digest(hmacctx->desc, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}

static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	int status = 0;
	unsigned short context_size = 0;

	if ((key_len == (AES_KEYSIZE_128 << 1)) ||
	    (key_len == (AES_KEYSIZE_256 << 1))) {
		memcpy(ablkctx->key, key, key_len);
		ablkctx->enckey_len = key_len;
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
		ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		ablkctx->enckey_len = 0;
		status = -EINVAL;
	}
	return status;
}

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->dummy_payload_ptr = NULL;
	req_ctx->bfr_len = 0;
	req_ctx->skb = NULL;
	req_ctx->result = 0;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->desc = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->desc))
		return PTR_ERR(hmacctx->desc);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_free_shash(struct shash_desc *desc)
{
	crypto_free_shash(desc->tfm);
	kfree(desc);
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->desc) {
		chcr_free_shash(hmacctx->desc);
		hmacctx->desc = NULL;
	}
}

1228 static struct chcr_alg_template driver_algs[] = {
1229         /* AES-CBC */
1230         {
1231                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1232                 .is_registered = 0,
1233                 .alg.crypto = {
1234                         .cra_name               = "cbc(aes)",
1235                         .cra_driver_name        = "cbc(aes-chcr)",
1236                         .cra_priority           = CHCR_CRA_PRIORITY,
1237                         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1238                                 CRYPTO_ALG_ASYNC,
1239                         .cra_blocksize          = AES_BLOCK_SIZE,
1240                         .cra_ctxsize            = sizeof(struct chcr_context)
1241                                 + sizeof(struct ablk_ctx),
1242                         .cra_alignmask          = 0,
1243                         .cra_type               = &crypto_ablkcipher_type,
1244                         .cra_module             = THIS_MODULE,
1245                         .cra_init               = chcr_cra_init,
1246                         .cra_exit               = NULL,
1247                         .cra_u.ablkcipher       = {
1248                                 .min_keysize    = AES_MIN_KEY_SIZE,
1249                                 .max_keysize    = AES_MAX_KEY_SIZE,
1250                                 .ivsize         = AES_BLOCK_SIZE,
1251                                 .setkey                 = chcr_aes_cbc_setkey,
1252                                 .encrypt                = chcr_aes_encrypt,
1253                                 .decrypt                = chcr_aes_decrypt,
1254                         }
1255                 }
1256         },
1257         {
1258                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1259                 .is_registered = 0,
1260                 .alg.crypto =   {
1261                         .cra_name               = "xts(aes)",
1262                         .cra_driver_name        = "xts(aes-chcr)",
1263                         .cra_priority           = CHCR_CRA_PRIORITY,
1264                         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1265                                 CRYPTO_ALG_ASYNC,
1266                         .cra_blocksize          = AES_BLOCK_SIZE,
1267                         .cra_ctxsize            = sizeof(struct chcr_context) +
1268                                 sizeof(struct ablk_ctx),
1269                         .cra_alignmask          = 0,
1270                         .cra_type               = &crypto_ablkcipher_type,
1271                         .cra_module             = THIS_MODULE,
1272                         .cra_init               = chcr_cra_init,
1273                         .cra_exit               = NULL,
1274                         .cra_u = {
1275                                 .ablkcipher = {
1276                                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1277                                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1278                                         .ivsize         = AES_BLOCK_SIZE,
1279                                         .setkey         = chcr_aes_xts_setkey,
1280                                         .encrypt        = chcr_aes_encrypt,
1281                                         .decrypt        = chcr_aes_decrypt,
1282                                 }
1283                         }
1284                 }
1285         },
1286         /* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
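	/*
	 * CRYPTO_ALG_TYPE_HMAC is the AHASH type plus a driver-private
	 * sub-type bit, so these entries still land in the AHASH cases
	 * of the (un)registration switches below; chcr_register_alg()
	 * then uses the full type to hook up setkey and the HMAC-sized
	 * context.
	 */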
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-chcr)",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-chcr)",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-chcr)",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac(sha384-chcr)",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac(sha512-chcr)",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
};
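
/*
 * Usage sketch (illustrative only, not part of this driver): once
 * registered, these algorithms are reached through the generic crypto
 * API by cra_name, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *
 * and the crypto core picks the chcr implementation whenever
 * CHCR_CRA_PRIORITY beats the other registered "sha1" providers.
 */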

/*
 *	chcr_unregister_alg - Deregister the crypto algorithms that this
 *	driver registered with the kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

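/*
 * Per-transform context sizes handed to the crypto core: plain hashes
 * need only the base chcr_context, while HMAC transforms also carry a
 * hmac_ctx for the key-derived state.
 */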
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 *	chcr_register_alg - Register crypto algorithms with the kernel
 *	framework.  On failure, any algorithms registered so far are
 *	unregistered again.
 */
static int chcr_register_alg(void)
{
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			name = a_hash->halg.base.cra_driver_name;
			break;
		}
		if (err) {
			/* pr_fmt() already prefixes "chcr:" */
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up.  After
 *	this the kernel will start calling the driver APIs for crypto
 *	operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down.  After
 *	this the kernel will no longer call the driver APIs for crypto
 *	operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
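
/*
 * Pairing sketch (hypothetical caller, for illustration only): the ULD
 * core is expected to keep a device count and bracket the lifetime of
 * the algorithms with these two calls, e.g.
 *
 *	if (atomic_inc_return(&chcr_dev_count) == 1)
 *		start_crypto();
 *	...
 *	if (atomic_dec_and_test(&chcr_dev_count))
 *		stop_crypto();
 *
 * chcr_dev_count is a made-up name here; the real bookkeeping lives in
 * chcr_core.c.
 */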