/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8           *in_key,
                              unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
        memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
        memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

        return 0;
}

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8           *in_key,
                                  unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

        if (key_len < 3)
                return -EINVAL;

        key_len -= 3;

        memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

        return ccm_aes_nx_set_key(tfm, in_key, key_len);
}
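
/*
 * For reference, RFC 4309 transports the three salt bytes at the tail of
 * the key material, so a 16-byte AES key arrives here as 19 bytes:
 *
 *   in_key[0..15]  AES-128 key (handed on to ccm_aes_nx_set_key() above)
 *   in_key[16..18] salt, stored in nx_ctx->priv.ccm.nonce
 */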

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

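/* RFC 4309 permits only 8-, 12- and 16-byte ICVs. */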
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
        __be32 data;

        memset(block, 0, csize);
        block += csize;

        if (csize >= 4)
                csize = 4;
        else if (msglen > (unsigned int)(1 << (8 * csize)))
                return -EOVERFLOW;

        data = cpu_to_be32(msglen);
        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

        return 0;
}
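
/*
 * Worked example: with csize = 3 and msglen = 0x012345, the bytes written
 * into the length field are 01 23 45 -- the message length in big-endian
 * order, right-aligned in the csize-byte field, as RFC 3610 requires.
 */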

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
        /* 2 <= L <= 8, so 1 <= L' <= 7. */
        if (1 > iv[0] || iv[0] > 7)
                return -EINVAL;

        return 0;
}
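
/*
 * iv[0] carries L' = L - 1, where L is the size in bytes of the length
 * field in B0. An RFC 4309 IV, for instance, uses an 11-byte nonce, so
 * L = 15 - 11 = 4 and iv[0] = 3.
 */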

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
                       unsigned int cryptlen, u8 *b0)
{
        unsigned int l, lp, m = authsize;
        int rc;

        memcpy(b0, iv, 16);

        lp = b0[0];
        l = lp + 1;

        /* set m, bits 3-5 */
        *b0 |= (8 * ((m - 2) / 2));

        /* set adata, bit 6, if associated data is used */
        if (assoclen)
                *b0 |= 64;

        rc = set_msg_len(b0 + 16 - l, cryptlen, l);

        return rc;
}
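
/*
 * Resulting B0 layout, per RFC 3610 section 2.2:
 *
 *   b0[0]        flags: 64*Adata + 8*M' + L', where M' = (M - 2)/2
 *   b0[1..15-L]  nonce
 *   b0[16-L..15] l(m), the message length, in big-endian order
 */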

static int generate_pat(u8                   *iv,
                        struct aead_request  *req,
                        struct nx_crypto_ctx *nx_ctx,
                        unsigned int          authsize,
                        unsigned int          nbytes,
                        unsigned int          assoclen,
                        u8                   *out)
{
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
        unsigned int iauth_len = 0;
        u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
        int rc;
        unsigned int max_sg_len;

        /* zero the ctr value */
        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        /* From page 78 of nx_wb.pdf:
         * Note: RFC 3610 allows the AAD to be up to 2^64 - 1 bytes in
         * length. If a full message is used, the AES CCA implementation
         * restricts the maximum AAD length to 2^32 - 1 bytes.
         * If partial messages are used, the implementation supports a
         * maximum AAD length of 2^64 - 1 bytes.
         *
         * However, in the crypto API's aead_request structure, assoclen
         * is an unsigned int and so cannot hold a length value greater
         * than 2^32 - 1. The AAD is therefore further constrained to at
         * most 2^32 - 1 bytes.
         */

        if (!assoclen) {
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
        } else if (assoclen <= 14) {
                /* if associated data is 14 bytes or less, we do one CCM
                 * operation on 2 AES blocks, B0 (stored in the csbcpb) and
                 * B1, which is fed in through the source buffers here */
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
                b1 = nx_ctx->priv.ccm.iauth_tag;
                iauth_len = assoclen;
        } else if (assoclen <= 65280) {
                /* if associated data is less than (2^16 - 2^8), we construct
                 * B1 differently and feed in the associated data to a CCA
                 * operation */
                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
                iauth_len = 14;
        } else {
                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
                iauth_len = 10;
        }
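
        /*
         * The iauth_len values follow from the RFC 3610 AAD encoding: a
         * 16-byte B1 block holds 14 AAD bytes after a 2-byte length
         * header (for assoclen < 2^16 - 2^8), or 10 AAD bytes after the
         * 2-byte 0xfffe marker plus 4-byte length (6 bytes of header)
         * used for larger AAD.
         */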

        /* generate B0 */
        rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
        if (rc)
                return rc;

        /* generate B1:
         * add control info for associated data
         * RFC 3610 and NIST Special Publication 800-38C
         */
        if (b1) {
                memset(b1, 0, 16);
                if (assoclen <= 65280) {
                        /* the AAD length is encoded big-endian, per RFC 3610 */
                        *(__be16 *)b1 = cpu_to_be16(assoclen);
                        scatterwalk_map_and_copy(b1 + 2, req->src, 0,
                                         iauth_len, SCATTERWALK_FROM_SG);
                } else {
                        *(__be16 *)b1 = cpu_to_be16(0xfffe);
                        *(__be32 *)&b1[2] = cpu_to_be32(assoclen);
                        scatterwalk_map_and_copy(b1 + 6, req->src, 0,
                                         iauth_len, SCATTERWALK_FROM_SG);
                }
        }

        /* now copy any remaining AAD to scatterlist and call nx... */
        if (!assoclen) {
                return rc;
        } else if (assoclen <= 14) {
                unsigned int len = 16;

                nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

                if (len != 16)
                        return -EINVAL;

                nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
                                            nx_ctx->ap->sglen);

                if (len != 16)
                        return -EINVAL;

                /* inlen should be negative, indicating to phyp that it's a
                 * pointer to an sg list */
                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
                                        sizeof(struct nx_sg);
                nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
                                        sizeof(struct nx_sg);

                NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
                NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

                result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

        } else {
                unsigned int processed = 0, to_process;

                processed += iauth_len;

                /* page_limit: number of sg entries that fit on one page */
                max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                                nx_driver.of.max_sg_len/sizeof(struct nx_sg));
                max_sg_len = min_t(u64, max_sg_len,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);

                do {
                        to_process = min_t(u32, assoclen - processed,
                                           nx_ctx->ap->databytelen);

                        nx_insg = nx_walk_and_build(nx_ctx->in_sg,
                                                    nx_ctx->ap->sglen,
                                                    req->src, processed,
                                                    &to_process);

                        if ((to_process + processed) < assoclen) {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
                                        NX_FDM_INTERMEDIATE;
                        } else {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
                                        ~NX_FDM_INTERMEDIATE;
                        }

                        nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
                                                sizeof(struct nx_sg);

                        result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

                        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                        if (rc)
                                return rc;

                        /* chain the intermediate PAT into the next pass */
                        memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
                                nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
                                AES_BLOCK_SIZE);

                        NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

                        atomic_inc(&(nx_ctx->stats->aes_ops));
                        /* count only the bytes handled in this pass */
                        atomic64_add(to_process, &nx_ctx->stats->aes_bytes);

                        processed += to_process;
                } while (processed < assoclen);

                result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
        }

        memcpy(out, result, AES_BLOCK_SIZE);

        return rc;
}

static int ccm_nx_decrypt(struct aead_request   *req,
                          struct blkcipher_desc *desc,
                          unsigned int assoclen)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
        unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        int rc = -1;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        nbytes -= authsize;

        /* copy out the auth tag to compare with later */
        scatterwalk_map_and_copy(priv->oauth_tag,
                                 req->src, nbytes + req->assoclen, authsize,
                                 SCATTERWALK_FROM_SG);

        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;

        do {
                /* to_process: the number of bytes to handle in this
                 * update, bounded by the sg list limits.
                 */
                to_process = nbytes - processed;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
                                       &to_process, processed + req->assoclen,
                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                /* for partial completion, copy the following for the next
                 * entry into the loop...
                 */
                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
                        csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_s0,
                        csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                /* update stats */
                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                    authsize) ? -EBADMSG : 0;
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int ccm_nx_encrypt(struct aead_request   *req,
                          struct blkcipher_desc *desc,
                          unsigned int assoclen)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        int rc = -1;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;

        do {
                /* to_process: the number of bytes to handle in this
                 * update, bounded by the sg list limits.
                 */
                to_process = nbytes - processed;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
                                       &to_process, processed + req->assoclen,
                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                /* for partial completion, copy the following for the next
                 * entry into the loop...
                 */
                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
                        csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_s0,
                        csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                /* update stats */
                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;

        } while (processed < nbytes);

        /* copy out the auth tag */
        scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
                                 req->dst, nbytes + req->assoclen, authsize,
                                 SCATTERWALK_TO_SG);

out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

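/*
 * The rfc4309 variants assemble the full 16-byte counter block before
 * delegating: iv[0] = 3 encodes L' for a 4-byte length field, iv[1..3]
 * is the salt stored at setkey time, iv[4..11] is the caller's 8-byte
 * IV, and the trailing counter bytes are zeroed in generate_pat(). The
 * 8 IV bytes also ride at the tail of the associated data in this API,
 * hence the "req->assoclen - 8" below.
 */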
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
        u8 *iv = rctx->iv;

        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        desc.info = iv;

        return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
        struct blkcipher_desc desc;
        int rc;

        desc.info = req->iv;

        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;

        return ccm_nx_encrypt(req, &desc, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
        u8 *iv = rctx->iv;

        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        desc.info = iv;

        return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
        struct blkcipher_desc desc;
        int rc;

        desc.info = req->iv;

        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;

        return ccm_nx_decrypt(req, &desc, req->assoclen);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
        .base = {
                .cra_name        = "ccm(aes)",
                .cra_driver_name = "ccm-aes-nx",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_ccm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = AES_BLOCK_SIZE,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = ccm_aes_nx_set_key,
        .setauthsize = ccm_aes_nx_setauthsize,
        .encrypt     = ccm_aes_nx_encrypt,
        .decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
        .base = {
                .cra_name        = "rfc4309(ccm(aes))",
                .cra_driver_name = "rfc4309-ccm-aes-nx",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_ccm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = 8,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = ccm4309_aes_nx_set_key,
        .setauthsize = ccm4309_aes_nx_setauthsize,
        .encrypt     = ccm4309_aes_nx_encrypt,
        .decrypt     = ccm4309_aes_nx_decrypt,
};
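
/*
 * A minimal sketch of how a kernel caller would exercise these algorithms
 * through the generic AEAD API (not part of this driver; the calls below
 * are the standard crypto API, and error handling is elided):
 *
 *      struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *      u8 key[16], iv[16];
 *
 *      crypto_aead_setkey(tfm, key, sizeof(key));
 *      crypto_aead_setauthsize(tfm, 16);
 *
 *      struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *      aead_request_set_ad(req, assoclen);
 *      aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *      crypto_aead_encrypt(req);
 *
 * With cra_priority 300, "ccm(aes)" resolves to this driver on machines
 * where the NX unit is available and registered.
 */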