/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior (sebastian@breakpoint.cc) SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

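/*
 * Bit flags ORed into keylen_flag at module init time, one per AES key
 * length supported by the CPACF hardware.
 */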
#define AES_KEYLEN_128          1
#define AES_KEYLEN_192          2
#define AES_KEYLEN_256          4

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;

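/*
 * Per-tfm context: the raw key material, the CPACF function codes selected
 * for encryption/decryption, and the software fallback tfm used when the
 * hardware does not support the requested key length.
 */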
struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        long enc;
        long dec;
        int key_len;
        union {
                struct crypto_blkcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};

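/*
 * Parameter block for the CPACF PCC instruction, used by xts_aes_crypt()
 * to pre-compute the XTS tweak before the actual KM operation.
 */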
struct pcc_param {
        u8 key[32];
        u8 tweak[16];
        u8 block[16];
        u8 bit[16];
        u8 xts[16];
};

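/*
 * Per-tfm context for XTS mode: the data-encryption key, the key used for
 * the PCC tweak computation, the selected CPACF function codes and the
 * software fallback tfm for key sizes the hardware cannot handle.
 */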
struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        long enc;
        long dec;
        int key_len;
        struct crypto_blkcipher *fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and the software
 * fallback is required, or a negative number if the key size is not valid.
 */
static int need_fallback(unsigned int key_len)
{
        switch (key_len) {
        case 16:
                if (!(keylen_flag & AES_KEYLEN_128))
                        return 1;
                break;
        case 24:
                if (!(keylen_flag & AES_KEYLEN_192))
                        return 1;
                break;
        case 32:
                if (!(keylen_flag & AES_KEYLEN_256))
                        return 1;
                break;
        default:
                return -1;
        }
        return 0;
}

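/*
 * Pass the request flags and the key down to the software fallback cipher
 * and mirror its result flags back into this tfm.
 */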
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

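/*
 * Store the key for hardware use if the length is supported, otherwise
 * hand it to the software fallback; an invalid length is rejected.
 */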
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int ret;

        ret = need_fallback(key_len);
        if (ret < 0) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        sctx->key_len = key_len;
        if (!ret) {
                memcpy(sctx->key, in_key, key_len);
                return 0;
        }

        return setkey_fallback_cip(tfm, in_key, key_len);
}

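/*
 * Single-block encryption/decryption via the CPACF KM instruction; the
 * software fallback is used when the key length is not supported in
 * hardware.
 */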
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(need_fallback(sctx->key_len))) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }

        switch (sctx->key_len) {
        case 16:
                cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 24:
                cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 32:
                cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(need_fallback(sctx->key_len))) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }

        switch (sctx->key_len) {
        case 16:
                cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 24:
                cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 32:
                cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        }
}

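/*
 * Allocate the software fallback cipher at tfm creation time. The mask
 * CRYPTO_ALG_NEED_FALLBACK makes sure this driver does not select itself
 * as its own fallback.
 */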
static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

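/*
 * Single-block "aes" cipher backed by the CPACF KM instruction, with a
 * software fallback for key lengths the hardware cannot handle.
 */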
static struct crypto_alg aes_alg = {
        .cra_name               =       "aes",
        .cra_driver_name        =       "aes-s390",
        .cra_priority           =       300,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_cip,
        .cra_exit               =       fallback_exit_cip,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       AES_MIN_KEY_SIZE,
                        .cia_max_keysize        =       AES_MAX_KEY_SIZE,
                        .cia_setkey             =       aes_set_key,
                        .cia_encrypt            =       aes_encrypt,
                        .cia_decrypt            =       aes_decrypt,
                }
        }
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

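/*
 * Run a request through the software fallback blkcipher: temporarily swap
 * the fallback tfm into the descriptor, perform the operation and restore
 * the original tfm.
 */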
static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        int ret;
        struct crypto_blkcipher *tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = sctx->fallback.blk;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        int ret;
        struct crypto_blkcipher *tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = sctx->fallback.blk;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = need_fallback(key_len);
        if (ret > 0) {
                sctx->key_len = key_len;
                return setkey_fallback_blk(tfm, in_key, key_len);
        }

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KM_AES_128_ENC;
                sctx->dec = CPACF_KM_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KM_AES_192_ENC;
                sctx->dec = CPACF_KM_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KM_AES_256_ENC;
                sctx->dec = CPACF_KM_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}

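/*
 * Walk the scatterlists and feed complete AES blocks to the CPACF KM
 * instruction; for ECB the parameter block is just the raw key.
 */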
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
                         struct blkcipher_walk *walk)
{
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes;

        while ((nbytes = walk->nbytes)) {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;

                ret = cpacf_km(func, param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }

        return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(sctx->fallback.blk);
        sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               =       "ecb(aes)",
        .cra_driver_name        =       "ecb-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ecb */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .setkey                 =       ecb_aes_set_key,
                        .encrypt                =       ecb_aes_encrypt,
                        .decrypt                =       ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = need_fallback(key_len);
        if (ret > 0) {
                sctx->key_len = key_len;
                return setkey_fallback_blk(tfm, in_key, key_len);
        }

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KMC_AES_128_ENC;
                sctx->dec = CPACF_KMC_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KMC_AES_192_ENC;
                sctx->dec = CPACF_KMC_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KMC_AES_256_ENC;
                sctx->dec = CPACF_KMC_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}

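/*
 * CBC uses the CPACF KMC instruction. Its parameter block consists of the
 * chaining value (IV) followed by the key; the updated chaining value is
 * copied back into the walk IV when all blocks have been processed.
 */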
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        if (!nbytes)
                goto out;

        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        do {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;

                ret = cpacf_kmc(func, &param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, sctx->enc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, sctx->dec, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               =       "cbc(aes)",
        .cra_driver_name        =       "cbc-aes-s390",
        .cra_priority           =       400,    /* combo: aes + cbc */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       cbc_aes_set_key,
                        .encrypt                =       cbc_aes_encrypt,
                        .decrypt                =       cbc_aes_decrypt,
                }
        }
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                                   unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        int ret;

        xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct crypto_blkcipher *tfm;
        int ret;

        tfm = desc->tfm;
        desc->tfm = xts_ctx->fallback;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct crypto_blkcipher *tfm;
        int ret;

        tfm = desc->tfm;
        desc->tfm = xts_ctx->fallback;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

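/*
 * XTS keys are twice the AES key size: 32 bytes map to KM_XTS_128,
 * 64 bytes to KM_XTS_256, and 48 bytes (2 x 192 bit) have no hardware
 * support and are handled by the software fallback.
 */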
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        switch (key_len) {
        case 32:
                xts_ctx->enc = CPACF_KM_XTS_128_ENC;
                xts_ctx->dec = CPACF_KM_XTS_128_DEC;
                memcpy(xts_ctx->key + 16, in_key, 16);
                memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
                break;
        case 48:
                xts_ctx->enc = 0;
                xts_ctx->dec = 0;
                err = xts_fallback_setkey(tfm, in_key, key_len);
                if (err)
                        return err;
                break;
        case 64:
                xts_ctx->enc = CPACF_KM_XTS_256_ENC;
                xts_ctx->dec = CPACF_KM_XTS_256_DEC;
                memcpy(xts_ctx->key, in_key, 32);
                memcpy(xts_ctx->pcc_key, in_key + 32, 32);
                break;
        default:
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }
        xts_ctx->key_len = key_len;
        return 0;
}

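/*
 * Compute the XTS tweak with the PCC instruction first, then process the
 * data with KM. The offset selects the 128- or 256-bit key layout within
 * the parameter blocks.
 */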
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_xts_ctx *xts_ctx,
                         struct blkcipher_walk *walk)
{
        unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        unsigned int n;
        u8 *in, *out;
        struct pcc_param pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        if (!nbytes)
                goto out;

        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
        /* remove decipher modifier bit from 'func' and call PCC */
        ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
        if (ret < 0)
                return -EIO;

        memcpy(xts_param.key, xts_ctx->key, 32);
        memcpy(xts_param.init, pcc_param.xts, 16);
        do {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;

                ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
out:
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(xts_ctx->key_len == 48))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(xts_ctx->key_len == 48))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(xts_ctx->fallback);
        xts_ctx->fallback = NULL;
}

static struct crypto_alg xts_aes_alg = {
        .cra_name               =       "xts(aes)",
        .cra_driver_name        =       "xts-aes-s390",
        .cra_priority           =       400,    /* combo: aes + xts */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_xts_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       xts_fallback_init,
        .cra_exit               =       xts_fallback_exit,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       2 * AES_MIN_KEY_SIZE,
                        .max_keysize            =       2 * AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       xts_aes_set_key,
                        .encrypt                =       xts_aes_encrypt,
                        .decrypt                =       xts_aes_decrypt,
                }
        }
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KMCTR_AES_128_ENC;
                sctx->dec = CPACF_KMCTR_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KMCTR_AES_192_ENC;
                sctx->dec = CPACF_KMCTR_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KMCTR_AES_256_ENC;
                sctx->dec = CPACF_KMCTR_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}

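/*
 * Fill the counter page with consecutive counter values so that several
 * blocks (up to PAGE_SIZE) can be processed with a single KMCTR call.
 */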
static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
                memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
                crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
        }
        return n;
}

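/*
 * CTR mode via the CPACF KMCTR instruction. The shared counter page is
 * used when the lock can be taken, otherwise a single on-stack counter
 * block is used; a final partial block is handled separately.
 */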
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
        int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        unsigned int n, nbytes;
        u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
        u8 *out, *in, *ctrptr = ctrbuf;

        if (!walk->nbytes)
                return ret;

        if (spin_trylock(&ctrblk_lock))
                ctrptr = ctrblk;

        memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= AES_BLOCK_SIZE) {
                        if (ctrptr == ctrblk)
                                n = __ctrblk_init(ctrptr, nbytes);
                        else
                                n = AES_BLOCK_SIZE;
                        ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
                        if (ret < 0 || ret != n) {
                                if (ctrptr == ctrblk)
                                        spin_unlock(&ctrblk_lock);
                                return -EIO;
                        }
                        if (n > AES_BLOCK_SIZE)
                                memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
                        crypto_inc(ctrptr, AES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
        if (ctrptr == ctrblk) {
                if (nbytes)
                        memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
                else
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
        } else {
                if (!nbytes)
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
        }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = cpacf_kmctr(func, sctx->key, buf, in,
                                  AES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != AES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
                crypto_inc(ctrbuf, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
                memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name               =       "ctr(aes)",
        .cra_driver_name        =       "ctr-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ctr */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       1,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       ctr_aes_set_key,
                        .encrypt                =       ctr_aes_encrypt,
                        .decrypt                =       ctr_aes_decrypt,
                }
        }
};

static int ctr_aes_alg_reg;

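/*
 * Probe the CPACF facilities, then register the algorithms; XTS and CTR
 * are only registered when the required KM/KMCTR functions are available.
 */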
static int __init aes_s390_init(void)
{
        int ret;

        if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
                keylen_flag |= AES_KEYLEN_128;
        if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
                keylen_flag |= AES_KEYLEN_192;
        if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
                keylen_flag |= AES_KEYLEN_256;

        if (!keylen_flag)
                return -EOPNOTSUPP;

        /* z9 109 and z9 BC/EC only support 128 bit key length */
        if (keylen_flag == AES_KEYLEN_128)
                pr_info("AES hardware acceleration is only available for"
                        " 128-bit keys\n");

        ret = crypto_register_alg(&aes_alg);
        if (ret)
                goto aes_err;

        ret = crypto_register_alg(&ecb_aes_alg);
        if (ret)
                goto ecb_aes_err;

        ret = crypto_register_alg(&cbc_aes_alg);
        if (ret)
                goto cbc_aes_err;

        if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
            cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
                ret = crypto_register_alg(&xts_aes_alg);
                if (ret)
                        goto xts_aes_err;
                xts_aes_alg_reg = 1;
        }

        if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto ctr_aes_err;
                }
                ret = crypto_register_alg(&ctr_aes_alg);
                if (ret) {
                        free_page((unsigned long) ctrblk);
                        goto ctr_aes_err;
                }
                ctr_aes_alg_reg = 1;
        }

out:
        return ret;

ctr_aes_err:
        crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
        crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        goto out;
}

static void __exit aes_s390_fini(void)
{
        if (ctr_aes_alg_reg) {
                crypto_unregister_alg(&ctr_aes_alg);
                free_page((unsigned long) ctrblk);
        }
        if (xts_aes_alg_reg)
                crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");