1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/i387.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46 #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
47 #define HAS_PCBC
48 #endif
49
50 /* This data is stored at the end of the crypto_tfm struct.
51  * It is a per "session" data storage location.
52  * This needs to be 16 byte aligned.
53  */
54 struct aesni_rfc4106_gcm_ctx {
55         u8 hash_subkey[16];
56         struct crypto_aes_ctx aes_key_expanded;
57         u8 nonce[4];
58         struct cryptd_aead *cryptd_tfm;
59 };
60
61 struct aesni_gcm_set_hash_subkey_result {
62         int err;
63         struct completion completion;
64 };
65
66 struct aesni_hash_subkey_req_data {
67         u8 iv[16];
68         struct aesni_gcm_set_hash_subkey_result result;
69         struct scatterlist sg;
70 };
71
72 #define AESNI_ALIGN     (16)
73 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
74 #define RFC4106_HASH_SUBKEY_SIZE 16
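/*
 * AES_BLOCK_MASK above rounds a byte count down to a whole number of
 * 16-byte AES blocks, e.g. "nbytes & AES_BLOCK_MASK" in the ECB/CBC/CTR
 * walk loops below.
 */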
75
76 struct aesni_lrw_ctx {
77         struct lrw_table_ctx lrw_table;
78         u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
79 };
80
81 struct aesni_xts_ctx {
82         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
83         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
84 };
85
86 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
87                              unsigned int key_len);
88 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
89                           const u8 *in);
90 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
91                           const u8 *in);
92 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
93                               const u8 *in, unsigned int len);
94 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
95                               const u8 *in, unsigned int len);
96 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
97                               const u8 *in, unsigned int len, u8 *iv);
98 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
99                               const u8 *in, unsigned int len, u8 *iv);
100
101 int crypto_fpu_init(void);
102 void crypto_fpu_exit(void);
103
104 #define AVX_GEN2_OPTSIZE 640
105 #define AVX_GEN4_OPTSIZE 4096
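/*
 * Crossover sizes (in bytes) used when picking a GCM implementation:
 * requests shorter than AVX_GEN2_OPTSIZE stay on the SSE code, requests
 * shorter than AVX_GEN4_OPTSIZE use the AVX (gen2) code, and larger
 * requests use the AVX2 (gen4) code when it is available.
 */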
106
107 #ifdef CONFIG_X86_64
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109                               const u8 *in, unsigned int len, u8 *iv);
110
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112                                  const u8 *in, bool enc, u8 *iv);
113
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131                         const u8 *in, unsigned long plaintext_len, u8 *iv,
132                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133                         u8 *auth_tag, unsigned long auth_tag_len);
134
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
153                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154                         u8 *auth_tag, unsigned long auth_tag_len);
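/*
 * Illustrative sketch only (not used by this driver): how the pre-counter
 * block j0 described above can be assembled from the 4-byte salt and the
 * 8-byte per-packet IV.  The real construction is done inline in
 * __driver_rfc4106_encrypt() and __driver_rfc4106_decrypt() below.
 */
static inline void example_build_rfc4106_j0(u8 *j0, const u8 *salt,
					    const u8 *iv)
{
	memcpy(j0, salt, 4);			/* salt from the SA */
	memcpy(j0 + 4, iv, 8);			/* explicit IV from the ESP payload */
	*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* initial counter value */
}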
155
156
157 #ifdef CONFIG_AS_AVX
158 /*
159  * asmlinkage void aesni_gcm_precomp_avx_gen2()
160  * gcm_data *my_ctx_data, context data
161  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
162  */
163 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
164
165 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
166                         const u8 *in, unsigned long plaintext_len, u8 *iv,
167                         const u8 *aad, unsigned long aad_len,
168                         u8 *auth_tag, unsigned long auth_tag_len);
169
170 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
171                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
172                         const u8 *aad, unsigned long aad_len,
173                         u8 *auth_tag, unsigned long auth_tag_len);
174
175 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
176                         const u8 *in, unsigned long plaintext_len, u8 *iv,
177                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
178                         u8 *auth_tag, unsigned long auth_tag_len)
179 {
180         if (plaintext_len < AVX_GEN2_OPTSIZE) {
181                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
182                                 aad_len, auth_tag, auth_tag_len);
183         } else {
184                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
185                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
186                                         aad_len, auth_tag, auth_tag_len);
187         }
188 }
189
190 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
191                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
192                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
193                         u8 *auth_tag, unsigned long auth_tag_len)
194 {
195         if (ciphertext_len < AVX_GEN2_OPTSIZE) {
196                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
197                                 aad_len, auth_tag, auth_tag_len);
198         } else {
199                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
200                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
201                                         aad_len, auth_tag, auth_tag_len);
202         }
203 }
204 #endif
205
206 #ifdef CONFIG_AS_AVX2
207 /*
208  * asmlinkage void aesni_gcm_precomp_avx_gen4()
209  * gcm_data *my_ctx_data, context data
210  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
211  */
212 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
213
214 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
215                         const u8 *in, unsigned long plaintext_len, u8 *iv,
216                         const u8 *aad, unsigned long aad_len,
217                         u8 *auth_tag, unsigned long auth_tag_len);
218
219 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
220                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
221                         const u8 *aad, unsigned long aad_len,
222                         u8 *auth_tag, unsigned long auth_tag_len);
223
224 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
225                         const u8 *in, unsigned long plaintext_len, u8 *iv,
226                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
227                         u8 *auth_tag, unsigned long auth_tag_len)
228 {
229         if (plaintext_len < AVX_GEN2_OPTSIZE) {
230                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
231                                 aad_len, auth_tag, auth_tag_len);
232         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
233                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
234                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
235                                         aad_len, auth_tag, auth_tag_len);
236         } else {
237                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
238                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
239                                         aad_len, auth_tag, auth_tag_len);
240         }
241 }
242
243 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
244                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
245                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
246                         u8 *auth_tag, unsigned long auth_tag_len)
247 {
248         if (ciphertext_len < AVX_GEN2_OPTSIZE) {
249                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
250                                 aad, aad_len, auth_tag, auth_tag_len);
251         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
252                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
253                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
254                                         aad_len, auth_tag, auth_tag_len);
255         } else {
256                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
257                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
258                                         aad_len, auth_tag, auth_tag_len);
259         }
260 }
261 #endif
262
263 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
264                         const u8 *in, unsigned long plaintext_len, u8 *iv,
265                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
266                         u8 *auth_tag, unsigned long auth_tag_len);
267
268 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
269                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
270                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
271                         u8 *auth_tag, unsigned long auth_tag_len);
272
273 static inline struct
274 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
275 {
276         return (struct aesni_rfc4106_gcm_ctx *)
277                 PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
278                           AESNI_ALIGN);
280 }
281 #endif
282
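/*
 * aes_ctx() below returns the AESNI_ALIGN-aligned view of a raw context
 * buffer.  The raw buffers (cra_ctxsize, raw_aes_ctx, raw_tweak_ctx,
 * raw_crypt_ctx) are over-allocated by AESNI_ALIGN - 1 bytes so that the
 * aligned pointer always fits inside them.
 */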
283 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
284 {
285         unsigned long addr = (unsigned long)raw_ctx;
286         unsigned long align = AESNI_ALIGN;
287
288         if (align <= crypto_tfm_ctx_alignment())
289                 align = 1;
290         return (struct crypto_aes_ctx *)ALIGN(addr, align);
291 }
292
293 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
294                               const u8 *in_key, unsigned int key_len)
295 {
296         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
297         u32 *flags = &tfm->crt_flags;
298         int err;
299
300         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
301             key_len != AES_KEYSIZE_256) {
302                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
303                 return -EINVAL;
304         }
305
306         if (!irq_fpu_usable())
307                 err = crypto_aes_expand_key(ctx, in_key, key_len);
308         else {
309                 kernel_fpu_begin();
310                 err = aesni_set_key(ctx, in_key, key_len);
311                 kernel_fpu_end();
312         }
313
314         return err;
315 }
316
317 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
318                        unsigned int key_len)
319 {
320         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
321 }
322
323 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
324 {
325         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
326
327         if (!irq_fpu_usable())
328                 crypto_aes_encrypt_x86(ctx, dst, src);
329         else {
330                 kernel_fpu_begin();
331                 aesni_enc(ctx, dst, src);
332                 kernel_fpu_end();
333         }
334 }
335
336 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
337 {
338         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
339
340         if (!irq_fpu_usable())
341                 crypto_aes_decrypt_x86(ctx, dst, src);
342         else {
343                 kernel_fpu_begin();
344                 aesni_dec(ctx, dst, src);
345                 kernel_fpu_end();
346         }
347 }
348
349 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
350 {
351         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
352
353         aesni_enc(ctx, dst, src);
354 }
355
356 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
357 {
358         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
359
360         aesni_dec(ctx, dst, src);
361 }
362
363 static int ecb_encrypt(struct blkcipher_desc *desc,
364                        struct scatterlist *dst, struct scatterlist *src,
365                        unsigned int nbytes)
366 {
367         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
368         struct blkcipher_walk walk;
369         int err;
370
371         blkcipher_walk_init(&walk, dst, src, nbytes);
372         err = blkcipher_walk_virt(desc, &walk);
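        /* The walk below runs under kernel_fpu_begin() and must not sleep. */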
373         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
374
375         kernel_fpu_begin();
376         while ((nbytes = walk.nbytes)) {
377                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
378                               nbytes & AES_BLOCK_MASK);
379                 nbytes &= AES_BLOCK_SIZE - 1;
380                 err = blkcipher_walk_done(desc, &walk, nbytes);
381         }
382         kernel_fpu_end();
383
384         return err;
385 }
386
387 static int ecb_decrypt(struct blkcipher_desc *desc,
388                        struct scatterlist *dst, struct scatterlist *src,
389                        unsigned int nbytes)
390 {
391         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
392         struct blkcipher_walk walk;
393         int err;
394
395         blkcipher_walk_init(&walk, dst, src, nbytes);
396         err = blkcipher_walk_virt(desc, &walk);
397         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
398
399         kernel_fpu_begin();
400         while ((nbytes = walk.nbytes)) {
401                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
402                               nbytes & AES_BLOCK_MASK);
403                 nbytes &= AES_BLOCK_SIZE - 1;
404                 err = blkcipher_walk_done(desc, &walk, nbytes);
405         }
406         kernel_fpu_end();
407
408         return err;
409 }
410
411 static int cbc_encrypt(struct blkcipher_desc *desc,
412                        struct scatterlist *dst, struct scatterlist *src,
413                        unsigned int nbytes)
414 {
415         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
416         struct blkcipher_walk walk;
417         int err;
418
419         blkcipher_walk_init(&walk, dst, src, nbytes);
420         err = blkcipher_walk_virt(desc, &walk);
421         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
422
423         kernel_fpu_begin();
424         while ((nbytes = walk.nbytes)) {
425                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
426                               nbytes & AES_BLOCK_MASK, walk.iv);
427                 nbytes &= AES_BLOCK_SIZE - 1;
428                 err = blkcipher_walk_done(desc, &walk, nbytes);
429         }
430         kernel_fpu_end();
431
432         return err;
433 }
434
435 static int cbc_decrypt(struct blkcipher_desc *desc,
436                        struct scatterlist *dst, struct scatterlist *src,
437                        unsigned int nbytes)
438 {
439         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
440         struct blkcipher_walk walk;
441         int err;
442
443         blkcipher_walk_init(&walk, dst, src, nbytes);
444         err = blkcipher_walk_virt(desc, &walk);
445         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
446
447         kernel_fpu_begin();
448         while ((nbytes = walk.nbytes)) {
449                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
450                               nbytes & AES_BLOCK_MASK, walk.iv);
451                 nbytes &= AES_BLOCK_SIZE - 1;
452                 err = blkcipher_walk_done(desc, &walk, nbytes);
453         }
454         kernel_fpu_end();
455
456         return err;
457 }
458
459 #ifdef CONFIG_X86_64
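/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block to get one keystream block, XOR only the remaining bytes
 * with it and advance the counter.
 */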
460 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
461                             struct blkcipher_walk *walk)
462 {
463         u8 *ctrblk = walk->iv;
464         u8 keystream[AES_BLOCK_SIZE];
465         u8 *src = walk->src.virt.addr;
466         u8 *dst = walk->dst.virt.addr;
467         unsigned int nbytes = walk->nbytes;
468
469         aesni_enc(ctx, keystream, ctrblk);
470         crypto_xor(keystream, src, nbytes);
471         memcpy(dst, keystream, nbytes);
472         crypto_inc(ctrblk, AES_BLOCK_SIZE);
473 }
474
475 static int ctr_crypt(struct blkcipher_desc *desc,
476                      struct scatterlist *dst, struct scatterlist *src,
477                      unsigned int nbytes)
478 {
479         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
480         struct blkcipher_walk walk;
481         int err;
482
483         blkcipher_walk_init(&walk, dst, src, nbytes);
484         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
485         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
486
487         kernel_fpu_begin();
488         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
489                 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
490                               nbytes & AES_BLOCK_MASK, walk.iv);
491                 nbytes &= AES_BLOCK_SIZE - 1;
492                 err = blkcipher_walk_done(desc, &walk, nbytes);
493         }
494         if (walk.nbytes) {
495                 ctr_crypt_final(ctx, &walk);
496                 err = blkcipher_walk_done(desc, &walk, 0);
497         }
498         kernel_fpu_end();
499
500         return err;
501 }
502 #endif
503
504 static int ablk_ecb_init(struct crypto_tfm *tfm)
505 {
506         return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
507 }
508
509 static int ablk_cbc_init(struct crypto_tfm *tfm)
510 {
511         return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
512 }
513
514 #ifdef CONFIG_X86_64
515 static int ablk_ctr_init(struct crypto_tfm *tfm)
516 {
517         return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
518 }
519
520 #endif
521
522 #ifdef HAS_PCBC
523 static int ablk_pcbc_init(struct crypto_tfm *tfm)
524 {
525         return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
526 }
527 #endif
528
529 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
530 {
531         aesni_ecb_enc(ctx, blks, blks, nbytes);
532 }
533
534 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
535 {
536         aesni_ecb_dec(ctx, blks, blks, nbytes);
537 }
538
539 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
540                             unsigned int keylen)
541 {
542         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
543         int err;
544
545         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
546                                  keylen - AES_BLOCK_SIZE);
547         if (err)
548                 return err;
549
550         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
551 }
552
553 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
554 {
555         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
556
557         lrw_free_table(&ctx->lrw_table);
558 }
559
560 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
561                        struct scatterlist *src, unsigned int nbytes)
562 {
563         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
564         be128 buf[8];
565         struct lrw_crypt_req req = {
566                 .tbuf = buf,
567                 .tbuflen = sizeof(buf),
568
569                 .table_ctx = &ctx->lrw_table,
570                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
571                 .crypt_fn = lrw_xts_encrypt_callback,
572         };
573         int ret;
574
575         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
576
577         kernel_fpu_begin();
578         ret = lrw_crypt(desc, dst, src, nbytes, &req);
579         kernel_fpu_end();
580
581         return ret;
582 }
583
584 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
585                        struct scatterlist *src, unsigned int nbytes)
586 {
587         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
588         be128 buf[8];
589         struct lrw_crypt_req req = {
590                 .tbuf = buf,
591                 .tbuflen = sizeof(buf),
592
593                 .table_ctx = &ctx->lrw_table,
594                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
595                 .crypt_fn = lrw_xts_decrypt_callback,
596         };
597         int ret;
598
599         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
600
601         kernel_fpu_begin();
602         ret = lrw_crypt(desc, dst, src, nbytes, &req);
603         kernel_fpu_end();
604
605         return ret;
606 }
607
608 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
609                             unsigned int keylen)
610 {
611         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
612         u32 *flags = &tfm->crt_flags;
613         int err;
614
615         /* key consists of keys of equal size concatenated, therefore
616          * the length must be even
617          */
618         if (keylen % 2) {
619                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
620                 return -EINVAL;
621         }
622
623         /* first half of xts-key is for crypt */
624         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
625         if (err)
626                 return err;
627
628         /* second half of xts-key is for tweak */
629         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
630                                   keylen / 2);
631 }
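/*
 * Illustrative note: the xts(aes) key handed to the setkey above is the
 * data (crypt) key followed by the tweak key, so a 32-byte key yields two
 * AES-128 keys and a 64-byte key yields two AES-256 keys.
 */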
632
633
634 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
635 {
636         aesni_enc(ctx, out, in);
637 }
638
639 #ifdef CONFIG_X86_64
640
641 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
642 {
643         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
644 }
645
646 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
647 {
648         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
649 }
650
651 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
652 {
653         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
654 }
655
656 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
657 {
658         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
659 }
660
661 static const struct common_glue_ctx aesni_enc_xts = {
662         .num_funcs = 2,
663         .fpu_blocks_limit = 1,
664
665         .funcs = { {
666                 .num_blocks = 8,
667                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
668         }, {
669                 .num_blocks = 1,
670                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
671         } }
672 };
673
674 static const struct common_glue_ctx aesni_dec_xts = {
675         .num_funcs = 2,
676         .fpu_blocks_limit = 1,
677
678         .funcs = { {
679                 .num_blocks = 8,
680                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
681         }, {
682                 .num_blocks = 1,
683                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
684         } }
685 };
686
687 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
688                        struct scatterlist *src, unsigned int nbytes)
689 {
690         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
691
692         return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
693                                      XTS_TWEAK_CAST(aesni_xts_tweak),
694                                      aes_ctx(ctx->raw_tweak_ctx),
695                                      aes_ctx(ctx->raw_crypt_ctx));
696 }
697
698 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
699                        struct scatterlist *src, unsigned int nbytes)
700 {
701         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
702
703         return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
704                                      XTS_TWEAK_CAST(aesni_xts_tweak),
705                                      aes_ctx(ctx->raw_tweak_ctx),
706                                      aes_ctx(ctx->raw_crypt_ctx));
707 }
708
709 #else
710
711 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
712                        struct scatterlist *src, unsigned int nbytes)
713 {
714         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
715         be128 buf[8];
716         struct xts_crypt_req req = {
717                 .tbuf = buf,
718                 .tbuflen = sizeof(buf),
719
720                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
721                 .tweak_fn = aesni_xts_tweak,
722                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
723                 .crypt_fn = lrw_xts_encrypt_callback,
724         };
725         int ret;
726
727         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
728
729         kernel_fpu_begin();
730         ret = xts_crypt(desc, dst, src, nbytes, &req);
731         kernel_fpu_end();
732
733         return ret;
734 }
735
736 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
737                        struct scatterlist *src, unsigned int nbytes)
738 {
739         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
740         be128 buf[8];
741         struct xts_crypt_req req = {
742                 .tbuf = buf,
743                 .tbuflen = sizeof(buf),
744
745                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
746                 .tweak_fn = aesni_xts_tweak,
747                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
748                 .crypt_fn = lrw_xts_decrypt_callback,
749         };
750         int ret;
751
752         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
753
754         kernel_fpu_begin();
755         ret = xts_crypt(desc, dst, src, nbytes, &req);
756         kernel_fpu_end();
757
758         return ret;
759 }
760
761 #endif
762
763 #ifdef CONFIG_X86_64
764 static int rfc4106_init(struct crypto_tfm *tfm)
765 {
766         struct cryptd_aead *cryptd_tfm;
767         struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
768                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
769         struct crypto_aead *cryptd_child;
770         struct aesni_rfc4106_gcm_ctx *child_ctx;
771         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
772         if (IS_ERR(cryptd_tfm))
773                 return PTR_ERR(cryptd_tfm);
774
775         cryptd_child = cryptd_aead_child(cryptd_tfm);
776         child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
777         memcpy(child_ctx, ctx, sizeof(*ctx));
778         ctx->cryptd_tfm = cryptd_tfm;
779         tfm->crt_aead.reqsize = sizeof(struct aead_request)
780                 + crypto_aead_reqsize(&cryptd_tfm->base);
781         return 0;
782 }
783
784 static void rfc4106_exit(struct crypto_tfm *tfm)
785 {
786         struct aesni_rfc4106_gcm_ctx *ctx =
787                 (struct aesni_rfc4106_gcm_ctx *)
788                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
789         if (!IS_ERR(ctx->cryptd_tfm))
790                 cryptd_free_aead(ctx->cryptd_tfm);
791         return;
792 }
793
794 static void
795 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
796 {
797         struct aesni_gcm_set_hash_subkey_result *result = req->data;
798
799         if (err == -EINPROGRESS)
800                 return;
801         result->err = err;
802         complete(&result->completion);
803 }
804
805 static int
806 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
807 {
808         struct crypto_ablkcipher *ctr_tfm;
809         struct ablkcipher_request *req;
810         int ret = -EINVAL;
811         struct aesni_hash_subkey_req_data *req_data;
812
813         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
814         if (IS_ERR(ctr_tfm))
815                 return PTR_ERR(ctr_tfm);
816
817         crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
818
819         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
820         if (ret)
821                 goto out_free_ablkcipher;
822
823         ret = -ENOMEM;
824         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
825         if (!req)
826                 goto out_free_ablkcipher;
827
828         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
829         if (!req_data)
830                 goto out_free_request;
831
832         memset(req_data->iv, 0, sizeof(req_data->iv));
833
834         /* Clear the data in the hash sub key container to zero;
835          * we want to cipher all zeros to create the hash sub key. */
836         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
837
838         init_completion(&req_data->result.completion);
839         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
840         ablkcipher_request_set_tfm(req, ctr_tfm);
841         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
842                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
843                                         rfc4106_set_hash_subkey_done,
844                                         &req_data->result);
845
846         ablkcipher_request_set_crypt(req, &req_data->sg,
847                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
848
849         ret = crypto_ablkcipher_encrypt(req);
850         if (ret == -EINPROGRESS || ret == -EBUSY) {
851                 ret = wait_for_completion_interruptible
852                         (&req_data->result.completion);
853                 if (!ret)
854                         ret = req_data->result.err;
855         }
856         kfree(req_data);
857 out_free_request:
858         ablkcipher_request_free(req);
859 out_free_ablkcipher:
860         crypto_free_ablkcipher(ctr_tfm);
861         return ret;
862 }
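/*
 * Minimal sketch (illustrative only, assumes the FPU is usable and the key
 * has already been expanded): the hash subkey computed above is simply
 * H = AES-encrypt(key, 0^128), which is also what the ctr(aes) request over
 * an all-zero block with an all-zero counter block produces.
 */
static inline void example_compute_hash_subkey(struct crypto_aes_ctx *ctx,
					       u8 *hash_subkey)
{
	static const u8 zeroes[16];

	kernel_fpu_begin();
	aesni_enc(ctx, hash_subkey, zeroes);
	kernel_fpu_end();
}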
863
864 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
865                                                    unsigned int key_len)
866 {
867         int ret = 0;
868         struct crypto_tfm *tfm = crypto_aead_tfm(parent);
869         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
870         struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
871         struct aesni_rfc4106_gcm_ctx *child_ctx =
872                                  aesni_rfc4106_gcm_ctx_get(cryptd_child);
873         u8 *new_key_align, *new_key_mem = NULL;
874
875         if (key_len < 4) {
876                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
877                 return -EINVAL;
878         }
879         /* Account for 4 byte nonce at the end. */
880         key_len -= 4;
881         if (key_len != AES_KEYSIZE_128) {
882                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
883                 return -EINVAL;
884         }
885
886         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
887         /* This must be on a 16 byte boundary! */
888         if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
889                 return -EINVAL;
890
891         if ((unsigned long)key % AESNI_ALIGN) {
892                 /* key is not aligned: use an auxiliary aligned buffer */
893                 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
894                 if (!new_key_mem)
895                         return -ENOMEM;
896
897                 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
898                 memcpy(new_key_align, key, key_len);
899                 key = new_key_align;
900         }
901
902         if (!irq_fpu_usable())
903                 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
904                 key, key_len);
905         else {
906                 kernel_fpu_begin();
907                 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
908                 kernel_fpu_end();
909         }
910         /* This must be on a 16 byte boundary! */
911         if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
912                 ret = -EINVAL;
913                 goto exit;
914         }
915         ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
916         memcpy(child_ctx, ctx, sizeof(*ctx));
917 exit:
918         kfree(new_key_mem);
919         return ret;
920 }
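/*
 * Illustrative note: the rfc4106 key passed to the setkey above is the AES
 * key followed by the 4-byte salt from the Security Association, so an
 * AES-128 session supplies 16 + 4 = 20 bytes in total.
 */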
921
922 /* This is the Integrity Check Value (aka the authentication tag) length and
923  * can be 8, 12 or 16 bytes long. */
924 static int rfc4106_set_authsize(struct crypto_aead *parent,
925                                 unsigned int authsize)
926 {
927         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
928         struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
929
930         switch (authsize) {
931         case 8:
932         case 12:
933         case 16:
934                 break;
935         default:
936                 return -EINVAL;
937         }
938         crypto_aead_crt(parent)->authsize = authsize;
939         crypto_aead_crt(cryptd_child)->authsize = authsize;
940         return 0;
941 }
942
943 static int rfc4106_encrypt(struct aead_request *req)
944 {
945         int ret;
946         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
947         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
948
949         if (!irq_fpu_usable()) {
950                 struct aead_request *cryptd_req =
951                         (struct aead_request *) aead_request_ctx(req);
952                 memcpy(cryptd_req, req, sizeof(*req));
953                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
954                 return crypto_aead_encrypt(cryptd_req);
955         } else {
956                 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
957                 kernel_fpu_begin();
958                 ret = cryptd_child->base.crt_aead.encrypt(req);
959                 kernel_fpu_end();
960                 return ret;
961         }
962 }
963
964 static int rfc4106_decrypt(struct aead_request *req)
965 {
966         int ret;
967         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
968         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
969
970         if (!irq_fpu_usable()) {
971                 struct aead_request *cryptd_req =
972                         (struct aead_request *) aead_request_ctx(req);
973                 memcpy(cryptd_req, req, sizeof(*req));
974                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
975                 return crypto_aead_decrypt(cryptd_req);
976         } else {
977                 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
978                 kernel_fpu_begin();
979                 ret = cryptd_child->base.crt_aead.decrypt(req);
980                 kernel_fpu_end();
981                 return ret;
982         }
983 }
984
985 static int __driver_rfc4106_encrypt(struct aead_request *req)
986 {
987         u8 one_entry_in_sg = 0;
988         u8 *src, *dst, *assoc;
989         __be32 counter = cpu_to_be32(1);
990         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
991         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
992         void *aes_ctx = &(ctx->aes_key_expanded);
993         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
994         u8 iv_tab[16+AESNI_ALIGN];
995         u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
996         struct scatter_walk src_sg_walk;
997         struct scatter_walk assoc_sg_walk;
998         struct scatter_walk dst_sg_walk;
999         unsigned int i;
1000
1001         /* Assuming we are supporting rfc4106 64-bit extended
1002          * sequence numbers, the AAD length must be 8 or 12 bytes.
1003          */
1004         if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1005                 return -EINVAL;
1006         /* Build the IV: 4-byte salt, 8-byte explicit IV, 32-bit counter set to 1 */
1007         for (i = 0; i < 4; i++)
1008                 *(iv+i) = ctx->nonce[i];
1009         for (i = 0; i < 8; i++)
1010                 *(iv+4+i) = req->iv[i];
1011         *((__be32 *)(iv+12)) = counter;
1012
1013         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1014                 one_entry_in_sg = 1;
1015                 scatterwalk_start(&src_sg_walk, req->src);
1016                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1017                 src = scatterwalk_map(&src_sg_walk);
1018                 assoc = scatterwalk_map(&assoc_sg_walk);
1019                 dst = src;
1020                 if (unlikely(req->src != req->dst)) {
1021                         scatterwalk_start(&dst_sg_walk, req->dst);
1022                         dst = scatterwalk_map(&dst_sg_walk);
1023                 }
1024
1025         } else {
1026                 /* Allocate memory for src, dst, assoc */
1027                 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1028                         GFP_ATOMIC);
1029                 if (unlikely(!src))
1030                         return -ENOMEM;
1031                 assoc = (src + req->cryptlen + auth_tag_len);
1032                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1033                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1034                                         req->assoclen, 0);
1035                 dst = src;
1036         }
1037
1038         aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1039                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1040                 + ((unsigned long)req->cryptlen), auth_tag_len);
1041
1042         /* The authTag (aka the Integrity Check Value) needs to be written
1043          * back to the packet. */
1044         if (one_entry_in_sg) {
1045                 if (unlikely(req->src != req->dst)) {
1046                         scatterwalk_unmap(dst);
1047                         scatterwalk_done(&dst_sg_walk, 0, 0);
1048                 }
1049                 scatterwalk_unmap(src);
1050                 scatterwalk_unmap(assoc);
1051                 scatterwalk_done(&src_sg_walk, 0, 0);
1052                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1053         } else {
1054                 scatterwalk_map_and_copy(dst, req->dst, 0,
1055                         req->cryptlen + auth_tag_len, 1);
1056                 kfree(src);
1057         }
1058         return 0;
1059 }
1060
1061 static int __driver_rfc4106_decrypt(struct aead_request *req)
1062 {
1063         u8 one_entry_in_sg = 0;
1064         u8 *src, *dst, *assoc;
1065         unsigned long tempCipherLen = 0;
1066         __be32 counter = cpu_to_be32(1);
1067         int retval = 0;
1068         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1069         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1070         void *aes_ctx = &(ctx->aes_key_expanded);
1071         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1072         u8 iv_and_authTag[32+AESNI_ALIGN];
1073         u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1074         u8 *authTag = iv + 16;
1075         struct scatter_walk src_sg_walk;
1076         struct scatter_walk assoc_sg_walk;
1077         struct scatter_walk dst_sg_walk;
1078         unsigned int i;
1079
1080         if (unlikely((req->cryptlen < auth_tag_len) ||
1081                 (req->assoclen != 8 && req->assoclen != 12)))
1082                 return -EINVAL;
1083         /* Assuming we are supporting rfc4106 64-bit extended
1084          * sequence numbers, the AAD length must be 8 or 12 bytes.
1085          */
1086
1087         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1088         /* Build the IV: 4-byte salt, 8-byte explicit IV, 32-bit counter set to 1 */
1089         for (i = 0; i < 4; i++)
1090                 *(iv+i) = ctx->nonce[i];
1091         for (i = 0; i < 8; i++)
1092                 *(iv+4+i) = req->iv[i];
1093         *((__be32 *)(iv+12)) = counter;
1094
1095         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1096                 one_entry_in_sg = 1;
1097                 scatterwalk_start(&src_sg_walk, req->src);
1098                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1099                 src = scatterwalk_map(&src_sg_walk);
1100                 assoc = scatterwalk_map(&assoc_sg_walk);
1101                 dst = src;
1102                 if (unlikely(req->src != req->dst)) {
1103                         scatterwalk_start(&dst_sg_walk, req->dst);
1104                         dst = scatterwalk_map(&dst_sg_walk);
1105                 }
1106
1107         } else {
1108                 /* Allocate memory for src, dst, assoc */
1109                 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1110                 if (!src)
1111                         return -ENOMEM;
1112                 assoc = (src + req->cryptlen);
1113                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1114                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1115                         req->assoclen, 0);
1116                 dst = src;
1117         }
1118
1119         aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1120                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1121                 authTag, auth_tag_len);
1122
1123         /* Compare generated tag with passed in tag. */
1124         retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1125                 -EBADMSG : 0;
1126
1127         if (one_entry_in_sg) {
1128                 if (unlikely(req->src != req->dst)) {
1129                         scatterwalk_unmap(dst);
1130                         scatterwalk_done(&dst_sg_walk, 0, 0);
1131                 }
1132                 scatterwalk_unmap(src);
1133                 scatterwalk_unmap(assoc);
1134                 scatterwalk_done(&src_sg_walk, 0, 0);
1135                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1136         } else {
1137                 scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
1138                 kfree(src);
1139         }
1140         return retval;
1141 }
1142 #endif
1143
1144 static struct crypto_alg aesni_algs[] = { {
1145         .cra_name               = "aes",
1146         .cra_driver_name        = "aes-aesni",
1147         .cra_priority           = 300,
1148         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1149         .cra_blocksize          = AES_BLOCK_SIZE,
1150         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1151                                   AESNI_ALIGN - 1,
1152         .cra_alignmask          = 0,
1153         .cra_module             = THIS_MODULE,
1154         .cra_u  = {
1155                 .cipher = {
1156                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1157                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1158                         .cia_setkey             = aes_set_key,
1159                         .cia_encrypt            = aes_encrypt,
1160                         .cia_decrypt            = aes_decrypt
1161                 }
1162         }
1163 }, {
1164         .cra_name               = "__aes-aesni",
1165         .cra_driver_name        = "__driver-aes-aesni",
1166         .cra_priority           = 0,
1167         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1168         .cra_blocksize          = AES_BLOCK_SIZE,
1169         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1170                                   AESNI_ALIGN - 1,
1171         .cra_alignmask          = 0,
1172         .cra_module             = THIS_MODULE,
1173         .cra_u  = {
1174                 .cipher = {
1175                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1176                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1177                         .cia_setkey             = aes_set_key,
1178                         .cia_encrypt            = __aes_encrypt,
1179                         .cia_decrypt            = __aes_decrypt
1180                 }
1181         }
1182 }, {
1183         .cra_name               = "__ecb-aes-aesni",
1184         .cra_driver_name        = "__driver-ecb-aes-aesni",
1185         .cra_priority           = 0,
1186         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1187         .cra_blocksize          = AES_BLOCK_SIZE,
1188         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1189                                   AESNI_ALIGN - 1,
1190         .cra_alignmask          = 0,
1191         .cra_type               = &crypto_blkcipher_type,
1192         .cra_module             = THIS_MODULE,
1193         .cra_u = {
1194                 .blkcipher = {
1195                         .min_keysize    = AES_MIN_KEY_SIZE,
1196                         .max_keysize    = AES_MAX_KEY_SIZE,
1197                         .setkey         = aes_set_key,
1198                         .encrypt        = ecb_encrypt,
1199                         .decrypt        = ecb_decrypt,
1200                 },
1201         },
1202 }, {
1203         .cra_name               = "__cbc-aes-aesni",
1204         .cra_driver_name        = "__driver-cbc-aes-aesni",
1205         .cra_priority           = 0,
1206         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1207         .cra_blocksize          = AES_BLOCK_SIZE,
1208         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1209                                   AESNI_ALIGN - 1,
1210         .cra_alignmask          = 0,
1211         .cra_type               = &crypto_blkcipher_type,
1212         .cra_module             = THIS_MODULE,
1213         .cra_u = {
1214                 .blkcipher = {
1215                         .min_keysize    = AES_MIN_KEY_SIZE,
1216                         .max_keysize    = AES_MAX_KEY_SIZE,
1217                         .setkey         = aes_set_key,
1218                         .encrypt        = cbc_encrypt,
1219                         .decrypt        = cbc_decrypt,
1220                 },
1221         },
1222 }, {
1223         .cra_name               = "ecb(aes)",
1224         .cra_driver_name        = "ecb-aes-aesni",
1225         .cra_priority           = 400,
1226         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1227         .cra_blocksize          = AES_BLOCK_SIZE,
1228         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1229         .cra_alignmask          = 0,
1230         .cra_type               = &crypto_ablkcipher_type,
1231         .cra_module             = THIS_MODULE,
1232         .cra_init               = ablk_ecb_init,
1233         .cra_exit               = ablk_exit,
1234         .cra_u = {
1235                 .ablkcipher = {
1236                         .min_keysize    = AES_MIN_KEY_SIZE,
1237                         .max_keysize    = AES_MAX_KEY_SIZE,
1238                         .setkey         = ablk_set_key,
1239                         .encrypt        = ablk_encrypt,
1240                         .decrypt        = ablk_decrypt,
1241                 },
1242         },
1243 }, {
1244         .cra_name               = "cbc(aes)",
1245         .cra_driver_name        = "cbc-aes-aesni",
1246         .cra_priority           = 400,
1247         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1248         .cra_blocksize          = AES_BLOCK_SIZE,
1249         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1250         .cra_alignmask          = 0,
1251         .cra_type               = &crypto_ablkcipher_type,
1252         .cra_module             = THIS_MODULE,
1253         .cra_init               = ablk_cbc_init,
1254         .cra_exit               = ablk_exit,
1255         .cra_u = {
1256                 .ablkcipher = {
1257                         .min_keysize    = AES_MIN_KEY_SIZE,
1258                         .max_keysize    = AES_MAX_KEY_SIZE,
1259                         .ivsize         = AES_BLOCK_SIZE,
1260                         .setkey         = ablk_set_key,
1261                         .encrypt        = ablk_encrypt,
1262                         .decrypt        = ablk_decrypt,
1263                 },
1264         },
1265 #ifdef CONFIG_X86_64
1266 }, {
1267         .cra_name               = "__ctr-aes-aesni",
1268         .cra_driver_name        = "__driver-ctr-aes-aesni",
1269         .cra_priority           = 0,
1270         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1271         .cra_blocksize          = 1,
1272         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1273                                   AESNI_ALIGN - 1,
1274         .cra_alignmask          = 0,
1275         .cra_type               = &crypto_blkcipher_type,
1276         .cra_module             = THIS_MODULE,
1277         .cra_u = {
1278                 .blkcipher = {
1279                         .min_keysize    = AES_MIN_KEY_SIZE,
1280                         .max_keysize    = AES_MAX_KEY_SIZE,
1281                         .ivsize         = AES_BLOCK_SIZE,
1282                         .setkey         = aes_set_key,
1283                         .encrypt        = ctr_crypt,
1284                         .decrypt        = ctr_crypt,
1285                 },
1286         },
1287 }, {
1288         .cra_name               = "ctr(aes)",
1289         .cra_driver_name        = "ctr-aes-aesni",
1290         .cra_priority           = 400,
1291         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1292         .cra_blocksize          = 1,
1293         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1294         .cra_alignmask          = 0,
1295         .cra_type               = &crypto_ablkcipher_type,
1296         .cra_module             = THIS_MODULE,
1297         .cra_init               = ablk_ctr_init,
1298         .cra_exit               = ablk_exit,
1299         .cra_u = {
1300                 .ablkcipher = {
1301                         .min_keysize    = AES_MIN_KEY_SIZE,
1302                         .max_keysize    = AES_MAX_KEY_SIZE,
1303                         .ivsize         = AES_BLOCK_SIZE,
1304                         .setkey         = ablk_set_key,
1305                         .encrypt        = ablk_encrypt,
1306                         .decrypt        = ablk_encrypt,
1307                         .geniv          = "chainiv",
1308                 },
1309         },
1310 }, {
1311         .cra_name               = "__gcm-aes-aesni",
1312         .cra_driver_name        = "__driver-gcm-aes-aesni",
1313         .cra_priority           = 0,
1314         .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
1315         .cra_blocksize          = 1,
1316         .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
1317                                   AESNI_ALIGN,
1318         .cra_alignmask          = 0,
1319         .cra_type               = &crypto_aead_type,
1320         .cra_module             = THIS_MODULE,
1321         .cra_u = {
1322                 .aead = {
1323                         .encrypt        = __driver_rfc4106_encrypt,
1324                         .decrypt        = __driver_rfc4106_decrypt,
1325                 },
1326         },
1327 }, {
1328         .cra_name               = "rfc4106(gcm(aes))",
1329         .cra_driver_name        = "rfc4106-gcm-aesni",
1330         .cra_priority           = 400,
1331         .cra_flags              = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1332         .cra_blocksize          = 1,
1333         .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
1334                                   AESNI_ALIGN,
1335         .cra_alignmask          = 0,
1336         .cra_type               = &crypto_nivaead_type,
1337         .cra_module             = THIS_MODULE,
1338         .cra_init               = rfc4106_init,
1339         .cra_exit               = rfc4106_exit,
1340         .cra_u = {
1341                 .aead = {
1342                         .setkey         = rfc4106_set_key,
1343                         .setauthsize    = rfc4106_set_authsize,
1344                         .encrypt        = rfc4106_encrypt,
1345                         .decrypt        = rfc4106_decrypt,
1346                         .geniv          = "seqiv",
1347                         .ivsize         = 8,
1348                         .maxauthsize    = 16,
1349                 },
1350         },
1351 #endif
1352 #ifdef HAS_PCBC
1353 }, {
1354         .cra_name               = "pcbc(aes)",
1355         .cra_driver_name        = "pcbc-aes-aesni",
1356         .cra_priority           = 400,
1357         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1358         .cra_blocksize          = AES_BLOCK_SIZE,
1359         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1360         .cra_alignmask          = 0,
1361         .cra_type               = &crypto_ablkcipher_type,
1362         .cra_module             = THIS_MODULE,
1363         .cra_init               = ablk_pcbc_init,
1364         .cra_exit               = ablk_exit,
1365         .cra_u = {
1366                 .ablkcipher = {
1367                         .min_keysize    = AES_MIN_KEY_SIZE,
1368                         .max_keysize    = AES_MAX_KEY_SIZE,
1369                         .ivsize         = AES_BLOCK_SIZE,
1370                         .setkey         = ablk_set_key,
1371                         .encrypt        = ablk_encrypt,
1372                         .decrypt        = ablk_decrypt,
1373                 },
1374         },
1375 #endif
1376 }, {
1377         .cra_name               = "__lrw-aes-aesni",
1378         .cra_driver_name        = "__driver-lrw-aes-aesni",
1379         .cra_priority           = 0,
1380         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1381         .cra_blocksize          = AES_BLOCK_SIZE,
1382         .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
1383         .cra_alignmask          = 0,
1384         .cra_type               = &crypto_blkcipher_type,
1385         .cra_module             = THIS_MODULE,
1386         .cra_exit               = lrw_aesni_exit_tfm,
1387         .cra_u = {
1388                 .blkcipher = {
1389                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1390                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1391                         .ivsize         = AES_BLOCK_SIZE,
1392                         .setkey         = lrw_aesni_setkey,
1393                         .encrypt        = lrw_encrypt,
1394                         .decrypt        = lrw_decrypt,
1395                 },
1396         },
1397 }, {
1398         .cra_name               = "__xts-aes-aesni",
1399         .cra_driver_name        = "__driver-xts-aes-aesni",
1400         .cra_priority           = 0,
1401         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1402         .cra_blocksize          = AES_BLOCK_SIZE,
1403         .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
1404         .cra_alignmask          = 0,
1405         .cra_type               = &crypto_blkcipher_type,
1406         .cra_module             = THIS_MODULE,
1407         .cra_u = {
1408                 .blkcipher = {
1409                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1410                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1411                         .ivsize         = AES_BLOCK_SIZE,
1412                         .setkey         = xts_aesni_setkey,
1413                         .encrypt        = xts_encrypt,
1414                         .decrypt        = xts_decrypt,
1415                 },
1416         },
1417 }, {
1418         .cra_name               = "lrw(aes)",
1419         .cra_driver_name        = "lrw-aes-aesni",
1420         .cra_priority           = 400,
1421         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1422         .cra_blocksize          = AES_BLOCK_SIZE,
1423         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1424         .cra_alignmask          = 0,
1425         .cra_type               = &crypto_ablkcipher_type,
1426         .cra_module             = THIS_MODULE,
1427         .cra_init               = ablk_init,
1428         .cra_exit               = ablk_exit,
1429         .cra_u = {
1430                 .ablkcipher = {
1431                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1432                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1433                         .ivsize         = AES_BLOCK_SIZE,
1434                         .setkey         = ablk_set_key,
1435                         .encrypt        = ablk_encrypt,
1436                         .decrypt        = ablk_decrypt,
1437                 },
1438         },
1439 }, {
1440         .cra_name               = "xts(aes)",
1441         .cra_driver_name        = "xts-aes-aesni",
1442         .cra_priority           = 400,
1443         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1444         .cra_blocksize          = AES_BLOCK_SIZE,
1445         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1446         .cra_alignmask          = 0,
1447         .cra_type               = &crypto_ablkcipher_type,
1448         .cra_module             = THIS_MODULE,
1449         .cra_init               = ablk_init,
1450         .cra_exit               = ablk_exit,
1451         .cra_u = {
1452                 .ablkcipher = {
1453                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1454                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1455                         .ivsize         = AES_BLOCK_SIZE,
1456                         .setkey         = ablk_set_key,
1457                         .encrypt        = ablk_encrypt,
1458                         .decrypt        = ablk_decrypt,
1459                 },
1460         },
1461 } };
1462
1463
1464 static const struct x86_cpu_id aesni_cpu_id[] = {
1465         X86_FEATURE_MATCH(X86_FEATURE_AES),
1466         {}
1467 };
1468 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1469
1470 static int __init aesni_init(void)
1471 {
1472         int err;
1473
1474         if (!x86_match_cpu(aesni_cpu_id))
1475                 return -ENODEV;
1476 #ifdef CONFIG_X86_64
1477 #ifdef CONFIG_AS_AVX2
1478         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1479                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1480                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1481                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1482         } else
1483 #endif
1484 #ifdef CONFIG_AS_AVX
1485         if (boot_cpu_has(X86_FEATURE_AVX)) {
1486                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1487                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1488                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1489         } else
1490 #endif
1491         {
1492                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1493                 aesni_gcm_enc_tfm = aesni_gcm_enc;
1494                 aesni_gcm_dec_tfm = aesni_gcm_dec;
1495         }
1496 #endif
1497
1498         err = crypto_fpu_init();
1499         if (err)
1500                 return err;
1501
1502         return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1503 }
1504
1505 static void __exit aesni_exit(void)
1506 {
1507         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1508
1509         crypto_fpu_exit();
1510 }
1511
1512 module_init(aesni_init);
1513 module_exit(aesni_exit);
1514
1515 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1516 MODULE_LICENSE("GPL");
1517 MODULE_ALIAS("aes");