arch/x86/crypto/aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/i387.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46 /* This data is stored at the end of the crypto_tfm struct.
47  * It serves as per-"session" data storage and needs to be
48  * 16 byte aligned.
49  */
50 struct aesni_rfc4106_gcm_ctx {
51         u8 hash_subkey[16];
52         struct crypto_aes_ctx aes_key_expanded;
53         u8 nonce[4];
54         struct cryptd_aead *cryptd_tfm;
55 };
56
57 struct aesni_gcm_set_hash_subkey_result {
58         int err;
59         struct completion completion;
60 };
61
62 struct aesni_hash_subkey_req_data {
63         u8 iv[16];
64         struct aesni_gcm_set_hash_subkey_result result;
65         struct scatterlist sg;
66 };
67
68 #define AESNI_ALIGN     (16)
69 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
70 #define RFC4106_HASH_SUBKEY_SIZE 16
71
72 struct aesni_lrw_ctx {
73         struct lrw_table_ctx lrw_table;
74         u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
75 };
76
77 struct aesni_xts_ctx {
78         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
79         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80 };
81
82 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
83                              unsigned int key_len);
84 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
85                           const u8 *in);
86 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
87                           const u8 *in);
88 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
89                               const u8 *in, unsigned int len);
90 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
91                               const u8 *in, unsigned int len);
92 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
93                               const u8 *in, unsigned int len, u8 *iv);
94 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
95                               const u8 *in, unsigned int len, u8 *iv);
96
97 int crypto_fpu_init(void);
98 void crypto_fpu_exit(void);
99
100 #define AVX_GEN2_OPTSIZE 640
101 #define AVX_GEN4_OPTSIZE 4096
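
/*
 * These are the crossover sizes used by the GCM dispatch helpers below:
 * requests shorter than AVX_GEN2_OPTSIZE bytes take the plain AES-NI
 * routines, requests between AVX_GEN2_OPTSIZE and AVX_GEN4_OPTSIZE take
 * the AVX (gen2) routines, and larger requests take the AVX2 (gen4)
 * routines.
 */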
102
103 #ifdef CONFIG_X86_64
104
105 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
106                               const u8 *in, unsigned int len, u8 *iv);
107 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
108                               const u8 *in, unsigned int len, u8 *iv);
109
110 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
111                                  const u8 *in, bool enc, u8 *iv);
112
113 /* asmlinkage void aesni_gcm_enc()
114  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
115  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
116  * const u8 *in, Plaintext input
117  * unsigned long plaintext_len, Length of data in bytes for encryption.
118  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
119  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
120  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
121  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
122  * const u8 *aad, Additional Authentication Data (AAD)
123  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
124  *          is going to be 8 or 12 bytes
125  * u8 *auth_tag, Authentication Tag output.
126  * unsigned long auth_tag_len, Authentication Tag Length in bytes.
127  *          Valid values are 16 (most likely), 12 or 8.
128  */
129 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
130                         const u8 *in, unsigned long plaintext_len, u8 *iv,
131                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
132                         u8 *auth_tag, unsigned long auth_tag_len);
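
/*
 * A minimal sketch, not part of the driver, of how the pre-counter
 * block j0 described above is laid out; __driver_rfc4106_encrypt()
 * below performs the real construction.  The "salt" and "explicit_iv"
 * names are illustrative only.
 */
#if 0
	u8 iv[16];				/* must be 16-byte aligned */

	memcpy(iv, salt, 4);			/* 4 byte salt from the SA */
	memcpy(iv + 4, explicit_iv, 8);		/* 8 byte IV from ESP payload */
	*(__be32 *)(iv + 12) = cpu_to_be32(1);	/* initial block counter */
#endif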
133
134 /* asmlinkage void aesni_gcm_dec()
135  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
136  * u8 *out, Plaintext output. Decrypt in-place is allowed.
137  * const u8 *in, Ciphertext input
138  * unsigned long ciphertext_len, Length of data in bytes for decryption.
139  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
140  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
141  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
142  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
143  * const u8 *aad, Additional Authentication Data (AAD)
144  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
145  * to be 8 or 12 bytes
146  * u8 *auth_tag, Authentication Tag output.
147  * unsigned long auth_tag_len, Authentication Tag Length in bytes.
148  * Valid values are 16 (most likely), 12 or 8.
149  */
150 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
151                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
152                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
153                         u8 *auth_tag, unsigned long auth_tag_len);
154
155
156 #ifdef CONFIG_AS_AVX
157 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
158                 void *keys, u8 *out, unsigned int num_bytes);
159 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
160                 void *keys, u8 *out, unsigned int num_bytes);
161 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
162                 void *keys, u8 *out, unsigned int num_bytes);
163 /*
164  * asmlinkage void aesni_gcm_precomp_avx_gen2()
165  * gcm_data *my_ctx_data, context data
166  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
167  */
168 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
169
170 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
171                         const u8 *in, unsigned long plaintext_len, u8 *iv,
172                         const u8 *aad, unsigned long aad_len,
173                         u8 *auth_tag, unsigned long auth_tag_len);
174
175 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
176                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
177                         const u8 *aad, unsigned long aad_len,
178                         u8 *auth_tag, unsigned long auth_tag_len);
179
180 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
181                         const u8 *in, unsigned long plaintext_len, u8 *iv,
182                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
183                         u8 *auth_tag, unsigned long auth_tag_len)
184 {
185         if (plaintext_len < AVX_GEN2_OPTSIZE) {
186                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
187                                 aad_len, auth_tag, auth_tag_len);
188         } else {
189                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
190                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
191                                         aad_len, auth_tag, auth_tag_len);
192         }
193 }
194
195 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
196                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
197                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
198                         u8 *auth_tag, unsigned long auth_tag_len)
199 {
200         if (ciphertext_len < AVX_GEN2_OPTSIZE) {
201                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
202                                 aad_len, auth_tag, auth_tag_len);
203         } else {
204                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
205                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
206                                         aad_len, auth_tag, auth_tag_len);
207         }
208 }
209 #endif
210
211 #ifdef CONFIG_AS_AVX2
212 /*
213  * asmlinkage void aesni_gcm_precomp_avx_gen4()
214  * gcm_data *my_ctx_data, context data
215  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
216  */
217 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
218
219 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
220                         const u8 *in, unsigned long plaintext_len, u8 *iv,
221                         const u8 *aad, unsigned long aad_len,
222                         u8 *auth_tag, unsigned long auth_tag_len);
223
224 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
225                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
226                         const u8 *aad, unsigned long aad_len,
227                         u8 *auth_tag, unsigned long auth_tag_len);
228
229 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
230                         const u8 *in, unsigned long plaintext_len, u8 *iv,
231                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
232                         u8 *auth_tag, unsigned long auth_tag_len)
233 {
234         if (plaintext_len < AVX_GEN2_OPTSIZE) {
235                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
236                                 aad_len, auth_tag, auth_tag_len);
237         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
238                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
239                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
240                                         aad_len, auth_tag, auth_tag_len);
241         } else {
242                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
243                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
244                                         aad_len, auth_tag, auth_tag_len);
245         }
246 }
247
248 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
249                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
250                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
251                         u8 *auth_tag, unsigned long auth_tag_len)
252 {
253         if (ciphertext_len < AVX_GEN2_OPTSIZE) {
254                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
255                                 aad, aad_len, auth_tag, auth_tag_len);
256         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
257                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
258                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
259                                         aad_len, auth_tag, auth_tag_len);
260         } else {
261                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
262                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
263                                         aad_len, auth_tag, auth_tag_len);
264         }
265 }
266 #endif
267
268 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
269                         const u8 *in, unsigned long plaintext_len, u8 *iv,
270                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
271                         u8 *auth_tag, unsigned long auth_tag_len);
272
273 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
274                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
275                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276                         u8 *auth_tag, unsigned long auth_tag_len);
277
278 static inline struct
279 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
280 {
281         return
282                 (struct aesni_rfc4106_gcm_ctx *)
283                 PTR_ALIGN((u8 *)
284                 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
285 }
286 #endif
287
288 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
289 {
290         unsigned long addr = (unsigned long)raw_ctx;
291         unsigned long align = AESNI_ALIGN;
292
293         if (align <= crypto_tfm_ctx_alignment())
294                 align = 1;
295         return (struct crypto_aes_ctx *)ALIGN(addr, align);
296 }
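
/*
 * Illustrative sketch: the raw_*_ctx buffers in the context structs
 * above are over-allocated by AESNI_ALIGN - 1 bytes so that aes_ctx()
 * can always round the pointer up to a 16 byte boundary:
 */
#if 0
	u8 raw[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	struct crypto_aes_ctx *ctx = aes_ctx(raw);	/* 16 byte aligned */
#endif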
297
298 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
299                               const u8 *in_key, unsigned int key_len)
300 {
301         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
302         u32 *flags = &tfm->crt_flags;
303         int err;
304
305         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
306             key_len != AES_KEYSIZE_256) {
307                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
308                 return -EINVAL;
309         }
310
311         if (!irq_fpu_usable())
312                 err = crypto_aes_expand_key(ctx, in_key, key_len);
313         else {
314                 kernel_fpu_begin();
315                 err = aesni_set_key(ctx, in_key, key_len);
316                 kernel_fpu_end();
317         }
318
319         return err;
320 }
321
322 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
323                        unsigned int key_len)
324 {
325         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
326 }
327
328 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
329 {
330         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
331
332         if (!irq_fpu_usable())
333                 crypto_aes_encrypt_x86(ctx, dst, src);
334         else {
335                 kernel_fpu_begin();
336                 aesni_enc(ctx, dst, src);
337                 kernel_fpu_end();
338         }
339 }
340
341 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
342 {
343         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
344
345         if (!irq_fpu_usable())
346                 crypto_aes_decrypt_x86(ctx, dst, src);
347         else {
348                 kernel_fpu_begin();
349                 aesni_dec(ctx, dst, src);
350                 kernel_fpu_end();
351         }
352 }
353
354 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
355 {
356         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
357
358         aesni_enc(ctx, dst, src);
359 }
360
361 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
362 {
363         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
364
365         aesni_dec(ctx, dst, src);
366 }
367
368 static int ecb_encrypt(struct blkcipher_desc *desc,
369                        struct scatterlist *dst, struct scatterlist *src,
370                        unsigned int nbytes)
371 {
372         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
373         struct blkcipher_walk walk;
374         int err;
375
376         blkcipher_walk_init(&walk, dst, src, nbytes);
377         err = blkcipher_walk_virt(desc, &walk);
378         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
379
380         kernel_fpu_begin();
381         while ((nbytes = walk.nbytes)) {
382                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
383                               nbytes & AES_BLOCK_MASK);
384                 nbytes &= AES_BLOCK_SIZE - 1;
385                 err = blkcipher_walk_done(desc, &walk, nbytes);
386         }
387         kernel_fpu_end();
388
389         return err;
390 }
391
392 static int ecb_decrypt(struct blkcipher_desc *desc,
393                        struct scatterlist *dst, struct scatterlist *src,
394                        unsigned int nbytes)
395 {
396         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
397         struct blkcipher_walk walk;
398         int err;
399
400         blkcipher_walk_init(&walk, dst, src, nbytes);
401         err = blkcipher_walk_virt(desc, &walk);
402         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
403
404         kernel_fpu_begin();
405         while ((nbytes = walk.nbytes)) {
406                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
407                               nbytes & AES_BLOCK_MASK);
408                 nbytes &= AES_BLOCK_SIZE - 1;
409                 err = blkcipher_walk_done(desc, &walk, nbytes);
410         }
411         kernel_fpu_end();
412
413         return err;
414 }
415
416 static int cbc_encrypt(struct blkcipher_desc *desc,
417                        struct scatterlist *dst, struct scatterlist *src,
418                        unsigned int nbytes)
419 {
420         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
421         struct blkcipher_walk walk;
422         int err;
423
424         blkcipher_walk_init(&walk, dst, src, nbytes);
425         err = blkcipher_walk_virt(desc, &walk);
426         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
427
428         kernel_fpu_begin();
429         while ((nbytes = walk.nbytes)) {
430                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
431                               nbytes & AES_BLOCK_MASK, walk.iv);
432                 nbytes &= AES_BLOCK_SIZE - 1;
433                 err = blkcipher_walk_done(desc, &walk, nbytes);
434         }
435         kernel_fpu_end();
436
437         return err;
438 }
439
440 static int cbc_decrypt(struct blkcipher_desc *desc,
441                        struct scatterlist *dst, struct scatterlist *src,
442                        unsigned int nbytes)
443 {
444         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
445         struct blkcipher_walk walk;
446         int err;
447
448         blkcipher_walk_init(&walk, dst, src, nbytes);
449         err = blkcipher_walk_virt(desc, &walk);
450         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
451
452         kernel_fpu_begin();
453         while ((nbytes = walk.nbytes)) {
454                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
455                               nbytes & AES_BLOCK_MASK, walk.iv);
456                 nbytes &= AES_BLOCK_SIZE - 1;
457                 err = blkcipher_walk_done(desc, &walk, nbytes);
458         }
459         kernel_fpu_end();
460
461         return err;
462 }
463
464 #ifdef CONFIG_X86_64
465 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
466                             struct blkcipher_walk *walk)
467 {
468         u8 *ctrblk = walk->iv;
469         u8 keystream[AES_BLOCK_SIZE];
470         u8 *src = walk->src.virt.addr;
471         u8 *dst = walk->dst.virt.addr;
472         unsigned int nbytes = walk->nbytes;
473
474         aesni_enc(ctx, keystream, ctrblk);
475         crypto_xor(keystream, src, nbytes);
476         memcpy(dst, keystream, nbytes);
477         crypto_inc(ctrblk, AES_BLOCK_SIZE);
478 }
479
480 #ifdef CONFIG_AS_AVX
481 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
482                               const u8 *in, unsigned int len, u8 *iv)
483 {
484         /*
485          * based on key length, override with the by8 version
486          * of ctr mode encryption/decryption for improved performance
487          * aes_set_key_common() ensures that key length is one of
488          * {128,192,256}
489          */
490         if (ctx->key_length == AES_KEYSIZE_128)
491                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
492         else if (ctx->key_length == AES_KEYSIZE_192)
493                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
494         else
495                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
496 }
497 #endif
498
499 static int ctr_crypt(struct blkcipher_desc *desc,
500                      struct scatterlist *dst, struct scatterlist *src,
501                      unsigned int nbytes)
502 {
503         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
504         struct blkcipher_walk walk;
505         int err;
506
507         blkcipher_walk_init(&walk, dst, src, nbytes);
508         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
509         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
510
511         kernel_fpu_begin();
512         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
513                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
514                                   nbytes & AES_BLOCK_MASK, walk.iv);
515                 nbytes &= AES_BLOCK_SIZE - 1;
516                 err = blkcipher_walk_done(desc, &walk, nbytes);
517         }
518         if (walk.nbytes) {
519                 ctr_crypt_final(ctx, &walk);
520                 err = blkcipher_walk_done(desc, &walk, 0);
521         }
522         kernel_fpu_end();
523
524         return err;
525 }
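
/*
 * CTR mode only ever encrypts the counter stream, so decryption is the
 * same transform; the algorithm entries below therefore wire both
 * .encrypt and .decrypt to ctr_crypt() (and, for the async wrapper, to
 * ablk_encrypt()).
 */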
526 #endif
527
528 static int ablk_ecb_init(struct crypto_tfm *tfm)
529 {
530         return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
531 }
532
533 static int ablk_cbc_init(struct crypto_tfm *tfm)
534 {
535         return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
536 }
537
538 #ifdef CONFIG_X86_64
539 static int ablk_ctr_init(struct crypto_tfm *tfm)
540 {
541         return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
542 }
543
544 #endif
545
546 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
547 static int ablk_pcbc_init(struct crypto_tfm *tfm)
548 {
549         return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
550 }
551 #endif
552
553 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
554 {
555         aesni_ecb_enc(ctx, blks, blks, nbytes);
556 }
557
558 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
559 {
560         aesni_ecb_dec(ctx, blks, blks, nbytes);
561 }
562
563 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
564                             unsigned int keylen)
565 {
566         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
567         int err;
568
569         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
570                                  keylen - AES_BLOCK_SIZE);
571         if (err)
572                 return err;
573
574         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
575 }
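
/*
 * The lrw(aes) key blob is the AES key followed by one 16 byte tweak
 * key, e.g. 32 bytes total for AES-128; see the min/max_keysize of
 * "__lrw-aes-aesni" below, which add AES_BLOCK_SIZE to the plain AES
 * key sizes.
 */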
576
577 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
578 {
579         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
580
581         lrw_free_table(&ctx->lrw_table);
582 }
583
584 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
585                        struct scatterlist *src, unsigned int nbytes)
586 {
587         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
588         be128 buf[8];
589         struct lrw_crypt_req req = {
590                 .tbuf = buf,
591                 .tbuflen = sizeof(buf),
592
593                 .table_ctx = &ctx->lrw_table,
594                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
595                 .crypt_fn = lrw_xts_encrypt_callback,
596         };
597         int ret;
598
599         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
600
601         kernel_fpu_begin();
602         ret = lrw_crypt(desc, dst, src, nbytes, &req);
603         kernel_fpu_end();
604
605         return ret;
606 }
607
608 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
609                        struct scatterlist *src, unsigned int nbytes)
610 {
611         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
612         be128 buf[8];
613         struct lrw_crypt_req req = {
614                 .tbuf = buf,
615                 .tbuflen = sizeof(buf),
616
617                 .table_ctx = &ctx->lrw_table,
618                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
619                 .crypt_fn = lrw_xts_decrypt_callback,
620         };
621         int ret;
622
623         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
624
625         kernel_fpu_begin();
626         ret = lrw_crypt(desc, dst, src, nbytes, &req);
627         kernel_fpu_end();
628
629         return ret;
630 }
631
632 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
633                             unsigned int keylen)
634 {
635         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
636         u32 *flags = &tfm->crt_flags;
637         int err;
638
639         /* key consists of keys of equal size concatenated, therefore
640          * the length must be even
641          */
642         if (keylen % 2) {
643                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
644                 return -EINVAL;
645         }
646
647         /* first half of xts-key is for crypt */
648         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
649         if (err)
650                 return err;
651
652         /* second half of xts-key is for tweak */
653         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
654                                   keylen / 2);
655 }
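
/*
 * Illustrative key layout: a 32 byte xts(aes) key is split into two
 * independent AES-128 keys (a 64 byte key into two AES-256 keys):
 */
#if 0
	u8 key[2 * AES_KEYSIZE_128];
	/* key[0..15]  -> raw_crypt_ctx, used to encrypt the data  */
	/* key[16..31] -> raw_tweak_ctx, used to encrypt the tweak */
#endif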
656
657
658 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
659 {
660         aesni_enc(ctx, out, in);
661 }
662
663 #ifdef CONFIG_X86_64
664
665 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
666 {
667         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
668 }
669
670 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
671 {
672         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
673 }
674
675 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
676 {
677         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
678 }
679
680 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
681 {
682         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
683 }
684
685 static const struct common_glue_ctx aesni_enc_xts = {
686         .num_funcs = 2,
687         .fpu_blocks_limit = 1,
688
689         .funcs = { {
690                 .num_blocks = 8,
691                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
692         }, {
693                 .num_blocks = 1,
694                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
695         } }
696 };
697
698 static const struct common_glue_ctx aesni_dec_xts = {
699         .num_funcs = 2,
700         .fpu_blocks_limit = 1,
701
702         .funcs = { {
703                 .num_blocks = 8,
704                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
705         }, {
706                 .num_blocks = 1,
707                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
708         } }
709 };
710
711 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
712                        struct scatterlist *src, unsigned int nbytes)
713 {
714         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
715
716         return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
717                                      XTS_TWEAK_CAST(aesni_xts_tweak),
718                                      aes_ctx(ctx->raw_tweak_ctx),
719                                      aes_ctx(ctx->raw_crypt_ctx));
720 }
721
722 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
723                        struct scatterlist *src, unsigned int nbytes)
724 {
725         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
726
727         return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
728                                      XTS_TWEAK_CAST(aesni_xts_tweak),
729                                      aes_ctx(ctx->raw_tweak_ctx),
730                                      aes_ctx(ctx->raw_crypt_ctx));
731 }
732
733 #else
734
735 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
736                        struct scatterlist *src, unsigned int nbytes)
737 {
738         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
739         be128 buf[8];
740         struct xts_crypt_req req = {
741                 .tbuf = buf,
742                 .tbuflen = sizeof(buf),
743
744                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
745                 .tweak_fn = aesni_xts_tweak,
746                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
747                 .crypt_fn = lrw_xts_encrypt_callback,
748         };
749         int ret;
750
751         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
752
753         kernel_fpu_begin();
754         ret = xts_crypt(desc, dst, src, nbytes, &req);
755         kernel_fpu_end();
756
757         return ret;
758 }
759
760 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
761                        struct scatterlist *src, unsigned int nbytes)
762 {
763         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
764         be128 buf[8];
765         struct xts_crypt_req req = {
766                 .tbuf = buf,
767                 .tbuflen = sizeof(buf),
768
769                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
770                 .tweak_fn = aesni_xts_tweak,
771                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
772                 .crypt_fn = lrw_xts_decrypt_callback,
773         };
774         int ret;
775
776         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
777
778         kernel_fpu_begin();
779         ret = xts_crypt(desc, dst, src, nbytes, &req);
780         kernel_fpu_end();
781
782         return ret;
783 }
784
785 #endif
786
787 #ifdef CONFIG_X86_64
788 static int rfc4106_init(struct crypto_tfm *tfm)
789 {
790         struct cryptd_aead *cryptd_tfm;
791         struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
792                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
793         struct crypto_aead *cryptd_child;
794         struct aesni_rfc4106_gcm_ctx *child_ctx;
795         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
796         if (IS_ERR(cryptd_tfm))
797                 return PTR_ERR(cryptd_tfm);
798
799         cryptd_child = cryptd_aead_child(cryptd_tfm);
800         child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
801         memcpy(child_ctx, ctx, sizeof(*ctx));
802         ctx->cryptd_tfm = cryptd_tfm;
803         tfm->crt_aead.reqsize = sizeof(struct aead_request)
804                 + crypto_aead_reqsize(&cryptd_tfm->base);
805         return 0;
806 }
807
808 static void rfc4106_exit(struct crypto_tfm *tfm)
809 {
810         struct aesni_rfc4106_gcm_ctx *ctx =
811                 (struct aesni_rfc4106_gcm_ctx *)
812                 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
813         if (!IS_ERR(ctx->cryptd_tfm))
814                 cryptd_free_aead(ctx->cryptd_tfm);
815         return;
816 }
817
818 static void
819 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
820 {
821         struct aesni_gcm_set_hash_subkey_result *result = req->data;
822
823         if (err == -EINPROGRESS)
824                 return;
825         result->err = err;
826         complete(&result->completion);
827 }
828
829 static int
830 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
831 {
832         struct crypto_ablkcipher *ctr_tfm;
833         struct ablkcipher_request *req;
834         int ret = -EINVAL;
835         struct aesni_hash_subkey_req_data *req_data;
836
837         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
838         if (IS_ERR(ctr_tfm))
839                 return PTR_ERR(ctr_tfm);
840
841         crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
842
843         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
844         if (ret)
845                 goto out_free_ablkcipher;
846
847         ret = -ENOMEM;
848         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
849         if (!req)
850                 goto out_free_ablkcipher;
851
852         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
853         if (!req_data)
854                 goto out_free_request;
855
856         memset(req_data->iv, 0, sizeof(req_data->iv));
857
858         /* Clear the data in the hash sub key container to zero.*/
859         /* We want to cipher all zeros to create the hash sub key. */
860         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
861
862         init_completion(&req_data->result.completion);
863         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
864         ablkcipher_request_set_tfm(req, ctr_tfm);
865         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
866                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
867                                         rfc4106_set_hash_subkey_done,
868                                         &req_data->result);
869
870         ablkcipher_request_set_crypt(req, &req_data->sg,
871                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
872
873         ret = crypto_ablkcipher_encrypt(req);
874         if (ret == -EINPROGRESS || ret == -EBUSY) {
875                 ret = wait_for_completion_interruptible
876                         (&req_data->result.completion);
877                 if (!ret)
878                         ret = req_data->result.err;
879         }
880         kfree(req_data);
881 out_free_request:
882         ablkcipher_request_free(req);
883 out_free_ablkcipher:
884         crypto_free_ablkcipher(ctr_tfm);
885         return ret;
886 }
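
/*
 * What the helper above computes: the GHASH subkey H is AES-K(0^128).
 * Running "ctr(aes)" over one all-zero block with an all-zero counter
 * block is equivalent to this single-block sketch (illustrative,
 * synchronous cipher API; error handling elided):
 */
#if 0
	struct crypto_cipher *aes = crypto_alloc_cipher("aes", 0, 0);

	crypto_cipher_setkey(aes, key, key_len);
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
	crypto_cipher_encrypt_one(aes, hash_subkey, hash_subkey);
	crypto_free_cipher(aes);
#endif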
887
888 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
889                                                    unsigned int key_len)
890 {
891         int ret = 0;
892         struct crypto_tfm *tfm = crypto_aead_tfm(parent);
893         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
894         struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
895         struct aesni_rfc4106_gcm_ctx *child_ctx =
896                                  aesni_rfc4106_gcm_ctx_get(cryptd_child);
897         u8 *new_key_align, *new_key_mem = NULL;
898
899         if (key_len < 4) {
900                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
901                 return -EINVAL;
902         }
903         /* Account for the 4 byte nonce at the end. */
904         key_len -= 4;
905         if (key_len != AES_KEYSIZE_128) {
906                 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
907                 return -EINVAL;
908         }
909
910         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
911         /* This must be on a 16 byte boundary! */
912         if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
913                 return -EINVAL;
914
915         if ((unsigned long)key % AESNI_ALIGN) {
916                 /* key is not aligned: use an auxiliary aligned pointer */
917                 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
918                 if (!new_key_mem)
919                         return -ENOMEM;
920
921                 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
922                 memcpy(new_key_align, key, key_len);
923                 key = new_key_align;
924         }
925
926         if (!irq_fpu_usable())
927                 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
928                                             key, key_len);
929         else {
930                 kernel_fpu_begin();
931                 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
932                 kernel_fpu_end();
933         }
934         /* This must be on a 16 byte boundary! */
935         if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
936                 ret = -EINVAL;
937                 goto exit;
938         }
939         ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
940         memcpy(child_ctx, ctx, sizeof(*ctx));
941 exit:
942         kfree(new_key_mem);
943         return ret;
944 }
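
/*
 * The rfc4106 key blob is the AES key followed by the 4 byte nonce
 * (salt), so this implementation accepts exactly 20 bytes of key
 * material (illustrative layout; "aead" is a hypothetical handle):
 */
#if 0
	struct crypto_aead *aead;
	u8 key[AES_KEYSIZE_128 + 4];	/* 16 byte AES-128 key || 4 byte salt */

	crypto_aead_setkey(aead, key, sizeof(key));
#endif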
945
946 /* This is the Integrity Check Value (aka the authentication tag) length and
947  * can be 8, 12 or 16 bytes long. */
948 static int rfc4106_set_authsize(struct crypto_aead *parent,
949                                 unsigned int authsize)
950 {
951         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
952         struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
953
954         switch (authsize) {
955         case 8:
956         case 12:
957         case 16:
958                 break;
959         default:
960                 return -EINVAL;
961         }
962         crypto_aead_crt(parent)->authsize = authsize;
963         crypto_aead_crt(cryptd_child)->authsize = authsize;
964         return 0;
965 }
966
967 static int rfc4106_encrypt(struct aead_request *req)
968 {
969         int ret;
970         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
971         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
972
973         if (!irq_fpu_usable()) {
974                 struct aead_request *cryptd_req =
975                         (struct aead_request *) aead_request_ctx(req);
976                 memcpy(cryptd_req, req, sizeof(*req));
977                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
978                 return crypto_aead_encrypt(cryptd_req);
979         } else {
980                 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
981                 kernel_fpu_begin();
982                 ret = cryptd_child->base.crt_aead.encrypt(req);
983                 kernel_fpu_end();
984                 return ret;
985         }
986 }
987
988 static int rfc4106_decrypt(struct aead_request *req)
989 {
990         int ret;
991         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
992         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
993
994         if (!irq_fpu_usable()) {
995                 struct aead_request *cryptd_req =
996                         (struct aead_request *) aead_request_ctx(req);
997                 memcpy(cryptd_req, req, sizeof(*req));
998                 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
999                 return crypto_aead_decrypt(cryptd_req);
1000         } else {
1001                 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1002                 kernel_fpu_begin();
1003                 ret = cryptd_child->base.crt_aead.decrypt(req);
1004                 kernel_fpu_end();
1005                 return ret;
1006         }
1007 }
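
/*
 * A minimal usage sketch, not part of the driver, of the
 * "rfc4106(gcm(aes))" instance registered below via the AEAD API of
 * this kernel generation (aead_request_set_assoc() carries the AAD).
 * The 8 byte IV is the explicit ESP IV; the salt travels inside the
 * key as shown above.  Error handling is elided and local names are
 * illustrative.
 */
#if 0
	struct crypto_aead *tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	struct aead_request *req;
	struct scatterlist sg, asg;
	u8 key[20], iv[8], assoc[8], buf[64 + 16];	/* plaintext + tag */

	crypto_aead_setkey(tfm, key, sizeof(key));
	crypto_aead_setauthsize(tfm, 16);
	req = aead_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, sizeof(buf));
	sg_init_one(&asg, assoc, sizeof(assoc));
	aead_request_set_assoc(req, &asg, sizeof(assoc));
	aead_request_set_crypt(req, &sg, &sg, 64, iv);
	crypto_aead_encrypt(req);		/* may return -EINPROGRESS */
	aead_request_free(req);
	crypto_free_aead(tfm);
#endif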
1008
1009 static int __driver_rfc4106_encrypt(struct aead_request *req)
1010 {
1011         u8 one_entry_in_sg = 0;
1012         u8 *src, *dst, *assoc;
1013         __be32 counter = cpu_to_be32(1);
1014         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1015         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1016         void *aes_ctx = &(ctx->aes_key_expanded);
1017         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1018         u8 iv_tab[16+AESNI_ALIGN];
1019         u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1020         struct scatter_walk src_sg_walk;
1021         struct scatter_walk assoc_sg_walk;
1022         struct scatter_walk dst_sg_walk;
1023         unsigned int i;
1024
1025         /* Assuming we are supporting rfc4106 64-bit extended */
1026         /* sequence numbers, we need the AAD length to be */
1027         /* equal to 8 or 12 bytes. */
1028         if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1029                 return -EINVAL;
1030         /* Build the IV: 4 byte salt, 8 byte explicit IV, 4 byte counter */
1031         for (i = 0; i < 4; i++)
1032                 *(iv+i) = ctx->nonce[i];
1033         for (i = 0; i < 8; i++)
1034                 *(iv+4+i) = req->iv[i];
1035         *((__be32 *)(iv+12)) = counter;
1036
1037         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1038                 one_entry_in_sg = 1;
1039                 scatterwalk_start(&src_sg_walk, req->src);
1040                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1041                 src = scatterwalk_map(&src_sg_walk);
1042                 assoc = scatterwalk_map(&assoc_sg_walk);
1043                 dst = src;
1044                 if (unlikely(req->src != req->dst)) {
1045                         scatterwalk_start(&dst_sg_walk, req->dst);
1046                         dst = scatterwalk_map(&dst_sg_walk);
1047                 }
1048
1049         } else {
1050                 /* Allocate memory for src, dst, assoc */
1051                 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1052                         GFP_ATOMIC);
1053                 if (unlikely(!src))
1054                         return -ENOMEM;
1055                 assoc = (src + req->cryptlen + auth_tag_len);
1056                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1057                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1058                                         req->assoclen, 0);
1059                 dst = src;
1060         }
1061
1062         aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1063                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1064                 + ((unsigned long)req->cryptlen), auth_tag_len);
1065
1066         /* The authTag (aka the Integrity Check Value) needs to be written
1067          * back to the packet. */
1068         if (one_entry_in_sg) {
1069                 if (unlikely(req->src != req->dst)) {
1070                         scatterwalk_unmap(dst);
1071                         scatterwalk_done(&dst_sg_walk, 0, 0);
1072                 }
1073                 scatterwalk_unmap(src);
1074                 scatterwalk_unmap(assoc);
1075                 scatterwalk_done(&src_sg_walk, 0, 0);
1076                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1077         } else {
1078                 scatterwalk_map_and_copy(dst, req->dst, 0,
1079                         req->cryptlen + auth_tag_len, 1);
1080                 kfree(src);
1081         }
1082         return 0;
1083 }
1084
1085 static int __driver_rfc4106_decrypt(struct aead_request *req)
1086 {
1087         u8 one_entry_in_sg = 0;
1088         u8 *src, *dst, *assoc;
1089         unsigned long tempCipherLen = 0;
1090         __be32 counter = cpu_to_be32(1);
1091         int retval = 0;
1092         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1093         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1094         void *aes_ctx = &(ctx->aes_key_expanded);
1095         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1096         u8 iv_and_authTag[32+AESNI_ALIGN];
1097         u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1098         u8 *authTag = iv + 16;
1099         struct scatter_walk src_sg_walk;
1100         struct scatter_walk assoc_sg_walk;
1101         struct scatter_walk dst_sg_walk;
1102         unsigned int i;
1103
1104         if (unlikely((req->cryptlen < auth_tag_len) ||
1105                 (req->assoclen != 8 && req->assoclen != 12)))
1106                 return -EINVAL;
1107         /* Assuming we are supporting rfc4106 64-bit extended */
1108         /* sequence numbers, we need the AAD length to be */
1109         /* equal to 8 or 12 bytes (checked above). */
1110
1111         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1112         /* Build the IV: 4 byte salt, 8 byte explicit IV, 4 byte counter */
1113         for (i = 0; i < 4; i++)
1114                 *(iv+i) = ctx->nonce[i];
1115         for (i = 0; i < 8; i++)
1116                 *(iv+4+i) = req->iv[i];
1117         *((__be32 *)(iv+12)) = counter;
1118
1119         if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1120                 one_entry_in_sg = 1;
1121                 scatterwalk_start(&src_sg_walk, req->src);
1122                 scatterwalk_start(&assoc_sg_walk, req->assoc);
1123                 src = scatterwalk_map(&src_sg_walk);
1124                 assoc = scatterwalk_map(&assoc_sg_walk);
1125                 dst = src;
1126                 if (unlikely(req->src != req->dst)) {
1127                         scatterwalk_start(&dst_sg_walk, req->dst);
1128                         dst = scatterwalk_map(&dst_sg_walk);
1129                 }
1130
1131         } else {
1132                 /* Allocate memory for src, dst, assoc */
1133                 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1134                 if (!src)
1135                         return -ENOMEM;
1136                 assoc = (src + req->cryptlen);
1137                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1138                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1139                         req->assoclen, 0);
1140                 dst = src;
1141         }
1142
1143         aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1144                 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1145                 authTag, auth_tag_len);
1146
1147         /* Compare generated tag with passed in tag; crypto_memneq() is constant-time. */
1148         retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1149                 -EBADMSG : 0;
1150
1151         if (one_entry_in_sg) {
1152                 if (unlikely(req->src != req->dst)) {
1153                         scatterwalk_unmap(dst);
1154                         scatterwalk_done(&dst_sg_walk, 0, 0);
1155                 }
1156                 scatterwalk_unmap(src);
1157                 scatterwalk_unmap(assoc);
1158                 scatterwalk_done(&src_sg_walk, 0, 0);
1159                 scatterwalk_done(&assoc_sg_walk, 0, 0);
1160         } else {
1161                 scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
1162                 kfree(src);
1163         }
1164         return retval;
1165 }
1166 #endif
1167
1168 static struct crypto_alg aesni_algs[] = { {
1169         .cra_name               = "aes",
1170         .cra_driver_name        = "aes-aesni",
1171         .cra_priority           = 300,
1172         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1173         .cra_blocksize          = AES_BLOCK_SIZE,
1174         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1175                                   AESNI_ALIGN - 1,
1176         .cra_alignmask          = 0,
1177         .cra_module             = THIS_MODULE,
1178         .cra_u  = {
1179                 .cipher = {
1180                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1181                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1182                         .cia_setkey             = aes_set_key,
1183                         .cia_encrypt            = aes_encrypt,
1184                         .cia_decrypt            = aes_decrypt
1185                 }
1186         }
1187 }, {
1188         .cra_name               = "__aes-aesni",
1189         .cra_driver_name        = "__driver-aes-aesni",
1190         .cra_priority           = 0,
1191         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1192         .cra_blocksize          = AES_BLOCK_SIZE,
1193         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1194                                   AESNI_ALIGN - 1,
1195         .cra_alignmask          = 0,
1196         .cra_module             = THIS_MODULE,
1197         .cra_u  = {
1198                 .cipher = {
1199                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1200                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1201                         .cia_setkey             = aes_set_key,
1202                         .cia_encrypt            = __aes_encrypt,
1203                         .cia_decrypt            = __aes_decrypt
1204                 }
1205         }
1206 }, {
1207         .cra_name               = "__ecb-aes-aesni",
1208         .cra_driver_name        = "__driver-ecb-aes-aesni",
1209         .cra_priority           = 0,
1210         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1211         .cra_blocksize          = AES_BLOCK_SIZE,
1212         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1213                                   AESNI_ALIGN - 1,
1214         .cra_alignmask          = 0,
1215         .cra_type               = &crypto_blkcipher_type,
1216         .cra_module             = THIS_MODULE,
1217         .cra_u = {
1218                 .blkcipher = {
1219                         .min_keysize    = AES_MIN_KEY_SIZE,
1220                         .max_keysize    = AES_MAX_KEY_SIZE,
1221                         .setkey         = aes_set_key,
1222                         .encrypt        = ecb_encrypt,
1223                         .decrypt        = ecb_decrypt,
1224                 },
1225         },
1226 }, {
1227         .cra_name               = "__cbc-aes-aesni",
1228         .cra_driver_name        = "__driver-cbc-aes-aesni",
1229         .cra_priority           = 0,
1230         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1231         .cra_blocksize          = AES_BLOCK_SIZE,
1232         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1233                                   AESNI_ALIGN - 1,
1234         .cra_alignmask          = 0,
1235         .cra_type               = &crypto_blkcipher_type,
1236         .cra_module             = THIS_MODULE,
1237         .cra_u = {
1238                 .blkcipher = {
1239                         .min_keysize    = AES_MIN_KEY_SIZE,
1240                         .max_keysize    = AES_MAX_KEY_SIZE,
1241                         .setkey         = aes_set_key,
1242                         .encrypt        = cbc_encrypt,
1243                         .decrypt        = cbc_decrypt,
1244                 },
1245         },
1246 }, {
1247         .cra_name               = "ecb(aes)",
1248         .cra_driver_name        = "ecb-aes-aesni",
1249         .cra_priority           = 400,
1250         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1251         .cra_blocksize          = AES_BLOCK_SIZE,
1252         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1253         .cra_alignmask          = 0,
1254         .cra_type               = &crypto_ablkcipher_type,
1255         .cra_module             = THIS_MODULE,
1256         .cra_init               = ablk_ecb_init,
1257         .cra_exit               = ablk_exit,
1258         .cra_u = {
1259                 .ablkcipher = {
1260                         .min_keysize    = AES_MIN_KEY_SIZE,
1261                         .max_keysize    = AES_MAX_KEY_SIZE,
1262                         .setkey         = ablk_set_key,
1263                         .encrypt        = ablk_encrypt,
1264                         .decrypt        = ablk_decrypt,
1265                 },
1266         },
1267 }, {
1268         .cra_name               = "cbc(aes)",
1269         .cra_driver_name        = "cbc-aes-aesni",
1270         .cra_priority           = 400,
1271         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1272         .cra_blocksize          = AES_BLOCK_SIZE,
1273         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1274         .cra_alignmask          = 0,
1275         .cra_type               = &crypto_ablkcipher_type,
1276         .cra_module             = THIS_MODULE,
1277         .cra_init               = ablk_cbc_init,
1278         .cra_exit               = ablk_exit,
1279         .cra_u = {
1280                 .ablkcipher = {
1281                         .min_keysize    = AES_MIN_KEY_SIZE,
1282                         .max_keysize    = AES_MAX_KEY_SIZE,
1283                         .ivsize         = AES_BLOCK_SIZE,
1284                         .setkey         = ablk_set_key,
1285                         .encrypt        = ablk_encrypt,
1286                         .decrypt        = ablk_decrypt,
1287                 },
1288         },
1289 #ifdef CONFIG_X86_64
1290 }, {
1291         .cra_name               = "__ctr-aes-aesni",
1292         .cra_driver_name        = "__driver-ctr-aes-aesni",
1293         .cra_priority           = 0,
1294         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
1295         .cra_blocksize          = 1,
1296         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1297                                   AESNI_ALIGN - 1,
1298         .cra_alignmask          = 0,
1299         .cra_type               = &crypto_blkcipher_type,
1300         .cra_module             = THIS_MODULE,
1301         .cra_u = {
1302                 .blkcipher = {
1303                         .min_keysize    = AES_MIN_KEY_SIZE,
1304                         .max_keysize    = AES_MAX_KEY_SIZE,
1305                         .ivsize         = AES_BLOCK_SIZE,
1306                         .setkey         = aes_set_key,
1307                         .encrypt        = ctr_crypt,
1308                         .decrypt        = ctr_crypt,
1309                 },
1310         },
1311 }, {
1312         .cra_name               = "ctr(aes)",
1313         .cra_driver_name        = "ctr-aes-aesni",
1314         .cra_priority           = 400,
1315         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1316         .cra_blocksize          = 1,
1317         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1318         .cra_alignmask          = 0,
1319         .cra_type               = &crypto_ablkcipher_type,
1320         .cra_module             = THIS_MODULE,
1321         .cra_init               = ablk_ctr_init,
1322         .cra_exit               = ablk_exit,
1323         .cra_u = {
1324                 .ablkcipher = {
1325                         .min_keysize    = AES_MIN_KEY_SIZE,
1326                         .max_keysize    = AES_MAX_KEY_SIZE,
1327                         .ivsize         = AES_BLOCK_SIZE,
1328                         .setkey         = ablk_set_key,
1329                         .encrypt        = ablk_encrypt,
1330                         .decrypt        = ablk_encrypt,
1331                         .geniv          = "chainiv",
1332                 },
1333         },
1334 }, {
1335         .cra_name               = "__gcm-aes-aesni",
1336         .cra_driver_name        = "__driver-gcm-aes-aesni",
1337         .cra_priority           = 0,
1338         .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
1339         .cra_blocksize          = 1,
1340         .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
1341                                   AESNI_ALIGN,
1342         .cra_alignmask          = 0,
1343         .cra_type               = &crypto_aead_type,
1344         .cra_module             = THIS_MODULE,
1345         .cra_u = {
1346                 .aead = {
1347                         .encrypt        = __driver_rfc4106_encrypt,
1348                         .decrypt        = __driver_rfc4106_decrypt,
1349                 },
1350         },
1351 }, {
        .cra_name               = "rfc4106(gcm(aes))",
        .cra_driver_name        = "rfc4106-gcm-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
                                  AESNI_ALIGN,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_nivaead_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = rfc4106_init,
        .cra_exit               = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey         = rfc4106_set_key,
                        .setauthsize    = rfc4106_set_authsize,
                        .encrypt        = rfc4106_encrypt,
                        .decrypt        = rfc4106_decrypt,
                        .geniv          = "seqiv",
                        .ivsize         = 8,
                        .maxauthsize    = 16,
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
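        /*
         * PCBC has no dedicated AES-NI assembly; this entry relies on
         * the generic pcbc template over the AES-NI cipher, so it is
         * only registered when CONFIG_CRYPTO_PCBC is available.
         */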
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
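        /*
         * Internal LRW implementation.  The key is an AES key followed
         * by a 16-byte tweak multiplier, hence AES_BLOCK_SIZE added to
         * both key size limits; setkey precomputes the GF(2^128)
         * multiplication table that lrw_aesni_exit_tfm frees.
         */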
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
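        /*
         * Internal XTS implementation.  XTS takes two AES keys of
         * equal size, one for the data and one for the tweak, hence
         * the doubled key size limits.
         */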
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };

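/*
 * Match any CPU advertising the AES CPUID feature bit; exporting the
 * table lets userspace (udev/modprobe) autoload this module on such
 * CPUs.
 */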
static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
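        /*
         * Pick the best available GCM implementation.  The dangling
         * "} else" before each #endif chains the branches across the
         * preprocessor blocks, so exactly one assignment below runs:
         * AVX2 if present, then AVX, then the plain SSE version.
         */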
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (cpu_has_avx) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

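        /*
         * Register the "fpu" template that brackets the internal
         * blkciphers with kernel_fpu_begin()/kernel_fpu_end().
         */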
        err = crypto_fpu_init();
        if (err)
                return err;

        return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
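/* Allow the module to be autoloaded when the "aes" algorithm is requested. */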
MODULE_ALIAS_CRYPTO("aes");