1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
66 #define QAT_AES_HW_CONFIG_ENC(alg) \
67         ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68                         ICP_QAT_HW_CIPHER_NO_CONVERT, \
69                         ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_DEC(alg) \
72         ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73                         ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74                         ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static atomic_t active_dev;
77
78 struct qat_alg_buf {
79         uint32_t len;
80         uint32_t resrvd;
81         uint64_t addr;
82 } __packed;
83
84 struct qat_alg_buf_list {
85         uint64_t resrvd;
86         uint32_t num_bufs;
87         uint32_t num_mapped_bufs;
88         struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
90
91 /* Common content descriptor */
92 struct qat_alg_cd {
93         union {
94                 struct qat_enc { /* Encrypt content desc */
95                         struct icp_qat_hw_cipher_algo_blk cipher;
96                         struct icp_qat_hw_auth_algo_blk hash;
97                 } qat_enc_cd;
98                 struct qat_dec { /* Decrypt content desc */
99                         struct icp_qat_hw_auth_algo_blk hash;
100                         struct icp_qat_hw_cipher_algo_blk cipher;
101                 } qat_dec_cd;
102         };
103 } __aligned(64);
104
105 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
106
107 struct qat_auth_state {
108         uint8_t data[MAX_AUTH_STATE_SIZE + 64];
109 } __aligned(64);
110
111 struct qat_alg_session_ctx {
112         struct qat_alg_cd *enc_cd;
113         dma_addr_t enc_cd_paddr;
114         struct qat_alg_cd *dec_cd;
115         dma_addr_t dec_cd_paddr;
116         struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
117         struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
118         struct qat_crypto_instance *inst;
119         struct crypto_tfm *tfm;
120         struct crypto_shash *hash_tfm;
121         enum icp_qat_hw_auth_algo qat_hash_alg;
122         uint8_t salt[AES_BLOCK_SIZE];
123         spinlock_t lock;        /* protects qat_alg_session_ctx struct */
124 };
125
126 static int get_current_node(void)
127 {
128         return cpu_data(current_thread_info()->cpu).phys_proc_id;
129 }
130
131 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
132 {
133         switch (qat_hash_alg) {
134         case ICP_QAT_HW_AUTH_ALGO_SHA1:
135                 return ICP_QAT_HW_SHA1_STATE1_SZ;
136         case ICP_QAT_HW_AUTH_ALGO_SHA256:
137                 return ICP_QAT_HW_SHA256_STATE1_SZ;
138         case ICP_QAT_HW_AUTH_ALGO_SHA512:
139                 return ICP_QAT_HW_SHA512_STATE1_SZ;
140         default:
141                 return -EFAULT;
142         }
143         return -EFAULT;
144 }
145
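/* Compute the inner (ipad) and outer (opad) partial HMAC hashes of auth_key
 * and store them, byte-swapped to big endian, in the hash block's state1
 * area so the hardware can resume the HMAC from the precomputed states. */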
146 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
147                                   struct qat_alg_session_ctx *ctx,
148                                   const uint8_t *auth_key,
149                                   unsigned int auth_keylen)
150 {
151         struct qat_auth_state auth_state;
152         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
153         struct sha1_state sha1;
154         struct sha256_state sha256;
155         struct sha512_state sha512;
156         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
157         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
158         uint8_t *ipad = auth_state.data;
159         uint8_t *opad = ipad + block_size;
160         __be32 *hash_state_out;
161         __be64 *hash512_state_out;
162         int i, offset;
163
164         memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
165         shash->tfm = ctx->hash_tfm;
166         shash->flags = 0x0;
167
168         if (auth_keylen > block_size) {
169                 char buff[SHA512_BLOCK_SIZE];
170                 int ret = crypto_shash_digest(shash, auth_key,
171                                               auth_keylen, buff);
172                 if (ret)
173                         return ret;
174
175                 memcpy(ipad, buff, digest_size);
176                 memcpy(opad, buff, digest_size);
177                 memzero_explicit(ipad + digest_size, block_size - digest_size);
178                 memzero_explicit(opad + digest_size, block_size - digest_size);
179         } else {
180                 memcpy(ipad, auth_key, auth_keylen);
181                 memcpy(opad, auth_key, auth_keylen);
182                 memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
183                 memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
184         }
185
186         for (i = 0; i < block_size; i++) {
187                 char *ipad_ptr = ipad + i;
188                 char *opad_ptr = opad + i;
189                 *ipad_ptr ^= 0x36;
190                 *opad_ptr ^= 0x5C;
191         }
192
193         if (crypto_shash_init(shash))
194                 return -EFAULT;
195
196         if (crypto_shash_update(shash, ipad, block_size))
197                 return -EFAULT;
198
199         hash_state_out = (__be32 *)hash->sha.state1;
200         hash512_state_out = (__be64 *)hash_state_out;
201
202         switch (ctx->qat_hash_alg) {
203         case ICP_QAT_HW_AUTH_ALGO_SHA1:
204                 if (crypto_shash_export(shash, &sha1))
205                         return -EFAULT;
206                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
207                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
208                 break;
209         case ICP_QAT_HW_AUTH_ALGO_SHA256:
210                 if (crypto_shash_export(shash, &sha256))
211                         return -EFAULT;
212                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
213                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
214                 break;
215         case ICP_QAT_HW_AUTH_ALGO_SHA512:
216                 if (crypto_shash_export(shash, &sha512))
217                         return -EFAULT;
218                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
219                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
220                 break;
221         default:
222                 return -EFAULT;
223         }
224
225         if (crypto_shash_init(shash))
226                 return -EFAULT;
227
228         if (crypto_shash_update(shash, opad, block_size))
229                 return -EFAULT;
230
231         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
232         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
233         hash512_state_out = (__be64 *)hash_state_out;
234
235         switch (ctx->qat_hash_alg) {
236         case ICP_QAT_HW_AUTH_ALGO_SHA1:
237                 if (crypto_shash_export(shash, &sha1))
238                         return -EFAULT;
239                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
240                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
241                 break;
242         case ICP_QAT_HW_AUTH_ALGO_SHA256:
243                 if (crypto_shash_export(shash, &sha256))
244                         return -EFAULT;
245                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
246                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
247                 break;
248         case ICP_QAT_HW_AUTH_ALGO_SHA512:
249                 if (crypto_shash_export(shash, &sha512))
250                         return -EFAULT;
251                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
252                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
253                 break;
254         default:
255                 return -EFAULT;
256         }
257         memzero_explicit(ipad, block_size);
258         memzero_explicit(opad, block_size);
259         return 0;
260 }
261
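/* Set the request header fields shared by the encrypt and decrypt templates:
 * SGL pointers, 64-bit CD address, digest appended in buffer, no partial
 * packets, 16 byte IV field and no protocol or state update. */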
262 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
263 {
264         header->hdr_flags =
265                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
266         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
267         header->comn_req_flags =
268                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
269                                             QAT_COMN_PTR_TYPE_SGL);
270         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
271                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
272         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
273                                   ICP_QAT_FW_LA_PARTIAL_NONE);
274         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
275                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
276         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
277                                 ICP_QAT_FW_LA_NO_PROTO);
278         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
279                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
280 }
281
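/* Build the encrypt content descriptor (cipher block followed by hash block)
 * and the matching cipher-then-hash firmware request template. */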
282 static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
283                                     int alg, struct crypto_authenc_keys *keys)
284 {
285         struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
286         unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
287         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
288         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
289         struct icp_qat_hw_auth_algo_blk *hash =
290                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
291                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
292         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
293         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
294         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
295         void *ptr = &req_tmpl->cd_ctrl;
296         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
297         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
298
299         /* CD setup */
300         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
301         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
302         hash->sha.inner_setup.auth_config.config =
303                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
304                                              ctx->qat_hash_alg, digestsize);
305         hash->sha.inner_setup.auth_counter.counter =
306                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
307
308         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
309                 return -EFAULT;
310
311         /* Request setup */
312         qat_alg_init_common_hdr(header);
313         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
314         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
315                                    ICP_QAT_FW_LA_RET_AUTH_RES);
316         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
317                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
318         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
319         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
320
321         /* Cipher CD config setup */
322         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
323         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
324         cipher_cd_ctrl->cipher_cfg_offset = 0;
325         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
326         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
327         /* Auth CD config setup */
328         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
329         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
330         hash_cd_ctrl->inner_res_sz = digestsize;
331         hash_cd_ctrl->final_sz = digestsize;
332
333         switch (ctx->qat_hash_alg) {
334         case ICP_QAT_HW_AUTH_ALGO_SHA1:
335                 hash_cd_ctrl->inner_state1_sz =
336                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
337                 hash_cd_ctrl->inner_state2_sz =
338                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
339                 break;
340         case ICP_QAT_HW_AUTH_ALGO_SHA256:
341                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
342                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
343                 break;
344         case ICP_QAT_HW_AUTH_ALGO_SHA512:
345                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
346                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
347                 break;
348         default:
349                 break;
350         }
351         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
352                         ((sizeof(struct icp_qat_hw_auth_setup) +
353                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
354         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
355         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
356         return 0;
357 }
358
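/* Build the decrypt content descriptor (hash block followed by cipher block)
 * and the matching hash-then-cipher firmware request template. */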
359 static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
360                                     int alg, struct crypto_authenc_keys *keys)
361 {
362         struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
363         unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
364         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
365         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
366         struct icp_qat_hw_cipher_algo_blk *cipher =
367                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
368                 sizeof(struct icp_qat_hw_auth_setup) +
369                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
370         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
371         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
372         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
373         void *ptr = &req_tmpl->cd_ctrl;
374         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
375         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
376         struct icp_qat_fw_la_auth_req_params *auth_param =
377                 (struct icp_qat_fw_la_auth_req_params *)
378                 ((char *)&req_tmpl->serv_specif_rqpars +
379                 sizeof(struct icp_qat_fw_la_cipher_req_params));
380
381         /* CD setup */
382         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
383         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
384         hash->sha.inner_setup.auth_config.config =
385                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
386                                              ctx->qat_hash_alg,
387                                              digestsize);
388         hash->sha.inner_setup.auth_counter.counter =
389                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
390
391         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
392                 return -EFAULT;
393
394         /* Request setup */
395         qat_alg_init_common_hdr(header);
396         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
397         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
398                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
399         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
400                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
401         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
402         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
403
404         /* Cipher CD config setup */
405         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
406         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
407         cipher_cd_ctrl->cipher_cfg_offset =
408                 (sizeof(struct icp_qat_hw_auth_setup) +
409                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
410         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
411         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
412
413         /* Auth CD config setup */
414         hash_cd_ctrl->hash_cfg_offset = 0;
415         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
416         hash_cd_ctrl->inner_res_sz = digestsize;
417         hash_cd_ctrl->final_sz = digestsize;
418
419         switch (ctx->qat_hash_alg) {
420         case ICP_QAT_HW_AUTH_ALGO_SHA1:
421                 hash_cd_ctrl->inner_state1_sz =
422                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
423                 hash_cd_ctrl->inner_state2_sz =
424                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
425                 break;
426         case ICP_QAT_HW_AUTH_ALGO_SHA256:
427                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
428                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
429                 break;
430         case ICP_QAT_HW_AUTH_ALGO_SHA512:
431                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
432                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
433                 break;
434         default:
435                 break;
436         }
437
438         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
439                         ((sizeof(struct icp_qat_hw_auth_setup) +
440                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
441         auth_param->auth_res_sz = digestsize;
442         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
443         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
444         return 0;
445 }
446
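/* Generate the IV salt, split the authenc key into cipher and auth parts,
 * select the AES variant by key size and set up both sessions. */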
447 static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
448                                  const uint8_t *key, unsigned int keylen)
449 {
450         struct crypto_authenc_keys keys;
451         int alg;
452
453         if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
454                 return -EFAULT;
455
456         if (crypto_authenc_extractkeys(&keys, key, keylen))
457                 goto bad_key;
458
459         switch (keys.enckeylen) {
460         case AES_KEYSIZE_128:
461                 alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
462                 break;
463         case AES_KEYSIZE_192:
464                 alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
465                 break;
466         case AES_KEYSIZE_256:
467                 alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
468                 break;
469         default:
470                 goto bad_key;
471         }
472
473         if (qat_alg_init_enc_session(ctx, alg, &keys))
474                 goto error;
475
476         if (qat_alg_init_dec_session(ctx, alg, &keys))
477                 goto error;
478
479         return 0;
480 bad_key:
481         crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
482         return -EINVAL;
483 error:
484         return -EFAULT;
485 }
486
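/* setkey: on first use grab a node-local crypto instance and allocate the
 * DMA coherent content descriptors; on rekey just clear the old descriptors
 * and templates. Then (re)initialize the encrypt and decrypt sessions. */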
487 static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
488                           unsigned int keylen)
489 {
490         struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
491         struct device *dev;
492
493         spin_lock(&ctx->lock);
494         if (ctx->enc_cd) {
495                 /* rekeying */
496                 dev = &GET_DEV(ctx->inst->accel_dev);
497                 memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
498                 memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
499                 memzero_explicit(&ctx->enc_fw_req_tmpl,
500                                  sizeof(struct icp_qat_fw_la_bulk_req));
501                 memzero_explicit(&ctx->dec_fw_req_tmpl,
502                                  sizeof(struct icp_qat_fw_la_bulk_req));
503         } else {
504                 /* new key */
505                 int node = get_current_node();
506                 struct qat_crypto_instance *inst =
507                                 qat_crypto_get_instance_node(node);
508                 if (!inst) {
509                         spin_unlock(&ctx->lock);
510                         return -EINVAL;
511                 }
512
513                 dev = &GET_DEV(inst->accel_dev);
514                 ctx->inst = inst;
515                 ctx->enc_cd = dma_zalloc_coherent(dev,
516                                                   sizeof(struct qat_alg_cd),
517                                                   &ctx->enc_cd_paddr,
518                                                   GFP_ATOMIC);
519                 if (!ctx->enc_cd) {
520                         spin_unlock(&ctx->lock);
521                         return -ENOMEM;
522                 }
523                 ctx->dec_cd = dma_zalloc_coherent(dev,
524                                                   sizeof(struct qat_alg_cd),
525                                                   &ctx->dec_cd_paddr,
526                                                   GFP_ATOMIC);
527                 if (!ctx->dec_cd) {
528                         spin_unlock(&ctx->lock);
529                         goto out_free_enc;
530                 }
531         }
532         spin_unlock(&ctx->lock);
533         if (qat_alg_init_sessions(ctx, key, keylen))
534                 goto out_free_all;
535
536         return 0;
537
538 out_free_all:
539         memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
540         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
541                           ctx->dec_cd, ctx->dec_cd_paddr);
542         ctx->dec_cd = NULL;
543 out_free_enc:
544         memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
545         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
546                           ctx->enc_cd, ctx->enc_cd_paddr);
547         ctx->enc_cd = NULL;
548         return -ENOMEM;
549 }
550
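/* Unmap and free the source buffer list and, for out-of-place requests,
 * the separate destination buffer list built by qat_alg_sgl_to_bufl(). */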
551 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
552                               struct qat_crypto_request *qat_req)
553 {
554         struct device *dev = &GET_DEV(inst->accel_dev);
555         struct qat_alg_buf_list *bl = qat_req->buf.bl;
556         struct qat_alg_buf_list *blout = qat_req->buf.blout;
557         dma_addr_t blp = qat_req->buf.blp;
558         dma_addr_t blpout = qat_req->buf.bloutp;
559         size_t sz = qat_req->buf.sz;
560         int i, bufs = bl->num_bufs;
561
562         for (i = 0; i < bl->num_bufs; i++)
563                 dma_unmap_single(dev, bl->bufers[i].addr,
564                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
565
566         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
567         kfree(bl);
568         if (blp != blpout) {
569                 /* If out-of-place operation, DMA unmap only the data buffers */
570                 int bufless = bufs - blout->num_mapped_bufs;
571
572                 for (i = bufless; i < bufs; i++) {
573                         dma_unmap_single(dev, blout->bufers[i].addr,
574                                          blout->bufers[i].len,
575                                          DMA_BIDIRECTIONAL);
576                 }
577                 dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
578                 kfree(blout);
579         }
580 }
581
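/* Build DMA-mapped firmware buffer lists from the assoc scatterlist, the IV
 * and the data scatterlist. For out-of-place requests a second list is
 * built for the destination, reusing the assoc and IV mappings. */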
582 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
583                                struct scatterlist *assoc,
584                                struct scatterlist *sgl,
585                                struct scatterlist *sglout, uint8_t *iv,
586                                uint8_t ivlen,
587                                struct qat_crypto_request *qat_req)
588 {
589         struct device *dev = &GET_DEV(inst->accel_dev);
590         int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
591         struct qat_alg_buf_list *bufl;
592         struct qat_alg_buf_list *buflout = NULL;
593         dma_addr_t blp;
594         dma_addr_t bloutp = 0;
595         struct scatterlist *sg;
596         size_t sz = sizeof(struct qat_alg_buf_list) +
597                         ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
598
599         if (unlikely(!n))
600                 return -EINVAL;
601
602         bufl = kmalloc_node(sz, GFP_ATOMIC,
603                             dev_to_node(&GET_DEV(inst->accel_dev)));
604         if (unlikely(!bufl))
605                 return -ENOMEM;
606
607         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
608         if (unlikely(dma_mapping_error(dev, blp)))
609                 goto err;
610
611         for_each_sg(assoc, sg, assoc_n, i) {
612                 if (!sg->length)
613                         continue;
614                 bufl->bufers[bufs].addr = dma_map_single(dev,
615                                                          sg_virt(sg),
616                                                          sg->length,
617                                                          DMA_BIDIRECTIONAL);
618                 bufl->bufers[bufs].len = sg->length;
619                 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
620                         goto err;
621                 bufs++;
622         }
623         bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
624                                                  DMA_BIDIRECTIONAL);
625         bufl->bufers[bufs].len = ivlen;
626         if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
627                 goto err;
628         bufs++;
629
630         for_each_sg(sgl, sg, n, i) {
631                 int y = i + bufs;
632
633                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
634                                                       sg->length,
635                                                       DMA_BIDIRECTIONAL);
636                 bufl->bufers[y].len = sg->length;
637                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
638                         goto err;
639         }
640         bufl->num_bufs = n + bufs;
641         qat_req->buf.bl = bufl;
642         qat_req->buf.blp = blp;
643         qat_req->buf.sz = sz;
644         /* Handle out of place operation */
645         if (sgl != sglout) {
646                 struct qat_alg_buf *bufers;
647
648                 buflout = kmalloc_node(sz, GFP_ATOMIC,
649                                        dev_to_node(&GET_DEV(inst->accel_dev)));
650                 if (unlikely(!buflout))
651                         goto err;
652                 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
653                 if (unlikely(dma_mapping_error(dev, bloutp)))
654                         goto err;
655                 bufers = buflout->bufers;
656                 /* For out-of-place operation, DMA map only the data and
657                  * reuse the assoc and iv mappings */
658                 for (i = 0; i < bufs; i++) {
659                         bufers[i].len = bufl->bufers[i].len;
660                         bufers[i].addr = bufl->bufers[i].addr;
661                 }
662                 for_each_sg(sglout, sg, n, i) {
663                         int y = i + bufs;
664
665                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
666                                                         sg->length,
667                                                         DMA_BIDIRECTIONAL);
668                         buflout->bufers[y].len = sg->length;
669                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
670                                 goto err;
671                 }
672                 buflout->num_bufs = n + bufs;
673                 buflout->num_mapped_bufs = n;
674                 qat_req->buf.blout = buflout;
675                 qat_req->buf.bloutp = bloutp;
676         } else {
677                 /* Otherwise set the src and dst to the same address */
678                 qat_req->buf.bloutp = qat_req->buf.blp;
679         }
680         return 0;
681 err:
682         dev_err(dev, "Failed to map buf for dma\n");
683         for_each_sg(sgl, sg, n + bufs, i) {
684                 if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
685                         dma_unmap_single(dev, bufl->bufers[i].addr,
686                                          bufl->bufers[i].len,
687                                          DMA_BIDIRECTIONAL);
688                 }
689         }
690         if (!dma_mapping_error(dev, blp))
691                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
692         kfree(bufl);
693         if (sgl != sglout && buflout) {
694                 for_each_sg(sglout, sg, n, i) {
695                         int y = i + bufs;
696
697                         if (!dma_mapping_error(dev, buflout->bufers[y].addr))
698                                 dma_unmap_single(dev, buflout->bufers[y].addr,
699                                                  buflout->bufers[y].len,
700                                                  DMA_BIDIRECTIONAL);
701                 }
702                 if (!dma_mapping_error(dev, bloutp))
703                         dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
704                 kfree(buflout);
705         }
706         return -ENOMEM;
707 }
708
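/* Firmware response handler: free the DMA buffer lists and complete the
 * aead request, with -EBADMSG if the accelerator reports an error. */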
709 void qat_alg_callback(void *resp)
710 {
711         struct icp_qat_fw_la_resp *qat_resp = resp;
712         struct qat_crypto_request *qat_req =
713                                 (void *)(__force long)qat_resp->opaque_data;
714         struct qat_alg_session_ctx *ctx = qat_req->ctx;
715         struct qat_crypto_instance *inst = ctx->inst;
716         struct aead_request *areq = qat_req->areq;
717         uint8_t stat_field = qat_resp->comn_resp.comn_status;
718         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
719
720         qat_alg_free_bufl(inst, qat_req);
721         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
722                 res = -EBADMSG;
723         areq->base.complete(&areq->base, res);
724 }
725
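/* aead decrypt: map the request buffers, fill in a request from the decrypt
 * template and post it to the instance's symmetric tx ring. */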
726 static int qat_alg_dec(struct aead_request *areq)
727 {
728         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
729         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
730         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
731         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
732         struct icp_qat_fw_la_cipher_req_params *cipher_param;
733         struct icp_qat_fw_la_auth_req_params *auth_param;
734         struct icp_qat_fw_la_bulk_req *msg;
735         int digst_size = crypto_aead_crt(aead_tfm)->authsize;
736         int ret, ctr = 0;
737
738         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
739                                   areq->iv, AES_BLOCK_SIZE, qat_req);
740         if (unlikely(ret))
741                 return ret;
742
743         msg = &qat_req->req;
744         *msg = ctx->dec_fw_req_tmpl;
745         qat_req->ctx = ctx;
746         qat_req->areq = areq;
747         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
748         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
749         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
750         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
751         cipher_param->cipher_length = areq->cryptlen - digst_size;
752         cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
753         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
754         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
755         auth_param->auth_off = 0;
756         auth_param->auth_len = areq->assoclen +
757                                 cipher_param->cipher_length + AES_BLOCK_SIZE;
758         do {
759                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
760         } while (ret == -EAGAIN && ctr++ < 10);
761
762         if (ret == -EAGAIN) {
763                 qat_alg_free_bufl(ctx->inst, qat_req);
764                 return -EBUSY;
765         }
766         return -EINPROGRESS;
767 }
768
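/* Common encrypt path; enc_iv selects whether the IV is encrypted in-line
 * with the data (givencrypt) or passed in the request's IV array. */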
769 static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
770                                 int enc_iv)
771 {
772         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
773         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
774         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
775         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
776         struct icp_qat_fw_la_cipher_req_params *cipher_param;
777         struct icp_qat_fw_la_auth_req_params *auth_param;
778         struct icp_qat_fw_la_bulk_req *msg;
779         int ret, ctr = 0;
780
781         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
782                                   iv, AES_BLOCK_SIZE, qat_req);
783         if (unlikely(ret))
784                 return ret;
785
786         msg = &qat_req->req;
787         *msg = ctx->enc_fw_req_tmpl;
788         qat_req->ctx = ctx;
789         qat_req->areq = areq;
790         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
791         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
792         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
793         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
794         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
795
796         if (enc_iv) {
797                 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
798                 cipher_param->cipher_offset = areq->assoclen;
799         } else {
800                 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
801                 cipher_param->cipher_length = areq->cryptlen;
802                 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
803         }
804         auth_param->auth_off = 0;
805         auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
806
807         do {
808                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
809         } while (ret == -EAGAIN && ctr++ < 10);
810
811         if (ret == -EAGAIN) {
812                 qat_alg_free_bufl(ctx->inst, qat_req);
813                 return -EBUSY;
814         }
815         return -EINPROGRESS;
816 }
817
818 static int qat_alg_enc(struct aead_request *areq)
819 {
820         return qat_alg_enc_internal(areq, areq->iv, 0);
821 }
822
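/* givencrypt: build the IV from the per-context salt and the request
 * sequence number, then encrypt it in-line with the payload. */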
823 static int qat_alg_genivenc(struct aead_givcrypt_request *req)
824 {
825         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
826         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
827         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
828         __be64 seq;
829
830         memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
831         seq = cpu_to_be64(req->seq);
832         memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
833                &seq, sizeof(uint64_t));
834         return qat_alg_enc_internal(&req->areq, req->giv, 1);
835 }
836
837 static int qat_alg_init(struct crypto_tfm *tfm,
838                         enum icp_qat_hw_auth_algo hash, const char *hash_name)
839 {
840         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
841
842         memzero_explicit(ctx, sizeof(*ctx));
843         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
844         if (IS_ERR(ctx->hash_tfm))
845                 return -EFAULT;
846         spin_lock_init(&ctx->lock);
847         ctx->qat_hash_alg = hash;
848         tfm->crt_aead.reqsize = sizeof(struct aead_request) +
849                                 sizeof(struct qat_crypto_request);
850         ctx->tfm = tfm;
851         return 0;
852 }
853
854 static int qat_alg_sha1_init(struct crypto_tfm *tfm)
855 {
856         return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
857 }
858
859 static int qat_alg_sha256_init(struct crypto_tfm *tfm)
860 {
861         return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
862 }
863
864 static int qat_alg_sha512_init(struct crypto_tfm *tfm)
865 {
866         return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
867 }
868
869 static void qat_alg_exit(struct crypto_tfm *tfm)
870 {
871         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
872         struct qat_crypto_instance *inst = ctx->inst;
873         struct device *dev;
874
875         if (!IS_ERR(ctx->hash_tfm))
876                 crypto_free_shash(ctx->hash_tfm);
877
878         if (!inst)
879                 return;
880
881         dev = &GET_DEV(inst->accel_dev);
882         if (ctx->enc_cd) {
883                 memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
884                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
885                                   ctx->enc_cd, ctx->enc_cd_paddr);
886         }
887         if (ctx->dec_cd) {
888                 memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
889                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
890                                   ctx->dec_cd, ctx->dec_cd_paddr);
891         }
892         qat_crypto_put_instance(inst);
893 }
894
895 static struct crypto_alg qat_algs[] = { {
896         .cra_name = "authenc(hmac(sha1),cbc(aes))",
897         .cra_driver_name = "qat_aes_cbc_hmac_sha1",
898         .cra_priority = 4001,
899         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
900         .cra_blocksize = AES_BLOCK_SIZE,
901         .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
902         .cra_alignmask = 0,
903         .cra_type = &crypto_aead_type,
904         .cra_module = THIS_MODULE,
905         .cra_init = qat_alg_sha1_init,
906         .cra_exit = qat_alg_exit,
907         .cra_u = {
908                 .aead = {
909                         .setkey = qat_alg_setkey,
910                         .decrypt = qat_alg_dec,
911                         .encrypt = qat_alg_enc,
912                         .givencrypt = qat_alg_genivenc,
913                         .ivsize = AES_BLOCK_SIZE,
914                         .maxauthsize = SHA1_DIGEST_SIZE,
915                 },
916         },
917 }, {
918         .cra_name = "authenc(hmac(sha256),cbc(aes))",
919         .cra_driver_name = "qat_aes_cbc_hmac_sha256",
920         .cra_priority = 4001,
921         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
922         .cra_blocksize = AES_BLOCK_SIZE,
923         .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
924         .cra_alignmask = 0,
925         .cra_type = &crypto_aead_type,
926         .cra_module = THIS_MODULE,
927         .cra_init = qat_alg_sha256_init,
928         .cra_exit = qat_alg_exit,
929         .cra_u = {
930                 .aead = {
931                         .setkey = qat_alg_setkey,
932                         .decrypt = qat_alg_dec,
933                         .encrypt = qat_alg_enc,
934                         .givencrypt = qat_alg_genivenc,
935                         .ivsize = AES_BLOCK_SIZE,
936                         .maxauthsize = SHA256_DIGEST_SIZE,
937                 },
938         },
939 }, {
940         .cra_name = "authenc(hmac(sha512),cbc(aes))",
941         .cra_driver_name = "qat_aes_cbc_hmac_sha512",
942         .cra_priority = 4001,
943         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
944         .cra_blocksize = AES_BLOCK_SIZE,
945         .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
946         .cra_alignmask = 0,
947         .cra_type = &crypto_aead_type,
948         .cra_module = THIS_MODULE,
949         .cra_init = qat_alg_sha512_init,
950         .cra_exit = qat_alg_exit,
951         .cra_u = {
952                 .aead = {
953                         .setkey = qat_alg_setkey,
954                         .decrypt = qat_alg_dec,
955                         .encrypt = qat_alg_enc,
956                         .givencrypt = qat_alg_genivenc,
957                         .ivsize = AES_BLOCK_SIZE,
958                         .maxauthsize = SHA512_DIGEST_SIZE,
959                 },
960         },
961 } };
962
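/* Register the algorithms with the crypto API only for the first active
 * device; later devices just bump the reference count. */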
963 int qat_algs_register(void)
964 {
965         if (atomic_add_return(1, &active_dev) == 1) {
966                 int i;
967
968                 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
969                         qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
970                                                 CRYPTO_ALG_ASYNC;
971                 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
972         }
973         return 0;
974 }
975
976 int qat_algs_unregister(void)
977 {
978         if (atomic_sub_return(1, &active_dev) == 0)
979                 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
980         return 0;
981 }
982
983 int qat_algs_init(void)
984 {
985         atomic_set(&active_dev, 0);
986         crypto_get_default_rng();
987         return 0;
988 }
989
990 void qat_algs_exit(void)
991 {
992         crypto_put_default_rng();
993 }