/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

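/* Per-buffer entry referenced by struct qat_alg_buf_list below. Restored
 * here from its usage later in this file (bufers[i].addr/len and
 * sizeof(struct qat_alg_buf)); the resrvd padding word is an assumption
 * that keeps each entry 16 bytes, matching the flat buffer format the
 * firmware expects. */
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;
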
struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);

struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_session_ctx struct */
};

static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

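/*
 * Pre-compute the partial inner and outer HMAC states for the given
 * authentication key: ipad/opad are the (possibly pre-hashed) key XORed
 * with 0x36/0x5c, each is hashed for one block, and the exported
 * intermediate states are written into the auth block of the content
 * descriptor so the hardware can resume the HMAC from there.
 */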
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memzero_explicit(ipad + digest_size, block_size - digest_size);
		memzero_explicit(opad + digest_size, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
		memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

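/* Fill the request header fields common to every LA request: SGL pointer
 * type, digest placed in the buffer, no partial processing, 16-byte IV
 * field, no protocol and no state update. */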
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

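/* Build the encrypt session: a cipher-then-hash content descriptor (the
 * AES-CBC cipher block followed by the HMAC auth setup) plus the matching
 * firmware request template. */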
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		return -EFAULT;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

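/* Build the decrypt session: a hash-then-cipher content descriptor (HMAC
 * auth setup first, then the key-converted AES-CBC block), with the
 * hardware comparing the authentication result itself
 * (ICP_QAT_FW_LA_CMP_AUTH_RES). */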
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		return -EFAULT;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		goto bad_key;
	}

	if (qat_alg_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
			  unsigned int keylen)
{
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
		memzero_explicit(&ctx->enc_fw_req_tmpl,
				 sizeof(struct icp_qat_fw_la_bulk_req));
		memzero_explicit(&ctx->dec_fw_req_tmpl,
				 sizeof(struct icp_qat_fw_la_bulk_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	int i, bufs = bl->num_bufs;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = bufs - blout->num_mapped_bufs;

		for (i = bufless; i < bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
		kfree(blout);
	}
}

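/* Flatten the assoc scatterlist, the IV and the src/dst scatterlists into
 * the firmware's flat buffer lists, DMA-mapping every element. For
 * out-of-place requests a second list is built for the destination that
 * reuses the assoc and IV mappings. */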
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
	bufl->num_bufs = n + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		buflout = kmalloc_node(sz, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			buflout->bufers[y].len = sg->length;
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
		}
		buflout->num_bufs = n + bufs;
		buflout->num_mapped_bufs = n;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	for_each_sg(sgl, sg, n + bufs, i) {
		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
	}
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
				dma_unmap_single(dev, buflout->bufers[y].addr,
						 buflout->bufers[y].len,
						 DMA_BIDIRECTIONAL);
		}
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

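/* Firmware response handler: unmaps the request buffers and completes the
 * AEAD request, reporting -EBADMSG when the device flags an error. */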
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
			(void *)(__force long)qat_resp->opaque_data;
	struct qat_alg_session_ctx *ctx = qat_req->ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->areq;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static int qat_alg_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
				int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_enc(struct aead_request *areq)
{
	return qat_alg_enc_internal(areq, areq->iv, 0);
}

static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_enc_internal(&req->areq, req->giv, 1);
}

static int qat_alg_init(struct crypto_tfm *tfm,
			enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
} };

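/* Algorithm registration is reference counted on active_dev: the AEAD
 * templates are registered when the first accelerator comes up and
 * unregistered only when the last one goes away. */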
int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
						CRYPTO_ALG_ASYNC;
		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}

int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}

int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();
	return 0;
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}