/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
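
/* Number of accelerator devices currently using these algorithms; the
 * algorithms stay registered with the crypto API while it is non-zero. */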
static atomic_t active_dev;
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);
struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_session_ctx struct */
};
static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
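
/*
 * Precompute the HMAC inner and outer partial hashes: XOR the auth key
 * (hashed down first if longer than one block) with the ipad/opad
 * patterns, run one block of each through the shash, and export the
 * intermediate states big-endian into the content descriptor so the
 * hardware can finalize the HMAC on its own per request.
 */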
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memset(ipad + digest_size, 0, block_size - digest_size);
		memset(opad + digest_size, 0, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
		memset(opad + auth_keylen, 0, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	return 0;
}
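
/* Fill in the request-header fields shared by the encrypt and decrypt
 * templates: LA service, SGL source/destination pointers, 16-byte IV
 * carried in the request, no partial packets and no state updates. */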
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
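
/* Set up the encrypt session. The content descriptor places the AES-CBC
 * config and key first and the HMAC setup after it, and the header chains
 * the cipher slice into the auth slice (CIPHER_HASH), so the digest is
 * computed over the ciphertext and returned in the buffer. */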
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
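
/* Set up the decrypt session. Here the auth block comes first in the
 * content descriptor and the header chains auth into cipher (HASH_CIPHER):
 * the firmware checks the HMAC over the ciphertext before decrypting, and
 * KEY_CONVERT selects the AES decrypt key schedule. */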
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
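
/* Split the authenc() key blob into the cipher and auth keys, map the AES
 * key length onto the hardware algorithm id, and program both the encrypt
 * and decrypt sessions. */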
static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		goto bad_key;
	}

	if (qat_alg_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
			  unsigned int keylen)
{
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		memset(&ctx->enc_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
		memset(&ctx->dec_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
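
/* Unmap every buffer referenced by the firmware buffer lists and the
 * DMA-mapped lists themselves. For out-of-place requests the output list
 * only owns its data buffers, since the assoc and IV mappings are shared
 * with the input list. */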
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	int i, bufs = bl->num_bufs;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = bufs - blout->num_mapped_bufs;

		for (i = bufless; i < bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
		kfree(blout);
	}
}
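
/* Flatten the assoc scatterlist, the IV and the data scatterlist into the
 * firmware's buffer-list format: each entry is DMA-mapped individually and
 * the list itself is then mapped so the device can fetch it. */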
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
	bufl->num_bufs = n + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		buflout = kmalloc_node(sz, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			buflout->bufers[y].len = sg->length;
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
		}
		buflout->num_bufs = n + bufs;
		buflout->num_mapped_bufs = n;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	for_each_sg(sgl, sg, n + bufs, i) {
		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
	}
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
				dma_unmap_single(dev, buflout->bufers[y].addr,
						 buflout->bufers[y].len,
						 DMA_BIDIRECTIONAL);
		}
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}
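
/* Response-ring callback: recover the request from the opaque field,
 * release its DMA mappings and complete the aead request with the status
 * reported by the firmware. */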
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
		(void *)(__force long)qat_resp->opaque_data;
	struct qat_alg_session_ctx *ctx = qat_req->ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->areq;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
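
/* Decrypt path: the cipher covers the payload minus the digest, the auth
 * check covers assoc data, IV and ciphertext, and the send is retried up
 * to ten times if the ring is full. */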
static int qat_alg_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digest_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digest_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
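
/* Encrypt path shared by the plain and IV-generating entry points; when
 * enc_iv is set the IV block sits in front of the payload and is ciphered
 * together with it instead of being passed in the request parameters. */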
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
				int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_enc(struct aead_request *areq)
{
	return qat_alg_enc_internal(areq, areq->iv, 0);
}
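
/* Generate the IV from the per-session random salt with the request
 * sequence number folded into the last eight bytes, then encrypt with the
 * generated IV included in the ciphertext. */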
static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_enc_internal(&req->areq, req->giv, 1);
}
static int qat_alg_init(struct crypto_tfm *tfm,
			enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, '\0', sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}
static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	if (ctx->dec_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	qat_crypto_put_instance(inst);
}
static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
} };
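
/* Algorithm registration is reference counted across accelerator devices:
 * only the first device to come up registers the algorithms with the
 * crypto API, and only the last one to go down unregisters them. */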
int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
						CRYPTO_ALG_ASYNC;
		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}
int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}
int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();
	return 0;
}
void qat_algs_exit(void)
{
	crypto_put_default_rng();
}