2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include "adf_accel_devices.h"
50 #include "adf_common_drv.h"
51 #include "adf_transport.h"
53 #include "adf_cfg_strings.h"
54 #include "qat_crypto.h"
55 #include "icp_qat_fw.h"
/* Config section used for every crypto key lookup in this file. */
57 #define SEC ADF_KERNEL_SEC

/* Service handle registered with the ADF service framework; initialized
 * in qat_crypto_register() before adf_service_register() is called.
 */
59 static struct service_hndl qat_crypto;
61 void qat_crypto_put_instance(struct qat_crypto_instance *inst)
63 if (atomic_sub_return(1, &inst->refctr) == 0)
64 adf_dev_put(inst->accel_dev);
67 static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
69 struct qat_crypto_instance *inst;
70 struct list_head *list_ptr, *tmp;
73 list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
74 inst = list_entry(list_ptr, struct qat_crypto_instance, list);
76 for (i = 0; i < atomic_read(&inst->refctr); i++)
77 qat_crypto_put_instance(inst);
80 adf_remove_ring(inst->sym_tx);
83 adf_remove_ring(inst->sym_rx);
86 adf_remove_ring(inst->pke_tx);
89 adf_remove_ring(inst->pke_rx);
92 adf_remove_ring(inst->rnd_tx);
95 adf_remove_ring(inst->rnd_rx);
103 struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
105 struct adf_accel_dev *accel_dev = NULL;
106 struct qat_crypto_instance *inst_best = NULL;
107 struct list_head *itr;
108 unsigned long best = ~0;
110 list_for_each(itr, adf_devmgr_get_head()) {
111 accel_dev = list_entry(itr, struct adf_accel_dev, list);
112 if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
117 pr_err("QAT: Could not find device on give node\n");
118 accel_dev = adf_devmgr_get_first();
120 if (!accel_dev || !adf_dev_started(accel_dev))
123 list_for_each(itr, &accel_dev->crypto_list) {
124 struct qat_crypto_instance *inst;
127 inst = list_entry(itr, struct qat_crypto_instance, list);
128 cur = atomic_read(&inst->refctr);
135 if (atomic_add_return(1, &inst_best->refctr) == 1) {
136 if (adf_dev_get(accel_dev)) {
137 atomic_dec(&inst_best->refctr);
138 pr_err("QAT: Could increment dev refctr\n");
146 static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
150 unsigned long num_inst, num_msg_sym, num_msg_asym;
152 struct qat_crypto_instance *inst;
153 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
154 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
156 INIT_LIST_HEAD(&accel_dev->crypto_list);
157 strlcpy(key, ADF_NUM_CY, sizeof(key));
159 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
162 if (kstrtoul(val, 0, &num_inst))
165 for (i = 0; i < num_inst; i++) {
166 inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
167 accel_dev->numa_node);
171 list_add_tail(&inst->list, &accel_dev->crypto_list);
173 atomic_set(&inst->refctr, 0);
174 inst->accel_dev = accel_dev;
175 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
176 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
179 if (kstrtoul(val, 10, &bank))
181 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
182 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
185 if (kstrtoul(val, 10, &num_msg_sym))
187 num_msg_sym = num_msg_sym >> 1;
188 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
189 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
192 if (kstrtoul(val, 10, &num_msg_asym))
194 num_msg_asym = num_msg_asym >> 1;
196 msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
197 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
198 if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
199 msg_size, key, NULL, 0, &inst->sym_tx))
202 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
203 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
204 msg_size, key, NULL, 0, &inst->rnd_tx))
207 msg_size = msg_size >> 1;
208 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
209 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
210 msg_size, key, NULL, 0, &inst->pke_tx))
213 msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
214 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
215 if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
216 msg_size, key, qat_alg_callback, 0,
220 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
221 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
222 msg_size, key, qat_alg_callback, 0,
226 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
227 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
228 msg_size, key, qat_alg_callback, 0,
234 qat_crypto_free_instances(accel_dev);
238 static int qat_crypto_init(struct adf_accel_dev *accel_dev)
240 if (qat_crypto_create_instances(accel_dev))
/**
 * qat_crypto_shutdown() - ADF_EVENT_SHUTDOWN handler: free crypto instances
 * @accel_dev: accel device being shut down
 *
 * Return: 0 on success.
 */
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}
251 static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
252 enum adf_event event)
258 ret = qat_crypto_init(accel_dev);
260 case ADF_EVENT_SHUTDOWN:
261 ret = qat_crypto_shutdown(accel_dev);
263 case ADF_EVENT_RESTARTING:
264 case ADF_EVENT_RESTARTED:
265 case ADF_EVENT_START:
273 int qat_crypto_register(void)
275 memset(&qat_crypto, 0, sizeof(qat_crypto));
276 qat_crypto.event_hld = qat_crypto_event_handler;
277 qat_crypto.name = "qat_crypto";
278 return adf_service_register(&qat_crypto);
281 int qat_crypto_unregister(void)
283 return adf_service_unregister(&qat_crypto);