/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
47 #include <linux/module.h>
48 #include <linux/mutex.h>
49 #include <linux/slab.h>
51 #include <linux/bitops.h>
52 #include <linux/pci.h>
53 #include <linux/cdev.h>
54 #include <linux/uaccess.h>
55 #include <linux/crypto.h>
57 #include "adf_accel_devices.h"
58 #include "adf_common_drv.h"
60 #include "adf_cfg_common.h"
61 #include "adf_cfg_user.h"
63 #define DEVICE_NAME "qat_adf_ctl"
65 static DEFINE_MUTEX(adf_ctl_lock);
66 static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
68 static const struct file_operations adf_ctl_ops = {
70 .unlocked_ioctl = adf_ctl_ioctl,
71 .compat_ioctl = adf_ctl_ioctl,
74 struct adf_ctl_drv_info {
77 struct class *drv_class;
80 static struct adf_ctl_drv_info adf_ctl_drv;
82 static void adf_chr_drv_destroy(void)
84 device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
85 cdev_del(&adf_ctl_drv.drv_cdev);
86 class_destroy(adf_ctl_drv.drv_class);
87 unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
90 static int adf_chr_drv_create(void)
93 struct device *drv_device;
95 if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
96 pr_err("QAT: unable to allocate chrdev region\n");
100 adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
101 if (IS_ERR(adf_ctl_drv.drv_class)) {
102 pr_err("QAT: class_create failed for adf_ctl\n");
103 goto err_chrdev_unreg;
105 adf_ctl_drv.major = MAJOR(dev_id);
106 cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
107 if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
108 pr_err("QAT: cdev add failed\n");
109 goto err_class_destr;
112 drv_device = device_create(adf_ctl_drv.drv_class, NULL,
113 MKDEV(adf_ctl_drv.major, 0),
115 if (IS_ERR(drv_device)) {
116 pr_err("QAT: failed to create device\n");
121 cdev_del(&adf_ctl_drv.drv_cdev);
123 class_destroy(adf_ctl_drv.drv_class);
125 unregister_chrdev_region(dev_id, 1);
129 static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
132 struct adf_user_cfg_ctl_data *cfg_data;
134 cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
138 /* Initialize device id to NO DEVICE as 0 is a valid device id */
139 cfg_data->device_id = ADF_CFG_NO_DEVICE;
141 if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
142 pr_err("QAT: failed to copy from user cfg_data.\n");
147 *ctl_data = cfg_data;
151 static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
153 const struct adf_user_cfg_key_val *key_val)
155 if (key_val->type == ADF_HEX) {
156 long *ptr = (long *)key_val->val;
159 if (adf_cfg_add_key_value_param(accel_dev, section,
160 key_val->key, (void *)val,
162 dev_err(&GET_DEV(accel_dev),
163 "failed to add hex keyvalue.\n");
167 if (adf_cfg_add_key_value_param(accel_dev, section,
168 key_val->key, key_val->val,
170 dev_err(&GET_DEV(accel_dev),
171 "failed to add keyvalue.\n");
178 static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
179 struct adf_user_cfg_ctl_data *ctl_data)
181 struct adf_user_cfg_key_val key_val;
182 struct adf_user_cfg_key_val *params_head;
183 struct adf_user_cfg_section section, *section_head;
185 section_head = ctl_data->config_section;
187 while (section_head) {
188 if (copy_from_user(§ion, (void __user *)section_head,
189 sizeof(*section_head))) {
190 dev_err(&GET_DEV(accel_dev),
191 "failed to copy section info\n");
195 if (adf_cfg_section_add(accel_dev, section.name)) {
196 dev_err(&GET_DEV(accel_dev),
197 "failed to add section.\n");
201 params_head = section.params;
203 while (params_head) {
204 if (copy_from_user(&key_val, (void __user *)params_head,
206 dev_err(&GET_DEV(accel_dev),
207 "Failed to copy keyvalue.\n");
210 if (adf_add_key_value_data(accel_dev, section.name,
214 params_head = key_val.next;
216 section_head = section.next;
220 adf_cfg_del_all(accel_dev);
224 static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
228 struct adf_user_cfg_ctl_data *ctl_data;
229 struct adf_accel_dev *accel_dev;
231 ret = adf_ctl_alloc_resources(&ctl_data, arg);
235 accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
241 if (adf_dev_started(accel_dev)) {
246 if (adf_copy_key_value_data(accel_dev, ctl_data)) {
250 set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
256 static int adf_ctl_is_device_in_use(int id)
258 struct adf_accel_dev *dev;
260 list_for_each_entry(dev, adf_devmgr_get_head(), list) {
261 if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
262 if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
263 dev_info(&GET_DEV(dev),
264 "device qat_dev%d is busy\n",
273 static int adf_ctl_stop_devices(uint32_t id)
275 struct adf_accel_dev *accel_dev;
278 list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
279 if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
280 if (!adf_dev_started(accel_dev))
283 /* First stop all VFs */
284 if (!accel_dev->is_vf)
287 adf_dev_stop(accel_dev);
288 adf_dev_shutdown(accel_dev);
292 list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
293 if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
294 if (!adf_dev_started(accel_dev))
297 adf_dev_stop(accel_dev);
298 adf_dev_shutdown(accel_dev);
304 static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
308 struct adf_user_cfg_ctl_data *ctl_data;
310 ret = adf_ctl_alloc_resources(&ctl_data, arg);
314 if (adf_devmgr_verify_id(ctl_data->device_id)) {
315 pr_err("QAT: Device %d not found\n", ctl_data->device_id);
320 ret = adf_ctl_is_device_in_use(ctl_data->device_id);
324 if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
325 pr_info("QAT: Stopping all acceleration devices.\n");
327 pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
328 ctl_data->device_id);
330 ret = adf_ctl_stop_devices(ctl_data->device_id);
332 pr_err("QAT: failed to stop device.\n");
338 static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
342 struct adf_user_cfg_ctl_data *ctl_data;
343 struct adf_accel_dev *accel_dev;
345 ret = adf_ctl_alloc_resources(&ctl_data, arg);
350 accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
354 if (!adf_dev_started(accel_dev)) {
355 dev_info(&GET_DEV(accel_dev),
356 "Starting acceleration device qat_dev%d.\n",
357 ctl_data->device_id);
358 ret = adf_dev_init(accel_dev);
360 ret = adf_dev_start(accel_dev);
362 dev_info(&GET_DEV(accel_dev),
363 "Acceleration device qat_dev%d already started.\n",
364 ctl_data->device_id);
367 dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
368 ctl_data->device_id);
369 adf_dev_stop(accel_dev);
370 adf_dev_shutdown(accel_dev);
377 static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
380 uint32_t num_devices = 0;
382 adf_devmgr_get_num_dev(&num_devices);
383 if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
389 static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
392 struct adf_hw_device_data *hw_data;
393 struct adf_dev_status_info dev_info;
394 struct adf_accel_dev *accel_dev;
396 if (copy_from_user(&dev_info, (void __user *)arg,
397 sizeof(struct adf_dev_status_info))) {
398 pr_err("QAT: failed to copy from user.\n");
402 accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
406 hw_data = accel_dev->hw_device;
407 dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
408 dev_info.num_ae = hw_data->get_num_aes(hw_data);
409 dev_info.num_accel = hw_data->get_num_accels(hw_data);
410 dev_info.num_logical_accel = hw_data->num_logical_accel;
411 dev_info.banks_per_accel = hw_data->num_banks
412 / hw_data->num_logical_accel;
413 strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
414 dev_info.instance_id = hw_data->instance_id;
415 dev_info.type = hw_data->dev_class->type;
416 dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
417 dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
418 dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
420 if (copy_to_user((void __user *)arg, &dev_info,
421 sizeof(struct adf_dev_status_info))) {
422 dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
428 static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
432 if (mutex_lock_interruptible(&adf_ctl_lock))
436 case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
437 ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
440 case IOCTL_STOP_ACCEL_DEV:
441 ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
444 case IOCTL_START_ACCEL_DEV:
445 ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
448 case IOCTL_GET_NUM_DEVICES:
449 ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
452 case IOCTL_STATUS_ACCEL_DEV:
453 ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
456 pr_err("QAT: Invalid ioctl\n");
460 mutex_unlock(&adf_ctl_lock);
464 static int __init adf_register_ctl_device_driver(void)
466 mutex_init(&adf_ctl_lock);
468 if (adf_chr_drv_create())
474 if (adf_init_pf_wq())
477 if (adf_init_vf_wq())
480 if (qat_crypto_register())
481 goto err_crypto_register;
492 adf_chr_drv_destroy();
494 mutex_destroy(&adf_ctl_lock);
498 static void __exit adf_unregister_ctl_device_driver(void)
500 adf_chr_drv_destroy();
504 qat_crypto_unregister();
505 adf_clean_vf_map(false);
506 mutex_destroy(&adf_ctl_lock);
509 module_init(adf_register_ctl_device_driver);
510 module_exit(adf_unregister_ctl_device_driver);
511 MODULE_LICENSE("Dual BSD/GPL");
512 MODULE_AUTHOR("Intel");
513 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
514 MODULE_ALIAS_CRYPTO("intel_qat");
515 MODULE_VERSION(ADF_DRV_VERSION);