/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
47 #include <linux/kernel.h>
48 #include <linux/module.h>
49 #include <linux/pci.h>
50 #include <linux/init.h>
51 #include <linux/types.h>
53 #include <linux/slab.h>
54 #include <linux/errno.h>
55 #include <linux/device.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/platform_device.h>
58 #include <linux/workqueue.h>
60 #include <adf_accel_devices.h>
61 #include <adf_common_drv.h>
63 #include <adf_transport_access_macros.h>
64 #include "adf_dh895xcc_hw_data.h"
67 static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
69 #define ADF_SYSTEM_DEVICE(device_id) \
70 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
72 static const struct pci_device_id adf_pci_tbl[] = {
73 ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
76 MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
/* Forward declarations so the adf_driver struct below can reference them. */
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
81 static struct pci_driver adf_driver = {
82 .id_table = adf_pci_tbl,
83 .name = adf_driver_name,
88 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
90 struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
93 adf_exit_admin_comms(accel_dev);
94 adf_exit_arb(accel_dev);
95 adf_cleanup_etr_data(accel_dev);
97 for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
98 struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
101 pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
104 if (accel_dev->hw_device) {
105 switch (accel_dev->hw_device->pci_dev_id) {
106 case ADF_DH895XCC_PCI_DEVICE_ID:
107 adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
112 kfree(accel_dev->hw_device);
114 adf_cfg_dev_remove(accel_dev);
115 debugfs_remove(accel_dev->debugfs_dir);
116 adf_devmgr_rm_dev(accel_dev);
117 pci_release_regions(accel_pci_dev->pci_dev);
118 pci_disable_device(accel_pci_dev->pci_dev);
122 static int qat_dev_start(struct adf_accel_dev *accel_dev)
124 int cpus = num_online_cpus();
125 int banks = GET_MAX_BANKS(accel_dev);
126 int instances = min(cpus, banks);
127 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
131 if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
133 if (adf_cfg_section_add(accel_dev, "Accelerator0"))
135 for (i = 0; i < instances; i++) {
137 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
138 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
139 key, (void *)&val, ADF_DEC))
142 snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
144 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
145 key, (void *)&val, ADF_DEC))
148 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
150 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
151 key, (void *)&val, ADF_DEC))
155 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
156 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
157 key, (void *)&val, ADF_DEC))
161 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
162 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
163 key, (void *)&val, ADF_DEC))
167 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
168 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
169 key, (void *)&val, ADF_DEC))
173 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
174 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
175 key, (void *)&val, ADF_DEC))
179 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
180 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
181 key, (void *)&val, ADF_DEC))
185 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
186 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
187 key, (void *)&val, ADF_DEC))
191 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
192 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
193 key, (void *)&val, ADF_DEC))
196 val = ADF_COALESCING_DEF_TIME;
197 snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
198 if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
199 key, (void *)&val, ADF_DEC))
204 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
205 ADF_NUM_CY, (void *)&val, ADF_DEC))
208 set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
209 return adf_dev_start(accel_dev);
211 dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
215 static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
217 struct adf_accel_dev *accel_dev;
218 struct adf_accel_pci *accel_pci_dev;
219 struct adf_hw_device_data *hw_data;
220 void __iomem *pmisc_bar_addr = NULL;
221 char name[ADF_DEVICE_NAME_LENGTH];
222 unsigned int i, bar_nr;
225 switch (ent->device) {
226 case ADF_DH895XCC_PCI_DEVICE_ID:
229 dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
233 if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
234 /* If the accelerator is connected to a node with no memory
235 * there is no point in using the accelerator since the remote
236 * memory transaction will be very slow. */
237 dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
241 accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
242 dev_to_node(&pdev->dev));
246 INIT_LIST_HEAD(&accel_dev->crypto_list);
248 /* Add accel device to accel table.
249 * This should be called before adf_cleanup_accel is called */
250 if (adf_devmgr_add_dev(accel_dev)) {
251 dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
256 accel_dev->owner = THIS_MODULE;
257 /* Allocate and configure device configuration structure */
258 hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
259 dev_to_node(&pdev->dev));
265 accel_dev->hw_device = hw_data;
266 switch (ent->device) {
267 case ADF_DH895XCC_PCI_DEVICE_ID:
268 adf_init_hw_data_dh895xcc(accel_dev->hw_device);
273 accel_pci_dev = &accel_dev->accel_pci_dev;
274 pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
275 pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
278 /* Get Accelerators and Accelerators Engines masks */
279 hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
280 hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
281 accel_pci_dev->sku = hw_data->get_sku(hw_data);
282 accel_pci_dev->pci_dev = pdev;
283 /* If the device has no acceleration engines then ignore it. */
284 if (!hw_data->accel_mask || !hw_data->ae_mask ||
285 ((~hw_data->ae_mask) & 0x01)) {
286 dev_err(&pdev->dev, "No acceleration units found");
291 /* Create dev top level debugfs entry */
292 snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
293 hw_data->dev_class->name, hw_data->instance_id);
294 accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
295 if (!accel_dev->debugfs_dir) {
296 dev_err(&pdev->dev, "Could not create debugfs dir\n");
301 /* Create device configuration table */
302 ret = adf_cfg_dev_add(accel_dev);
306 /* enable PCI device */
307 if (pci_enable_device(pdev)) {
312 /* set dma identifier */
313 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
314 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
315 dev_err(&pdev->dev, "No usable DMA configuration\n");
319 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
323 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
326 if (pci_request_regions(pdev, adf_driver_name)) {
331 /* Read accelerator capabilities mask */
332 pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
333 &hw_data->accel_capabilities_mask);
335 /* Find and map all the device's BARS */
336 for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
337 struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
340 bar->base_addr = pci_resource_start(pdev, bar_nr);
343 bar->size = pci_resource_len(pdev, bar_nr);
344 bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
345 if (!bar->virt_addr) {
346 dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
350 if (i == ADF_DH895XCC_PMISC_BAR)
351 pmisc_bar_addr = bar->virt_addr;
353 pci_set_master(pdev);
355 if (adf_enable_aer(accel_dev, &adf_driver)) {
356 dev_err(&pdev->dev, "Failed to enable aer\n");
361 if (adf_init_etr_data(accel_dev)) {
362 dev_err(&pdev->dev, "Failed initialize etr\n");
367 if (adf_init_admin_comms(accel_dev)) {
368 dev_err(&pdev->dev, "Failed initialize admin comms\n");
373 if (adf_init_arb(accel_dev)) {
374 dev_err(&pdev->dev, "Failed initialize hw arbiter\n");
378 if (pci_save_state(pdev)) {
379 dev_err(&pdev->dev, "Failed to save pci state\n");
384 /* Enable bundle and misc interrupts */
385 ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
386 ADF_DH895XCC_SMIA0_MASK);
387 ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
388 ADF_DH895XCC_SMIA1_MASK);
390 ret = qat_dev_start(accel_dev);
392 adf_dev_stop(accel_dev);
398 adf_cleanup_accel(accel_dev);
402 static void __exit adf_remove(struct pci_dev *pdev)
404 struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
407 pr_err("QAT: Driver removal failed\n");
410 if (adf_dev_stop(accel_dev))
411 dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
412 adf_disable_aer(accel_dev);
413 adf_cleanup_accel(accel_dev);
416 static int __init adfdrv_init(void)
418 request_module("intel_qat");
419 if (qat_admin_register())
422 if (pci_register_driver(&adf_driver)) {
423 pr_err("QAT: Driver initialization failed\n");
429 static void __exit adfdrv_release(void)
431 pci_unregister_driver(&adf_driver);
432 qat_admin_unregister();
435 module_init(adfdrv_init);
436 module_exit(adfdrv_release);
438 MODULE_LICENSE("Dual BSD/GPL");
439 MODULE_AUTHOR("Intel");
440 MODULE_FIRMWARE("qat_895xcc.bin");
441 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");