/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
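
/* Fast modulo for power-of-two divisors: returns data % (1 << shift). */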
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}

static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}

static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}

static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}

static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy(ring->base_addr + ring->tail, msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}
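
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a producer holding a request ring handle typically retries for a
 * while when -EAGAIN is returned, since that only means the ring's
 * in-flight limit was hit:
 *
 *	int ctr = 0;
 *
 *	do {
 *		ret = adf_send_message(tx_ring, (uint32_t *)&req);
 *	} while (ret == -EAGAIN && ctr++ < RETRY_LIMIT);
 *
 * 'tx_ring', 'req' and RETRY_LIMIT are assumptions for the example.
 */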

static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)(ring->base_addr + ring->head);
        }
        if (msg_counter > 0) {
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
                atomic_sub(msg_counter, ring->inflights);
        }
        return 0;
}

static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                        BUILD_RESP_RING_CONFIG(ring->ring_size,
                                               ADF_RING_NEAR_WATERMARK_512,
                                               ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static int adf_init_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint64_t ring_base;
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
                                             ring_size_bytes, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->base_addr)
                return -ENOMEM;

        memset(ring->base_addr, 0x7F, ring_size_bytes);
        /* The base_addr has to be aligned to the size of the buffer */
        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                pr_err("QAT: Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}

int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                pr_err("QAT: Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                pr_err("QAT: Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                pr_err("QAT: Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                pr_err("QAT: Section %s, no such entry : %s\n",
                       section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                pr_err("QAT: Can't get ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                pr_err("QAT: Ring %d, %s already exists.\n",
                       ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        accel_dev->hw_device->hw_arb_ring_enable(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                pr_err("QAT: Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        accel_dev->hw_device->hw_arb_ring_disable(ring);
        return ret;
}
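
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a service driver usually creates a request ring without a callback
 * and a response ring with one, both against the same bank; the ring
 * numbers themselves come from the configuration entries named here:
 *
 *	ret = adf_create_ring(accel_dev, "SSL", bank_nr, 64, 128,
 *			      "RingTx0", NULL, 0, &tx_ring);
 *	if (!ret)
 *		ret = adf_create_ring(accel_dev, "SSL", bank_nr, 64, 32,
 *				      "RingRx0", resp_callback, 0, &rx_ring);
 *
 * The section name, ring names, message sizes/counts and
 * 'resp_callback' are assumptions for the example.
 */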

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear PCI config space */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        accel_dev->hw_device->hw_arb_ring_disable(ring);
        adf_cleanup_ring(ring);
}

static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}

/**
 * adf_response_handler() - Bottom half handler for ring bank responses
 * @bank_addr:  Address of the ring bank for which the BH was scheduled.
 *
 * Function is the bottom half handler for responses from the acceleration
 * device. There is one handler for every ring bank. Function checks all
 * communication rings in the bank.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_response_handler(unsigned long bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and re-enable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}
EXPORT_SYMBOL_GPL(adf_response_handler);
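
/*
 * Illustrative sketch (hypothetical ISR setup, not part of this file):
 * a device specific driver typically runs this handler as a tasklet,
 * one per ring bank, and schedules it from the bank's interrupt:
 *
 *	tasklet_init(&priv->resp_tasklet, adf_response_handler,
 *		     (unsigned long)bank);
 *	...
 *	tasklet_hi_schedule(&priv->resp_tasklet);
 *
 * 'priv' and its 'resp_tasklet' member are assumptions for the example.
 */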

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}

static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
                                  const char *section,
                                  uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /*
         * Always enable IRQ coalescing; this allows use of the optimized
         * flag-and-coalescing register. If coalescing is disabled in the
         * config file, just use the minimum time value.
         */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
                             &coalesc_enabled) == 0) && coalesc_enabled)
                adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                pr_err("QAT: Invalid tx rings mask config\n");
                                goto err;
                        }
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                pr_err("QAT: Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);
        if (!etr_data->debug) {
                pr_err("QAT: Unable to create transport debugfs entry\n");
                ret = -ENOENT;
                goto err_bank_debug;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
err_bank_debug:
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);
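
/*
 * Illustrative sketch (hypothetical device bring-up, not part of this
 * file): a device specific driver sets up the transport rings once its
 * BARs are mapped and the device configuration is in place, and undoes
 * the setup with adf_cleanup_etr_data() on the error path:
 *
 *	ret = adf_init_etr_data(accel_dev);
 *	if (ret)
 *		goto err_shutdown;
 *	...
 *	adf_cleanup_etr_data(accel_dev);
 *
 * The surrounding flow and the 'err_shutdown' label are assumptions.
 */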

static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);