/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

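/* The driver manages a single CCP instance: the device struct is published
 * through the file-scope ccp_dev pointer below by ccp_add_device() and
 * cleared again by ccp_del_device().
 */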
static struct ccp_device *ccp_dev;

static inline struct ccp_device *ccp_get_device(void)
{
	return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
	ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
	ccp_dev = NULL;
}

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);

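/*
 * Illustrative usage sketch (not part of the driver): a submitter fills in a
 * ccp_cmd, supplies a callback and treats -EINPROGRESS/-EBUSY as successful
 * submission. The names my_callback/my_ctx are hypothetical.
 *
 *	static void my_callback(void *data, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;		// cmd has advanced out of the backlog
 *		finish_my_request(data, err);	// err is the operation result
 *	}
 *
 *	cmd->flags = CCP_CMD_MAY_BACKLOG;
 *	cmd->callback = my_callback;
 *	cmd->data = my_ctx;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *		handle_submission_error(ret);
 */
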
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;
		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

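/* Completion callbacks are bounced through a tasklet so that they run in
 * softirq context rather than directly in the queue kthread; the kthread
 * waits for the tasklet to finish before picking up the next command.
 */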
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

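/* Each command queue is serviced by its own kthread. The thread sleeps until
 * it is woken (by ccp_enqueue_cmd(), ccp_do_cmd_backlog() or kthread_stop())
 * and then dequeues and runs commands until none remain.
 */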
static int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/*
	 * Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
	if (!ccp) {
		dev_err(dev, "unable to allocate device struct\n");
		return NULL;
	}
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;
	ccp->ksb_start = 0;

	return ccp;
}

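/* A bus-specific front-end (e.g. the PCI probe code) is expected to call
 * ccp_alloc_struct(), fill in the bus resources (io_regs, the get_irq/free_irq
 * hooks, etc.) and then hand the device to ccp_init() below.
 */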
/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "ccp-q%u", cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = "ccp-rng";
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Make the device struct available before enabling interrupts */
	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove general access to the device struct */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queues */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}

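/* Used by the bus-specific power management code to determine when every
 * queue kthread has noticed ccp->suspending (see ccp_dequeue_cmd()) and
 * parked itself.
 */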
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif

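/* The CCP is present only on select AMD processors: family 22 (0x16) is
 * matched here, and ccp_mod_init() further limits support to models 48-63
 * (0x30-0x3f).
 */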
#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
	{ },
};
#endif

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
	int ret;

	if (!x86_match_cpu(ccp_support))
		return -ENODEV;

	switch (cpuinfo->x86) {
	case 22:
		if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
			return -ENODEV;

		ret = ccp_pci_init();
		if (ret)
			return ret;

		/* Don't leave the driver loaded if init failed */
		if (!ccp_get_device()) {
			ccp_pci_exit();
			return -ENODEV;
		}

		return 0;
	}
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (!ccp_get_device()) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

	switch (cpuinfo->x86) {
	case 22:
		ccp_pci_exit();
		break;
	}
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);