/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* pr_fmt() must be defined before the printk-including headers. */
#define pr_fmt(fmt) "ACPI : AML: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi_dbg.h>
27 #define ACPI_AML_BUF_ALIGN (sizeof (acpi_size))
28 #define ACPI_AML_BUF_SIZE PAGE_SIZE
30 #define circ_count(circ) \
31 (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
32 #define circ_count_to_end(circ) \
33 (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
34 #define circ_space(circ) \
35 (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
36 #define circ_space_to_end(circ) \
37 (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
39 #define ACPI_AML_OPENED 0x0001
40 #define ACPI_AML_CLOSED 0x0002
41 #define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */
42 #define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */
43 #define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */
44 #define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */
45 #define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
46 #define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
47 #define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN)
48 #define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED)
51 wait_queue_head_t wait;
55 struct task_struct *thread;
56 char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
57 struct circ_buf out_crc;
58 char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
59 struct circ_buf in_crc;
60 acpi_osd_exec_callback function;
65 static struct acpi_aml_io acpi_aml_io;
66 static bool acpi_aml_initialized;
67 static struct file *acpi_aml_active_reader;
68 static struct dentry *acpi_aml_dentry;
70 static inline bool __acpi_aml_running(void)
72 return acpi_aml_io.thread ? true : false;
75 static inline bool __acpi_aml_access_ok(unsigned long flag)
78 * The debugger interface is in opened state (OPENED && !CLOSED),
79 * then it is allowed to access the debugger buffers from either
80 * user space or the kernel space.
81 * In addition, for the kernel space, only the debugger thread
82 * (thread ID matched) is allowed to access.
84 if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
85 (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
86 !__acpi_aml_running())
88 if ((flag & ACPI_AML_KERN) &&
89 current != acpi_aml_io.thread)
94 static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
97 * Another read is not in progress and there is data in buffer
100 if (!(acpi_aml_io.flags & flag) && circ_count(circ))
105 static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
108 * Another write is not in progress and there is buffer space
109 * available for write.
111 if (!(acpi_aml_io.flags & flag) && circ_space(circ))
116 static inline bool __acpi_aml_busy(void)
118 if (acpi_aml_io.flags & ACPI_AML_BUSY)
123 static inline bool __acpi_aml_opened(void)
125 if (acpi_aml_io.flags & ACPI_AML_OPEN)
130 static inline bool __acpi_aml_used(void)
132 return acpi_aml_io.usages ? true : false;
135 static inline bool acpi_aml_running(void)
139 mutex_lock(&acpi_aml_io.lock);
140 ret = __acpi_aml_running();
141 mutex_unlock(&acpi_aml_io.lock);
145 static bool acpi_aml_busy(void)
149 mutex_lock(&acpi_aml_io.lock);
150 ret = __acpi_aml_busy();
151 mutex_unlock(&acpi_aml_io.lock);
155 static bool acpi_aml_used(void)
160 * The usage count is prepared to avoid race conditions between the
161 * starts and the stops of the debugger thread.
163 mutex_lock(&acpi_aml_io.lock);
164 ret = __acpi_aml_used();
165 mutex_unlock(&acpi_aml_io.lock);
169 static bool acpi_aml_kern_readable(void)
173 mutex_lock(&acpi_aml_io.lock);
174 ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
175 __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
176 mutex_unlock(&acpi_aml_io.lock);
180 static bool acpi_aml_kern_writable(void)
184 mutex_lock(&acpi_aml_io.lock);
185 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
186 __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
187 mutex_unlock(&acpi_aml_io.lock);
191 static bool acpi_aml_user_readable(void)
195 mutex_lock(&acpi_aml_io.lock);
196 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
197 __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
198 mutex_unlock(&acpi_aml_io.lock);
202 static bool acpi_aml_user_writable(void)
206 mutex_lock(&acpi_aml_io.lock);
207 ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
208 __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
209 mutex_unlock(&acpi_aml_io.lock);
213 static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
217 mutex_lock(&acpi_aml_io.lock);
218 if (!__acpi_aml_access_ok(flag)) {
222 if (!__acpi_aml_writable(circ, flag)) {
226 acpi_aml_io.flags |= flag;
228 mutex_unlock(&acpi_aml_io.lock);
232 static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
236 mutex_lock(&acpi_aml_io.lock);
237 if (!__acpi_aml_access_ok(flag)) {
241 if (!__acpi_aml_readable(circ, flag)) {
245 acpi_aml_io.flags |= flag;
247 mutex_unlock(&acpi_aml_io.lock);
251 static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
253 mutex_lock(&acpi_aml_io.lock);
254 acpi_aml_io.flags &= ~flag;
256 wake_up_interruptible(&acpi_aml_io.wait);
257 mutex_unlock(&acpi_aml_io.lock);
260 static int acpi_aml_write_kern(const char *buf, int len)
263 struct circ_buf *crc = &acpi_aml_io.out_crc;
267 ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
268 if (IS_ERR_VALUE(ret))
270 /* sync tail before inserting logs */
272 p = &crc->buf[crc->head];
273 n = min(len, circ_space_to_end(crc));
275 /* sync head after inserting logs */
277 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
278 acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
282 static int acpi_aml_readb_kern(void)
285 struct circ_buf *crc = &acpi_aml_io.in_crc;
288 ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
289 if (IS_ERR_VALUE(ret))
291 /* sync head before removing cmds */
293 p = &crc->buf[crc->tail];
295 /* sync tail before inserting cmds */
297 crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
298 acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
303 * acpi_aml_write_log() - Capture debugger output
304 * @msg: the debugger output
306 * This function should be used to implement acpi_os_printf() to filter out
307 * the debugger output and store the output into the debugger interface
308 * buffer. Return the size of stored logs or errno.
310 ssize_t acpi_aml_write_log(const char *msg)
313 int count = 0, size = 0;
315 if (!acpi_aml_initialized)
321 ret = acpi_aml_write_kern(msg + size, count);
322 if (ret == -EAGAIN) {
323 ret = wait_event_interruptible(acpi_aml_io.wait,
324 acpi_aml_kern_writable());
326 * We need to retry when the condition
333 if (IS_ERR_VALUE(ret))
338 return size > 0 ? size : ret;
340 EXPORT_SYMBOL(acpi_aml_write_log);
343 * acpi_aml_read_cmd() - Capture debugger input
344 * @msg: the debugger input
345 * @size: the size of the debugger input
347 * This function should be used to implement acpi_os_get_line() to capture
348 * the debugger input commands and store the input commands into the
349 * debugger interface buffer. Return the size of stored commands or errno.
351 ssize_t acpi_aml_read_cmd(char *msg, size_t count)
357 * This is ensured by the running fact of the debugger thread
358 * unless a bug is introduced.
360 BUG_ON(!acpi_aml_initialized);
364 * Check each input byte to find the end of the command.
366 ret = acpi_aml_readb_kern();
367 if (ret == -EAGAIN) {
368 ret = wait_event_interruptible(acpi_aml_io.wait,
369 acpi_aml_kern_readable());
371 * We need to retry when the condition becomes
377 if (IS_ERR_VALUE(ret))
379 *(msg + size) = (char)ret;
384 * acpi_os_get_line() requires a zero terminated command
387 *(msg + size - 1) = '\0';
391 return size > 0 ? size : ret;
393 EXPORT_SYMBOL(acpi_aml_read_cmd);
395 static int acpi_aml_thread(void *unsed)
397 acpi_osd_exec_callback function = NULL;
400 mutex_lock(&acpi_aml_io.lock);
401 if (acpi_aml_io.function) {
402 acpi_aml_io.usages++;
403 function = acpi_aml_io.function;
404 context = acpi_aml_io.context;
406 mutex_unlock(&acpi_aml_io.lock);
411 mutex_lock(&acpi_aml_io.lock);
412 acpi_aml_io.usages--;
413 if (!__acpi_aml_used()) {
414 acpi_aml_io.thread = NULL;
415 wake_up(&acpi_aml_io.wait);
417 mutex_unlock(&acpi_aml_io.lock);
423 * acpi_aml_create_thread() - Create AML debugger thread
424 * @function: the debugger thread callback
425 * @context: the context to be passed to the debugger thread
427 * This function should be used to implement acpi_os_execute() which is
428 * used by the ACPICA debugger to create the debugger thread.
430 int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
432 struct task_struct *t;
434 mutex_lock(&acpi_aml_io.lock);
435 acpi_aml_io.function = function;
436 acpi_aml_io.context = context;
437 mutex_unlock(&acpi_aml_io.lock);
439 t = kthread_create(acpi_aml_thread, NULL, "aml");
441 pr_err("Failed to create AML debugger thread.\n");
445 mutex_lock(&acpi_aml_io.lock);
446 acpi_aml_io.thread = t;
447 acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
449 mutex_unlock(&acpi_aml_io.lock);
452 EXPORT_SYMBOL(acpi_aml_create_thread);
454 int acpi_aml_wait_command_ready(void)
458 if (!acpi_gbl_method_executing)
459 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
461 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
463 status = acpi_os_get_line(acpi_gbl_db_line_buf,
464 ACPI_DB_LINE_BUFFER_SIZE, NULL);
465 if (ACPI_FAILURE(status))
469 EXPORT_SYMBOL(acpi_aml_wait_command_ready);
471 int acpi_aml_notify_command_complete(void)
475 EXPORT_SYMBOL(acpi_aml_notify_command_complete);
477 static int acpi_aml_open(struct inode *inode, struct file *file)
482 mutex_lock(&acpi_aml_io.lock);
484 * The debugger interface is being closed, no new user is allowed
485 * during this period.
487 if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
491 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
493 * Only one reader is allowed to initiate the debugger
496 if (acpi_aml_active_reader) {
500 pr_debug("Opening debugger reader.\n");
501 acpi_aml_active_reader = file;
505 * No writer is allowed unless the debugger thread is
508 if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
513 if (acpi_aml_active_reader == file) {
514 pr_debug("Opening debugger interface.\n");
515 mutex_unlock(&acpi_aml_io.lock);
517 pr_debug("Initializing debugger thread.\n");
518 status = acpi_initialize_debugger();
519 if (ACPI_FAILURE(status)) {
520 pr_err("Failed to initialize debugger.\n");
524 acpi_aml_io.flags |= ACPI_AML_OPENED;
525 pr_debug("Debugger thread initialized.\n");
527 mutex_lock(&acpi_aml_io.lock);
528 acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
529 acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
530 pr_debug("Debugger interface opened.\n");
534 if (IS_ERR_VALUE(ret)) {
535 if (acpi_aml_active_reader == file)
536 acpi_aml_active_reader = NULL;
538 mutex_unlock(&acpi_aml_io.lock);
542 static int acpi_aml_release(struct inode *inode, struct file *file)
544 mutex_lock(&acpi_aml_io.lock);
546 if (file == acpi_aml_active_reader) {
547 pr_debug("Closing debugger reader.\n");
548 acpi_aml_active_reader = NULL;
550 pr_debug("Closing debugger interface.\n");
551 acpi_aml_io.flags |= ACPI_AML_CLOSED;
554 * Wake up all user space/kernel space blocked
557 wake_up_interruptible(&acpi_aml_io.wait);
558 mutex_unlock(&acpi_aml_io.lock);
560 * Wait all user space/kernel space readers/writers to
561 * stop so that ACPICA command loop of the debugger thread
562 * should fail all its command line reads after this point.
564 wait_event(acpi_aml_io.wait, !acpi_aml_busy());
567 * Then we try to terminate the debugger thread if it is
570 pr_debug("Terminating debugger thread.\n");
571 acpi_terminate_debugger();
572 wait_event(acpi_aml_io.wait, !acpi_aml_used());
573 pr_debug("Debugger thread terminated.\n");
575 mutex_lock(&acpi_aml_io.lock);
576 acpi_aml_io.flags &= ~ACPI_AML_OPENED;
578 if (acpi_aml_io.users == 0) {
579 pr_debug("Debugger interface closed.\n");
580 acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
582 mutex_unlock(&acpi_aml_io.lock);
586 static int acpi_aml_read_user(char __user *buf, int len)
589 struct circ_buf *crc = &acpi_aml_io.out_crc;
593 ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
594 if (IS_ERR_VALUE(ret))
596 /* sync head before removing logs */
598 p = &crc->buf[crc->tail];
599 n = min(len, circ_count_to_end(crc));
600 ret = copy_to_user(buf, p, n);
601 if (IS_ERR_VALUE(ret))
603 /* sync tail after removing logs */
605 crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
608 acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
612 static ssize_t acpi_aml_read(struct file *file, char __user *buf,
613 size_t count, loff_t *ppos)
618 if (!buf || count < 0)
622 if (!access_ok(VERIFY_WRITE, buf, count))
627 ret = acpi_aml_read_user(buf + size, count);
628 if (ret == -EAGAIN) {
629 if (file->f_flags & O_NONBLOCK)
632 ret = wait_event_interruptible(acpi_aml_io.wait,
633 acpi_aml_user_readable());
635 * We need to retry when the condition
642 if (IS_ERR_VALUE(ret)) {
643 if (!acpi_aml_running())
654 return size > 0 ? size : ret;
657 static int acpi_aml_write_user(const char __user *buf, int len)
660 struct circ_buf *crc = &acpi_aml_io.in_crc;
664 ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
665 if (IS_ERR_VALUE(ret))
667 /* sync tail before inserting cmds */
669 p = &crc->buf[crc->head];
670 n = min(len, circ_space_to_end(crc));
671 ret = copy_from_user(p, buf, n);
672 if (IS_ERR_VALUE(ret))
674 /* sync head after inserting cmds */
676 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
679 acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
683 static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
684 size_t count, loff_t *ppos)
689 if (!buf || count < 0)
693 if (!access_ok(VERIFY_READ, buf, count))
698 ret = acpi_aml_write_user(buf + size, count);
699 if (ret == -EAGAIN) {
700 if (file->f_flags & O_NONBLOCK)
703 ret = wait_event_interruptible(acpi_aml_io.wait,
704 acpi_aml_user_writable());
706 * We need to retry when the condition
713 if (IS_ERR_VALUE(ret)) {
714 if (!acpi_aml_running())
724 return size > 0 ? size : ret;
727 static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
731 poll_wait(file, &acpi_aml_io.wait, wait);
732 if (acpi_aml_user_readable())
733 masks |= POLLIN | POLLRDNORM;
734 if (acpi_aml_user_writable())
735 masks |= POLLOUT | POLLWRNORM;
740 static const struct file_operations acpi_aml_operations = {
741 .read = acpi_aml_read,
742 .write = acpi_aml_write,
743 .poll = acpi_aml_poll,
744 .open = acpi_aml_open,
745 .release = acpi_aml_release,
746 .llseek = generic_file_llseek,
749 int __init acpi_aml_init(void)
751 if (!acpi_debugfs_dir)
753 /* Initialize AML IO interface */
754 mutex_init(&acpi_aml_io.lock);
755 init_waitqueue_head(&acpi_aml_io.wait);
756 acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
757 acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
758 acpi_aml_dentry = debugfs_create_file("acpidbg",
759 S_IFREG | S_IRUGO | S_IWUSR,
760 acpi_debugfs_dir, NULL,
761 &acpi_aml_operations);
762 if (acpi_aml_dentry == NULL)
764 acpi_aml_initialized = true;
769 void __exit acpi_aml_exit(void)
771 /* TODO: Stop the in kernel debugger */
773 debugfs_remove(acpi_aml_dentry);
774 acpi_aml_initialized = false;
777 module_init(acpi_aml_init);
778 module_exit(acpi_aml_exit);