2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/types.h>
18 #include <linux/clk.h>
19 #include <linux/pm_runtime.h>
20 #include <plat/map-base.h>
23 #include <drm/exynos_drm.h>
24 #include "exynos_drm_drv.h"
25 #include "exynos_drm_gem.h"
26 #include "exynos_drm_ipp.h"
27 #include "exynos_drm_iommu.h"
30 * IPP stands for Image Post Processing and
31 * supports image scaler/rotator and input/output DMA operations.
32 * using FIMC, GSC, Rotator, so on.
33 * IPP is integration device driver of same attribute h/w
38 * 1. expand command control id.
39 * 2. integrate property and config.
40 * 3. removed send_event id check routine.
41 * 4. compare send_event id if needed.
42 * 5. free subdrv_remove notifier callback list if needed.
43 * 6. need to check subdrv_open about multi-open.
44 * 7. need to power_on implement power and sysmmu ctrl.
/*
 * Helper macros and module-scope state.
 * get_ipp_context(): fetch the struct ipp_context stored as platform drvdata.
 * ipp_is_m2m_cmd(): true only for the memory-to-memory command type.
 * NOTE(review): the macro argument 'c' is unparenthesized in the expansion --
 * fine for current callers but fragile for expression arguments.
 */
47 #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
48 #define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
50 /* platform device pointer for ipp device. */
51 static struct platform_device *exynos_drm_ipp_pdev;
/*
 * Completion event delivered to userspace: wraps the generic DRM pending
 * event (@base) around the exynos ipp event payload (@event).
 * NOTE(review): kernel-doc delimiters and the closing brace were dropped by
 * this extraction -- verify against the complete source.
 */
54 * A structure of event.
56 * @base: base of event.
59 struct drm_exynos_ipp_send_event {
60 struct drm_pending_event base;
61 struct drm_exynos_ipp_event event;
/*
 * Per-buffer memory node queued on a command node's src/dst mem_list.
 * Holds the GEM handles / DMA addresses (@buf_info) for one enqueued buffer.
 * NOTE(review): @prop_id and @buf_id fields documented below are not visible
 * in this extraction (lines dropped) -- confirm against the complete source.
 */
65 * A structure of memory node.
67 * @list: list head to memory queue information.
68 * @ops_id: id of operations.
69 * @prop_id: id of property.
70 * @buf_id: id of buffer.
71 * @buf_info: gem objects and dma address, size.
72 * @filp: a pointer to drm_file.
74 struct drm_exynos_ipp_mem_node {
75 struct list_head list;
76 enum drm_exynos_ops_id ops_id;
79 struct drm_exynos_ipp_buf_info buf_info;
80 struct drm_file *filp;
/*
 * Driver-wide context: subdrv registration, the ipp/property idr tables with
 * their locks, and the event/command workqueues.
 * NOTE(review): the 'struct ipp_context {' opening line and the idr member
 * lines were dropped by this extraction -- verify against the full source.
 */
84 * A structure of ipp context.
86 * @subdrv: prepare initialization using subdrv.
87 * @ipp_lock: lock for synchronization of access to ipp_idr.
88 * @prop_lock: lock for synchronization of access to prop_idr.
89 * @ipp_idr: ipp driver idr.
90 * @prop_idr: property idr.
91 * @event_workq: event work queue.
92 * @cmd_workq: command work queue.
95 struct exynos_drm_subdrv subdrv;
96 struct mutex ipp_lock;
97 struct mutex prop_lock;
100 struct workqueue_struct *event_workq;
101 struct workqueue_struct *cmd_workq;
/*
 * Global registry of ipp drivers (protected by exynos_drm_ippdrv_lock) and
 * the blocking notifier chain used by exynos_drm_ippnb_send_event().
 */
104 static LIST_HEAD(exynos_drm_ippdrv_list);
105 static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
106 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
/*
 * Register the singleton "exynos-drm-ipp" platform device.
 * Bails out if one is already registered; on success caches the pdev in
 * exynos_drm_ipp_pdev. NOTE(review): the early-return value, the IS_ERR()
 * check and the final return were dropped by this extraction.
 */
108 int exynos_platform_device_ipp_register(void)
110 struct platform_device *pdev;
112 if (exynos_drm_ipp_pdev)
115 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
117 return PTR_ERR(pdev);
119 exynos_drm_ipp_pdev = pdev;
/*
 * Tear down the singleton ipp platform device, if any, and clear the cached
 * pointer so a later register call can succeed again.
 */
124 void exynos_platform_device_ipp_unregister(void)
126 if (exynos_drm_ipp_pdev) {
127 platform_device_unregister(exynos_drm_ipp_pdev);
128 exynos_drm_ipp_pdev = NULL;
/*
 * Add an ipp driver to the tail of the global driver list under the
 * registry mutex. NOTE(review): NULL-check and return dropped by extraction.
 */
132 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
137 mutex_lock(&exynos_drm_ippdrv_lock);
138 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
139 mutex_unlock(&exynos_drm_ippdrv_lock);
/*
 * Remove an ipp driver from the global driver list under the registry mutex.
 */
144 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
149 mutex_lock(&exynos_drm_ippdrv_lock);
150 list_del(&ippdrv->drv_list);
151 mutex_unlock(&exynos_drm_ippdrv_lock);
/*
 * Allocate an idr id (starting at 1) for @obj while holding @lock.
 * NOTE(review): the lock/unlock calls, error handling and the out-parameter
 * that receives the new id were dropped by this extraction.
 */
156 static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
161 /* do the allocation under our mutexlock */
163 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
/*
 * Look up an object by id in @id_idr; returns ERR_PTR(-ENODEV) on a miss.
 * NOTE(review): the mutex lock/unlock around idr_find() and the success
 * return were dropped by this extraction.
 */
172 static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
176 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
180 /* find object using handle */
181 obj = idr_find(id_idr, id);
183 DRM_ERROR("failed to find object.\n");
185 return ERR_PTR(-ENODEV);
/*
 * Return whether @ippdrv is unavailable for @cmd: either it is already
 * dedicated, or a non-M2M (WB/OUTPUT) command is requested while the device
 * is not runtime-suspended (i.e. busy). NOTE(review): the 'return true/false'
 * lines were dropped by this extraction.
 */
193 static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
194 enum drm_exynos_ipp_cmd cmd)
197 * check dedicated flag and WB, OUTPUT operation with
200 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
201 !pm_runtime_suspended(ippdrv->dev)))
/*
 * Select an ipp driver for @property.
 * If the user supplied an ipp_id, look it up in ctx->ipp_idr and verify it
 * is neither dedicated/busy (-EBUSY) nor rejects the property via its
 * check_property() callback (-EINVAL). If ipp_id is 0, scan the global
 * driver list for the first driver that is free and accepts the property;
 * returns ERR_PTR(-ENODEV) when none matches.
 * NOTE(review): several lines (ipp_id==0 branch, success returns, loop
 * 'continue' paths) were dropped by this extraction.
 */
207 static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
208 struct drm_exynos_ipp_property *property)
210 struct exynos_drm_ippdrv *ippdrv;
211 u32 ipp_id = property->ipp_id;
213 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
216 /* find ipp driver using idr */
217 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
219 if (IS_ERR(ippdrv)) {
220 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
225 * WB, OUTPUT opertion not supported multi-operation.
226 * so, make dedicated state at set property ioctl.
227 * when ipp driver finished operations, clear dedicated flags.
229 if (ipp_check_dedicated(ippdrv, property->cmd)) {
230 DRM_ERROR("already used choose device.\n");
231 return ERR_PTR(-EBUSY);
235 * This is necessary to find correct device in ipp drivers.
236 * ipp drivers have different abilities,
237 * so need to check property.
239 if (ippdrv->check_property &&
240 ippdrv->check_property(ippdrv->dev, property)) {
241 DRM_ERROR("not support property.\n");
242 return ERR_PTR(-EINVAL);
248 * This case is search all ipp driver for finding.
249 * user application don't set ipp_id in this case,
250 * so ipp subsystem search correct driver in driver list.
252 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
253 if (ipp_check_dedicated(ippdrv, property->cmd)) {
254 DRM_DEBUG_KMS("%s:used device.\n", __func__);
258 if (ippdrv->check_property &&
259 ippdrv->check_property(ippdrv->dev, property)) {
260 DRM_DEBUG_KMS("%s:not support property.\n",
268 DRM_ERROR("not support ipp driver operations.\n");
271 return ERR_PTR(-ENODEV);
/*
 * Find the ipp driver owning the command node with @prop_id by walking the
 * global driver list and each driver's cmd_list. Used when only a prop_id
 * handle is available (e.g. PAUSE, queue buf, command control paths).
 * Returns ERR_PTR(-ENODEV) when the list is empty or nothing matches.
 * NOTE(review): the success 'return ippdrv' on a prop_id match was dropped
 * by this extraction.
 */
274 static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
276 struct exynos_drm_ippdrv *ippdrv;
277 struct drm_exynos_ipp_cmd_node *c_node;
280 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
282 if (list_empty(&exynos_drm_ippdrv_list)) {
283 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
284 return ERR_PTR(-ENODEV);
288 * This case is search ipp driver by prop_id handle.
289 * sometimes, ipp subsystem find driver by prop_id.
290 * e.g PAUSE state, queue buf, command contro.
292 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
293 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
294 count++, (int)ippdrv);
296 if (!list_empty(&ippdrv->cmd_list)) {
297 list_for_each_entry(c_node, &ippdrv->cmd_list, list)
298 if (c_node->property.prop_id == prop_id)
303 return ERR_PTR(-ENODEV);
/*
 * GET_PROPERTY ioctl: two-step capability query for userspace.
 * With ipp_id == 0, report only the number of registered ipp drivers in
 * prop_list->count; with a concrete ipp_id, look the driver up in the idr
 * and hand back that driver's capability list.
 * NOTE(review): validation early-returns, the count increment inside the
 * list walk and the final return were dropped by this extraction.
 */
306 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
307 struct drm_file *file)
309 struct drm_exynos_file_private *file_priv = file->driver_priv;
310 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
311 struct device *dev = priv->dev;
312 struct ipp_context *ctx = get_ipp_context(dev);
313 struct drm_exynos_ipp_prop_list *prop_list = data;
314 struct exynos_drm_ippdrv *ippdrv;
318 DRM_ERROR("invalid context.\n");
323 DRM_ERROR("invalid property parameter.\n");
327 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
329 if (!prop_list->ipp_id) {
330 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
333 * Supports ippdrv list count for user application.
334 * First step user application getting ippdrv count.
335 * and second step getting ippdrv capability using ipp_id.
337 prop_list->count = count;
340 * Getting ippdrv capability by ipp_id.
341 * some deivce not supported wb, output interface.
342 * so, user application detect correct ipp driver
345 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
348 DRM_ERROR("not found ipp%d driver.\n",
353 prop_list = ippdrv->prop_list;
/*
 * Debug helper: dump one src/dst config of @property (format, position,
 * size, flip, rotation) via DRM_DEBUG_KMS. idx 0 = source, 1 = destination.
 */
359 static void ipp_print_property(struct drm_exynos_ipp_property *property,
362 struct drm_exynos_ipp_config *config = &property->config[idx];
363 struct drm_exynos_pos *pos = &config->pos;
364 struct drm_exynos_sz *sz = &config->sz;
366 DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
367 __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
369 DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
370 __func__, pos->x, pos->y, pos->w, pos->h,
371 sz->hsize, sz->vsize, config->flip, config->degree);
/*
 * Re-use an existing prop_id (e.g. after PAUSE): find the driver holding
 * @property->prop_id, then locate the stopped command node with the same id
 * and overwrite its stored property. Errors when no stopped node matches.
 * NOTE(review): the success return and the final error return value were
 * dropped by this extraction.
 */
374 static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
376 struct exynos_drm_ippdrv *ippdrv;
377 struct drm_exynos_ipp_cmd_node *c_node;
378 u32 prop_id = property->prop_id;
380 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
382 ippdrv = ipp_find_drv_by_handle(prop_id);
383 if (IS_ERR(ippdrv)) {
384 DRM_ERROR("failed to get ipp driver.\n");
389 * Find command node using command list in ippdrv.
390 * when we find this command no using prop_id.
391 * return property information set in this command node.
393 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
394 if ((c_node->property.prop_id == prop_id) &&
395 (c_node->state == IPP_STATE_STOP)) {
396 DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
397 __func__, property->cmd, (int)ippdrv);
399 c_node->property = *property;
404 DRM_ERROR("failed to search property.\n");
/*
 * Allocate a zeroed command work item and bind it to ipp_sched_cmd().
 * Returns ERR_PTR(-ENOMEM) on allocation failure; caller must kfree().
 */
409 static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
411 struct drm_exynos_ipp_cmd_work *cmd_work;
413 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
415 DRM_ERROR("failed to alloc cmd_work.\n");
416 return ERR_PTR(-ENOMEM);
419 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
/*
 * Allocate a zeroed event work item and bind it to ipp_sched_event().
 * Returns ERR_PTR(-ENOMEM) on allocation failure; caller must kfree().
 */
424 static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
426 struct drm_exynos_ipp_event_work *event_work;
428 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
430 DRM_ERROR("failed to alloc event_work.\n");
431 return ERR_PTR(-ENOMEM);
434 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
/*
 * SET_PROPERTY ioctl: create (or re-use) a command node for @property.
 * If prop_id is already set, delegate to ipp_find_and_set_property() to
 * reuse the existing node. Otherwise: pick a driver, allocate a command
 * node, assign a new prop_id via the prop idr, create start/stop/event work
 * items, init locks/completions/lists, link the node onto the driver's
 * cmd_list, and mark the driver dedicated for non-M2M commands.
 * NOTE(review): error-path gotos/returns and several intervening lines
 * (e.g. the per-ops print loop header) were dropped by this extraction;
 * the trailing kfree() lines are the visible remnant of the unwind path.
 */
439 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
440 struct drm_file *file)
442 struct drm_exynos_file_private *file_priv = file->driver_priv;
443 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
444 struct device *dev = priv->dev;
445 struct ipp_context *ctx = get_ipp_context(dev);
446 struct drm_exynos_ipp_property *property = data;
447 struct exynos_drm_ippdrv *ippdrv;
448 struct drm_exynos_ipp_cmd_node *c_node;
452 DRM_ERROR("invalid context.\n");
457 DRM_ERROR("invalid property parameter.\n");
462 * This is log print for user application property.
463 * user application set various property.
466 ipp_print_property(property, i);
469 * set property ioctl generated new prop_id.
470 * but in this case already asigned prop_id using old set property.
471 * e.g PAUSE state. this case supports find current prop_id and use it
472 * instead of allocation.
474 if (property->prop_id) {
475 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
476 return ipp_find_and_set_property(property);
479 /* find ipp driver using ipp id */
480 ippdrv = ipp_find_driver(ctx, property);
481 if (IS_ERR(ippdrv)) {
482 DRM_ERROR("failed to get ipp driver.\n");
486 /* allocate command node */
487 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
489 DRM_ERROR("failed to allocate map node.\n");
493 /* create property id */
494 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
497 DRM_ERROR("failed to create id.\n");
501 DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
502 __func__, property->prop_id, property->cmd, (int)ippdrv);
504 /* stored property information and ippdrv in private data */
506 c_node->property = *property;
507 c_node->state = IPP_STATE_IDLE;
509 c_node->start_work = ipp_create_cmd_work();
510 if (IS_ERR(c_node->start_work)) {
511 DRM_ERROR("failed to create start work.\n");
515 c_node->stop_work = ipp_create_cmd_work();
516 if (IS_ERR(c_node->stop_work)) {
517 DRM_ERROR("failed to create stop work.\n");
521 c_node->event_work = ipp_create_event_work();
522 if (IS_ERR(c_node->event_work)) {
523 DRM_ERROR("failed to create event work.\n");
527 mutex_init(&c_node->cmd_lock);
528 mutex_init(&c_node->mem_lock);
529 mutex_init(&c_node->event_lock);
531 init_completion(&c_node->start_complete);
532 init_completion(&c_node->stop_complete);
535 INIT_LIST_HEAD(&c_node->mem_list[i]);
537 INIT_LIST_HEAD(&c_node->event_list);
538 list_splice_init(&priv->event_list, &c_node->event_list);
539 list_add_tail(&c_node->list, &ippdrv->cmd_list);
541 /* make dedicated state without m2m */
542 if (!ipp_is_m2m_cmd(property->cmd))
543 ippdrv->dedicated = true;
548 kfree(c_node->stop_work);
550 kfree(c_node->start_work);
/*
 * Tear down a command node: unlink it from its driver's cmd_list, destroy
 * its mutexes, and free the start/stop/event work items.
 * NOTE(review): the final kfree(c_node) is not visible in this extraction --
 * confirm the node itself is freed in the complete source.
 */
556 static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
559 list_del(&c_node->list);
562 mutex_destroy(&c_node->cmd_lock);
563 mutex_destroy(&c_node->mem_lock);
564 mutex_destroy(&c_node->event_lock);
566 /* free command node */
567 kfree(c_node->start_work);
568 kfree(c_node->stop_work);
569 kfree(c_node->event_work);
/*
 * Count queued buffers on the src and dst mem_lists under mem_lock.
 * For M2M commands buffers must be paired, so the result is the minimum of
 * the two counts; for WB/OUTPUT only one side is used, so the maximum is
 * returned. Callers treat 0 as "no work available".
 * NOTE(review): the count[i]++ inside the list walk and the final 'return
 * ret' were dropped by this extraction.
 */
573 static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
575 struct drm_exynos_ipp_property *property = &c_node->property;
576 struct drm_exynos_ipp_mem_node *m_node;
577 struct list_head *head;
578 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
580 mutex_lock(&c_node->mem_lock);
582 for_each_ipp_ops(i) {
583 /* source/destination memory list */
584 head = &c_node->mem_list[i];
586 if (list_empty(head)) {
587 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
592 /* find memory node entry */
593 list_for_each_entry(m_node, head, list) {
594 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
595 i ? "dst" : "src", count[i], (int)m_node);
600 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
601 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
602 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
605 * M2M operations should be need paired memory address.
606 * so, need to check minimum count about src, dst.
607 * other case not use paired memory, so use maximum count
609 if (ipp_is_m2m_cmd(property->cmd))
610 ret = min(count[EXYNOS_DRM_OPS_SRC],
611 count[EXYNOS_DRM_OPS_DST]);
613 ret = max(count[EXYNOS_DRM_OPS_SRC],
614 count[EXYNOS_DRM_OPS_DST]);
616 mutex_unlock(&c_node->mem_lock);
/*
 * Scan the src/dst mem_list selected by qbuf->ops_id for the node whose
 * buf_id matches qbuf->buf_id. NOTE(review): the 'return m_node' on match
 * and the NULL/fallthrough return were dropped by this extraction.
 */
621 static struct drm_exynos_ipp_mem_node
622 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
623 struct drm_exynos_ipp_queue_buf *qbuf)
625 struct drm_exynos_ipp_mem_node *m_node;
626 struct list_head *head;
629 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
631 /* source/destination memory list */
632 head = &c_node->mem_list[qbuf->ops_id];
634 /* find memory node from memory list */
635 list_for_each_entry(m_node, head, list) {
636 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
637 __func__, count++, (int)m_node);
639 /* compare buffer id */
640 if (m_node->buf_id == qbuf->buf_id)
/*
 * Program one memory node into the hardware: look up the ops table for the
 * node's direction and call its set_addr() callback with IPP_BUF_ENQUEUE,
 * all under the command node's mem_lock.
 * NOTE(review): validation early-returns, the 'if (ops && ops->set_addr)'
 * guard and the final return were dropped by this extraction.
 */
647 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
648 struct drm_exynos_ipp_cmd_node *c_node,
649 struct drm_exynos_ipp_mem_node *m_node)
651 struct exynos_drm_ipp_ops *ops = NULL;
654 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
657 DRM_ERROR("invalid queue node.\n");
661 mutex_lock(&c_node->mem_lock);
663 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
665 /* get operations callback */
666 ops = ippdrv->ops[m_node->ops_id];
668 DRM_ERROR("not support ops.\n");
673 /* set address and enable irq */
675 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
676 m_node->buf_id, IPP_BUF_ENQUEUE);
678 DRM_ERROR("failed to set addr.\n");
684 mutex_unlock(&c_node->mem_lock);
/*
 * Build a memory node for an enqueued buffer: record ops/prop/buf ids,
 * resolve each GEM handle to its DMA address via
 * exynos_drm_gem_get_dma_addr(), store the result in buf_info, and append
 * the node to the matching src/dst mem_list -- all under mem_lock.
 * Returns ERR_PTR(-EFAULT) on the error path (visible at the tail).
 * NOTE(review): the kzalloc failure return, the IS_ERR_OR_NULL(addr) check
 * and the success 'return m_node' were dropped by this extraction.
 */
688 static struct drm_exynos_ipp_mem_node
689 *ipp_get_mem_node(struct drm_device *drm_dev,
690 struct drm_file *file,
691 struct drm_exynos_ipp_cmd_node *c_node,
692 struct drm_exynos_ipp_queue_buf *qbuf)
694 struct drm_exynos_ipp_mem_node *m_node;
695 struct drm_exynos_ipp_buf_info buf_info;
699 mutex_lock(&c_node->mem_lock);
701 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
703 DRM_ERROR("failed to allocate queue node.\n");
707 /* clear base address for error handling */
708 memset(&buf_info, 0x0, sizeof(buf_info));
710 /* operations, buffer id */
711 m_node->ops_id = qbuf->ops_id;
712 m_node->prop_id = qbuf->prop_id;
713 m_node->buf_id = qbuf->buf_id;
715 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
716 (int)m_node, qbuf->ops_id);
717 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
718 qbuf->prop_id, m_node->buf_id);
720 for_each_ipp_planar(i) {
721 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
724 /* get dma address by handle */
725 if (qbuf->handle[i]) {
726 addr = exynos_drm_gem_get_dma_addr(drm_dev,
727 qbuf->handle[i], file);
729 DRM_ERROR("failed to get addr.\n");
733 buf_info.handles[i] = qbuf->handle[i];
734 buf_info.base[i] = *(dma_addr_t *) addr;
735 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
736 __func__, i, buf_info.base[i],
737 (int)buf_info.handles[i]);
742 m_node->buf_info = buf_info;
743 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
745 mutex_unlock(&c_node->mem_lock);
751 mutex_unlock(&c_node->mem_lock);
752 return ERR_PTR(-EFAULT);
/*
 * Release one memory node: drop the DMA-address references for each planar
 * GEM handle, unlink the node from its mem_list, and free it -- under
 * mem_lock. NOTE(review): validation returns, the 'if (handle)' guard and
 * kfree(m_node) were dropped by this extraction.
 */
755 static int ipp_put_mem_node(struct drm_device *drm_dev,
756 struct drm_exynos_ipp_cmd_node *c_node,
757 struct drm_exynos_ipp_mem_node *m_node)
761 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
764 DRM_ERROR("invalid dequeue node.\n");
768 if (list_empty(&m_node->list)) {
769 DRM_ERROR("empty memory node.\n");
773 mutex_lock(&c_node->mem_lock);
775 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
778 for_each_ipp_planar(i) {
779 unsigned long handle = m_node->buf_info.handles[i];
781 exynos_drm_gem_put_dma_addr(drm_dev, handle,
785 /* delete list in queue */
786 list_del(&m_node->list);
789 mutex_unlock(&c_node->mem_lock);
/*
 * drm_pending_event destroy callback for ipp send events.
 * NOTE(review): the body (presumably kfree(event)) was dropped by this
 * extraction -- confirm against the complete source.
 */
794 static void ipp_free_event(struct drm_pending_event *event)
/*
 * Allocate and queue a completion event for a destination buffer: fill the
 * DRM_EXYNOS_IPP_EVENT payload from @qbuf, wire the pending-event base
 * (file_priv, destroy callback), and append it to the command node's
 * event_list. On allocation failure the reserved event_space is returned
 * to the file under event_lock.
 * NOTE(review): the event_space reservation step and return values were
 * dropped by this extraction.
 */
799 static int ipp_get_event(struct drm_device *drm_dev,
800 struct drm_file *file,
801 struct drm_exynos_ipp_cmd_node *c_node,
802 struct drm_exynos_ipp_queue_buf *qbuf)
804 struct drm_exynos_ipp_send_event *e;
807 DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
808 qbuf->ops_id, qbuf->buf_id);
810 e = kzalloc(sizeof(*e), GFP_KERNEL);
813 DRM_ERROR("failed to allocate event.\n");
814 spin_lock_irqsave(&drm_dev->event_lock, flags);
815 file->event_space += sizeof(e->event);
816 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
821 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
822 e->event.base.length = sizeof(e->event);
823 e->event.user_data = qbuf->user_data;
824 e->event.prop_id = qbuf->prop_id;
825 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
826 e->base.event = &e->event.base;
827 e->base.file_priv = file;
828 e->base.destroy = ipp_free_event;
829 list_add_tail(&e->base.link, &c_node->event_list);
/*
 * Remove pending events from the command node's event_list.
 * qbuf == NULL deletes every event (used when stopping); otherwise only
 * events whose destination buf_id matches qbuf->buf_id are removed.
 * NOTE(review): the 'if (!qbuf)' branch header and the event destroy/free
 * calls were dropped by this extraction.
 */
834 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
835 struct drm_exynos_ipp_queue_buf *qbuf)
837 struct drm_exynos_ipp_send_event *e, *te;
840 if (list_empty(&c_node->event_list)) {
841 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
845 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
846 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
847 __func__, count++, (int)e);
850 * quf == NULL condition means all event deletion.
851 * stop operations want to delete all event list.
852 * another case delete only same buf id.
856 list_del(&e->base.link);
860 /* compare buffer id */
861 if (qbuf && (qbuf->buf_id ==
862 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
864 list_del(&e->base.link);
/*
 * Fill a command work item with its driver and command node, then queue it
 * on the context's cmd workqueue for execution by ipp_sched_cmd().
 */
871 static void ipp_handle_cmd_work(struct device *dev,
872 struct exynos_drm_ippdrv *ippdrv,
873 struct drm_exynos_ipp_cmd_work *cmd_work,
874 struct drm_exynos_ipp_cmd_node *c_node)
876 struct ipp_context *ctx = get_ipp_context(dev);
878 cmd_work->ippdrv = ippdrv;
879 cmd_work->c_node = c_node;
880 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
/*
 * Kick processing for a freshly enqueued buffer. No-op (success) unless the
 * command node is running and both mem lists have work. For M2M commands a
 * PLAY command work is queued so the transfer starts from queue_buf; for
 * WB/OUTPUT the buffer address is programmed directly via ipp_set_mem_node.
 * NOTE(review): early 'return 0' paths, the ops NULL-check return and the
 * final return were dropped by this extraction.
 */
883 static int ipp_queue_buf_with_run(struct device *dev,
884 struct drm_exynos_ipp_cmd_node *c_node,
885 struct drm_exynos_ipp_mem_node *m_node,
886 struct drm_exynos_ipp_queue_buf *qbuf)
888 struct exynos_drm_ippdrv *ippdrv;
889 struct drm_exynos_ipp_property *property;
890 struct exynos_drm_ipp_ops *ops;
893 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
894 if (IS_ERR(ippdrv)) {
895 DRM_ERROR("failed to get ipp driver.\n");
899 ops = ippdrv->ops[qbuf->ops_id];
901 DRM_ERROR("failed to get ops.\n");
905 property = &c_node->property;
907 if (c_node->state != IPP_STATE_START) {
908 DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
912 if (!ipp_check_mem_list(c_node)) {
913 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
918 * If set destination buffer and enabled clock,
919 * then m2m operations need start operations at queue_buf
921 if (ipp_is_m2m_cmd(property->cmd)) {
922 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
924 cmd_work->ctrl = IPP_CTRL_PLAY;
925 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
927 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
929 DRM_ERROR("failed to set m node.\n");
/*
 * Drop every memory node on the qbuf->ops_id list that matches the given
 * buf_id/ops_id pair, releasing each via ipp_put_mem_node().
 */
937 static void ipp_clean_queue_buf(struct drm_device *drm_dev,
938 struct drm_exynos_ipp_cmd_node *c_node,
939 struct drm_exynos_ipp_queue_buf *qbuf)
941 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
943 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
945 list_for_each_entry_safe(m_node, tm_node,
946 &c_node->mem_list[qbuf->ops_id], list) {
947 if (m_node->buf_id == qbuf->buf_id &&
948 m_node->ops_id == qbuf->ops_id)
949 ipp_put_mem_node(drm_dev, c_node, m_node);
/*
 * QUEUE_BUF ioctl. ENQUEUE: build a memory node from the user's GEM
 * handles, attach a completion event when the buffer is a destination, then
 * start/arm processing via ipp_queue_buf_with_run(). DEQUEUE: under
 * cmd_lock, drop any pending destination event and release the matching
 * memory node. On enqueue failure the node is cleaned (err_clean_node tail).
 * NOTE(review): parameter validation returns, the prop_idr lookup argument
 * line, the success return and the err_clean_node label line were dropped
 * by this extraction.
 */
954 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
955 struct drm_file *file)
957 struct drm_exynos_file_private *file_priv = file->driver_priv;
958 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
959 struct device *dev = priv->dev;
960 struct ipp_context *ctx = get_ipp_context(dev);
961 struct drm_exynos_ipp_queue_buf *qbuf = data;
962 struct drm_exynos_ipp_cmd_node *c_node;
963 struct drm_exynos_ipp_mem_node *m_node;
967 DRM_ERROR("invalid buf parameter.\n");
971 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
972 DRM_ERROR("invalid ops parameter.\n");
976 DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
977 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
978 qbuf->buf_id, qbuf->buf_type);
980 /* find command node */
981 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
984 DRM_ERROR("failed to get command node.\n");
989 switch (qbuf->buf_type) {
990 case IPP_BUF_ENQUEUE:
991 /* get memory node */
992 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
993 if (IS_ERR(m_node)) {
994 DRM_ERROR("failed to get m_node.\n");
995 return PTR_ERR(m_node);
999 * first step get event for destination buffer.
1000 * and second step when M2M case run with destination buffer
1003 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
1004 /* get event for destination buffer */
1005 ret = ipp_get_event(drm_dev, file, c_node, qbuf);
1007 DRM_ERROR("failed to get event.\n");
1008 goto err_clean_node;
1012 * M2M case run play control for streaming feature.
1013 * other case set address and waiting.
1015 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
1017 DRM_ERROR("failed to run command.\n");
1018 goto err_clean_node;
1022 case IPP_BUF_DEQUEUE:
1023 mutex_lock(&c_node->cmd_lock);
1025 /* put event for destination buffer */
1026 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
1027 ipp_put_event(c_node, qbuf);
1029 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1031 mutex_unlock(&c_node->cmd_lock);
1034 DRM_ERROR("invalid buffer control.\n");
1041 DRM_ERROR("clean memory nodes.\n");
1043 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
/*
 * Validate a ctrl request against the command node's current state machine:
 * PLAY requires IDLE, STOP rejects an already-STOPped node, PAUSE requires
 * START, RESUME requires STOP. Any command other than PLAY also requires
 * the device not to be runtime-suspended. Returns false (with an error log)
 * for invalid combinations.
 * NOTE(review): the switch header, 'return true/false' lines and several
 * case labels were dropped by this extraction.
 */
1047 static bool exynos_drm_ipp_check_valid(struct device *dev,
1048 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1050 if (ctrl != IPP_CTRL_PLAY) {
1051 if (pm_runtime_suspended(dev)) {
1052 DRM_ERROR("pm:runtime_suspended.\n");
1059 if (state != IPP_STATE_IDLE)
1063 if (state == IPP_STATE_STOP)
1066 case IPP_CTRL_PAUSE:
1067 if (state != IPP_STATE_START)
1070 case IPP_CTRL_RESUME:
1071 if (state != IPP_STATE_STOP)
1075 DRM_ERROR("invalid state.\n");
1083 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
/*
 * CMD_CTRL ioctl: drive the per-command state machine.
 * PLAY: take a runtime-PM reference if suspended, queue the start work and
 * mark the node START. STOP: queue the stop work, wait up to 300 ms for
 * stop_complete, mark STOP, clear the driver's dedicated flag, clean the
 * node, and drop the PM reference when the driver's cmd_list empties.
 * PAUSE: like STOP but with a 200 ms wait and no teardown. RESUME: mark
 * START and queue the start work again.
 * NOTE(review): validation returns, 'case IPP_CTRL_PLAY/STOP:' labels,
 * 'break' statements and the final return were dropped by this extraction;
 * note c_node->state is visibly set to IPP_STATE_START both before and
 * after queuing the start work (lines 1135/1140) -- confirm intent in the
 * complete source.
 */
1087 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1088 struct drm_file *file)
1090 struct drm_exynos_file_private *file_priv = file->driver_priv;
1091 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1092 struct exynos_drm_ippdrv *ippdrv = NULL;
1093 struct device *dev = priv->dev;
1094 struct ipp_context *ctx = get_ipp_context(dev);
1095 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1096 struct drm_exynos_ipp_cmd_work *cmd_work;
1097 struct drm_exynos_ipp_cmd_node *c_node;
1100 DRM_ERROR("invalid context.\n");
1105 DRM_ERROR("invalid control parameter.\n");
1109 DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1110 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1112 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1113 if (IS_ERR(ippdrv)) {
1114 DRM_ERROR("failed to get ipp driver.\n");
1115 return PTR_ERR(ippdrv);
1118 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1121 DRM_ERROR("invalid command node list.\n");
1125 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1127 DRM_ERROR("invalid state.\n");
1131 switch (cmd_ctrl->ctrl) {
1133 if (pm_runtime_suspended(ippdrv->dev))
1134 pm_runtime_get_sync(ippdrv->dev);
1135 c_node->state = IPP_STATE_START;
1137 cmd_work = c_node->start_work;
1138 cmd_work->ctrl = cmd_ctrl->ctrl;
1139 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1140 c_node->state = IPP_STATE_START;
1143 cmd_work = c_node->stop_work;
1144 cmd_work->ctrl = cmd_ctrl->ctrl;
1145 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1147 if (!wait_for_completion_timeout(&c_node->stop_complete,
1148 msecs_to_jiffies(300))) {
1149 DRM_ERROR("timeout stop:prop_id[%d]\n",
1150 c_node->property.prop_id);
1153 c_node->state = IPP_STATE_STOP;
1154 ippdrv->dedicated = false;
1155 ipp_clean_cmd_node(c_node);
1157 if (list_empty(&ippdrv->cmd_list))
1158 pm_runtime_put_sync(ippdrv->dev);
1160 case IPP_CTRL_PAUSE:
1161 cmd_work = c_node->stop_work;
1162 cmd_work->ctrl = cmd_ctrl->ctrl;
1163 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1165 if (!wait_for_completion_timeout(&c_node->stop_complete,
1166 msecs_to_jiffies(200))) {
1167 DRM_ERROR("timeout stop:prop_id[%d]\n",
1168 c_node->property.prop_id);
1171 c_node->state = IPP_STATE_STOP;
1173 case IPP_CTRL_RESUME:
1174 c_node->state = IPP_STATE_START;
1175 cmd_work = c_node->start_work;
1176 cmd_work->ctrl = cmd_ctrl->ctrl;
1177 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1180 DRM_ERROR("could not support this state currently.\n");
1184 DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1185 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
/*
 * Subscribe @nb to ipp notifier events on the module's blocking chain.
 */
1190 int exynos_drm_ippnb_register(struct notifier_block *nb)
1192 return blocking_notifier_chain_register(
1193 &exynos_drm_ippnb_list, nb);
/*
 * Unsubscribe @nb from the ipp blocking notifier chain.
 */
1196 int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1198 return blocking_notifier_chain_unregister(
1199 &exynos_drm_ippnb_list, nb);
/*
 * Broadcast event @val with payload @v to all registered ipp notifiers.
 */
1202 int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1204 return blocking_notifier_call_chain(
1205 &exynos_drm_ippnb_list, val, v);
/*
 * Program @property into the hardware: reset the block if the driver
 * provides reset(), then for both src and dst configs call the per-ops
 * set_fmt(), optional set_transf() (rotation/flip, yielding a swap flag for
 * sizes) and optional set_size() callbacks.
 * NOTE(review): error returns after each callback, the set_fmt guard and
 * the final 'return 0' were dropped by this extraction.
 */
1208 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1209 struct drm_exynos_ipp_property *property)
1211 struct exynos_drm_ipp_ops *ops = NULL;
1216 DRM_ERROR("invalid property parameter.\n");
1220 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1222 /* reset h/w block */
1223 if (ippdrv->reset &&
1224 ippdrv->reset(ippdrv->dev)) {
1225 DRM_ERROR("failed to reset.\n");
1229 /* set source,destination operations */
1230 for_each_ipp_ops(i) {
1231 struct drm_exynos_ipp_config *config =
1232 &property->config[i];
1234 ops = ippdrv->ops[i];
1235 if (!ops || !config) {
1236 DRM_ERROR("not support ops and config.\n");
1242 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1244 DRM_ERROR("not support format.\n");
1249 /* set transform for rotation, flip */
1250 if (ops->set_transf) {
1251 ret = ops->set_transf(ippdrv->dev, config->degree,
1252 config->flip, &swap);
1254 DRM_ERROR("not support tranf.\n");
1260 if (ops->set_size) {
1261 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1264 DRM_ERROR("not support size.\n");
/*
 * Bring a command node to the running state: attach it to the driver,
 * verify both mem lists have work, push the property into hardware via
 * ipp_set_property(), program the queued buffers (first src+dst pair for
 * M2M; every dst for WB; every src for OUTPUT), and finally invoke the
 * driver's start() callback.
 * NOTE(review): 'case IPP_CMD_M2M/WB:' labels, intermediate returns, break
 * statements and the final return were dropped by this extraction.
 */
1273 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1274 struct drm_exynos_ipp_cmd_node *c_node)
1276 struct drm_exynos_ipp_mem_node *m_node;
1277 struct drm_exynos_ipp_property *property = &c_node->property;
1278 struct list_head *head;
1281 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1283 /* store command info in ippdrv */
1284 ippdrv->c_node = c_node;
1286 if (!ipp_check_mem_list(c_node)) {
1287 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1291 /* set current property in ippdrv */
1292 ret = ipp_set_property(ippdrv, property);
1294 DRM_ERROR("failed to set property.\n");
1295 ippdrv->c_node = NULL;
1300 switch (property->cmd) {
1302 for_each_ipp_ops(i) {
1303 /* source/destination memory list */
1304 head = &c_node->mem_list[i];
1306 m_node = list_first_entry(head,
1307 struct drm_exynos_ipp_mem_node, list);
1309 DRM_ERROR("failed to get node.\n");
1314 DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
1315 __func__, (int)m_node);
1317 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1319 DRM_ERROR("failed to set m node.\n");
1325 /* destination memory list */
1326 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1328 list_for_each_entry(m_node, head, list) {
1329 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1331 DRM_ERROR("failed to set m node.\n");
1336 case IPP_CMD_OUTPUT:
1337 /* source memory list */
1338 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1340 list_for_each_entry(m_node, head, list) {
1341 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1343 DRM_ERROR("failed to set m node.\n");
1349 DRM_ERROR("invalid operations.\n");
1353 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
1355 /* start operations */
1356 if (ippdrv->start) {
1357 ret = ippdrv->start(ippdrv->dev, property->cmd);
1359 DRM_ERROR("failed to start ops.\n");
/*
 * Stop a running command node: discard all pending events, release every
 * queued memory node (both lists for M2M, dst for WB, src for OUTPUT), and
 * call the driver's stop() callback if present.
 * NOTE(review): 'case IPP_CMD_M2M/WB:' labels, the inner list head argument
 * on the M2M safe-walk, break statements, the stop() guard and the return
 * were dropped by this extraction.
 */
1367 static int ipp_stop_property(struct drm_device *drm_dev,
1368 struct exynos_drm_ippdrv *ippdrv,
1369 struct drm_exynos_ipp_cmd_node *c_node)
1371 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1372 struct drm_exynos_ipp_property *property = &c_node->property;
1373 struct list_head *head;
1376 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1379 ipp_put_event(c_node, NULL);
1382 switch (property->cmd) {
1384 for_each_ipp_ops(i) {
1385 /* source/destination memory list */
1386 head = &c_node->mem_list[i];
1388 if (list_empty(head)) {
1389 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1394 list_for_each_entry_safe(m_node, tm_node,
1396 ret = ipp_put_mem_node(drm_dev, c_node,
1399 DRM_ERROR("failed to put m_node.\n");
1406 /* destination memory list */
1407 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1409 if (list_empty(head)) {
1410 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1414 list_for_each_entry_safe(m_node, tm_node, head, list) {
1415 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1417 DRM_ERROR("failed to put m_node.\n");
1422 case IPP_CMD_OUTPUT:
1423 /* source memory list */
1424 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1426 if (list_empty(head)) {
1427 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1431 list_for_each_entry_safe(m_node, tm_node, head, list) {
1432 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1434 DRM_ERROR("failed to put m_node.\n");
1440 DRM_ERROR("invalid operations.\n");
1446 /* stop operations */
1448 ippdrv->stop(ippdrv->dev, property->cmd);
/*
 * Workqueue handler for queued command work items. Under cmd_lock it
 * dispatches on cmd_work->ctrl: PLAY/RESUME start the property (and for
 * M2M waits up to 200 ms on start_complete for the transfer to finish);
 * STOP/PAUSE stop the property and signal stop_complete for the ioctl
 * waiter. The work item carries its ippdrv and c_node pointers, set by
 * ipp_handle_cmd_work().
 * NOTE(review): 'case IPP_CTRL_PLAY/STOP:' labels, break statements and
 * the error-path unlock ordering were dropped by this extraction.
 */
1453 void ipp_sched_cmd(struct work_struct *work)
1455 struct drm_exynos_ipp_cmd_work *cmd_work =
1456 (struct drm_exynos_ipp_cmd_work *)work;
1457 struct exynos_drm_ippdrv *ippdrv;
1458 struct drm_exynos_ipp_cmd_node *c_node;
1459 struct drm_exynos_ipp_property *property;
1462 ippdrv = cmd_work->ippdrv;
1464 DRM_ERROR("invalid ippdrv list.\n");
1468 c_node = cmd_work->c_node;
1470 DRM_ERROR("invalid command node list.\n");
1474 mutex_lock(&c_node->cmd_lock);
1476 property = &c_node->property;
1478 switch (cmd_work->ctrl) {
1480 case IPP_CTRL_RESUME:
1481 ret = ipp_start_property(ippdrv, c_node);
1483 DRM_ERROR("failed to start property:prop_id[%d]\n",
1484 c_node->property.prop_id);
1489 * M2M case supports wait_completion of transfer.
1490 * because M2M case supports single unit operation
1491 * with multiple queue.
1492 * M2M need to wait completion of data transfer.
1494 if (ipp_is_m2m_cmd(property->cmd)) {
1495 if (!wait_for_completion_timeout
1496 (&c_node->start_complete, msecs_to_jiffies(200))) {
1497 DRM_ERROR("timeout event:prop_id[%d]\n",
1498 c_node->property.prop_id);
1504 case IPP_CTRL_PAUSE:
1505 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1508 DRM_ERROR("failed to stop property.\n");
1512 complete(&c_node->stop_complete);
1515 DRM_ERROR("unknown control type\n");
1519 DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
1522 mutex_unlock(&c_node->cmd_lock);
/*
 * Deliver a buffer-done event to userland: match the completed hardware
 * buf_id(s) against the queued memory nodes (per command type), release
 * those nodes, then take the oldest event from the node's event list,
 * timestamp and fill it, and move it onto the owning file's event list,
 * waking any poll/read waiter under drm_dev->event_lock.
 * NOTE(review): listing has gaps — NULL/error checks, case labels,
 * `break;` and `return` lines are elided; documented from visible code.
 */
1525 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1526 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1528 struct drm_device *drm_dev = ippdrv->drm_dev;
1529 struct drm_exynos_ipp_property *property = &c_node->property;
1530 struct drm_exynos_ipp_mem_node *m_node;
1531 struct drm_exynos_ipp_queue_buf qbuf;
1532 struct drm_exynos_ipp_send_event *e;
1533 struct list_head *head;
1535 unsigned long flags;
1536 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1540 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1541 i ? "dst" : "src", buf_id[i]);
1544 DRM_ERROR("failed to get drm_dev.\n");
1549 DRM_ERROR("failed to get property.\n");
/* nothing to deliver if userland queued no event for this node */
1553 if (list_empty(&c_node->event_list)) {
1554 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
1558 if (!ipp_check_mem_list(c_node)) {
1559 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1564 switch (property->cmd) {
/* (case label elided — presumably IPP_CMD_M2M) */
1566 for_each_ipp_ops(i) {
1567 /* source/destination memory list */
1568 head = &c_node->mem_list[i];
/* M2M: oldest queued node on each list is the one just completed */
1570 m_node = list_first_entry(head,
1571 struct drm_exynos_ipp_mem_node, list);
1573 DRM_ERROR("empty memory node.\n");
1577 tbuf_id[i] = m_node->buf_id;
1578 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1579 i ? "dst" : "src", tbuf_id[i]);
1581 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1583 DRM_ERROR("failed to put m_node.\n");
/* (case label elided — presumably IPP_CMD_WB) */
1587 /* clear buf for finding */
1588 memset(&qbuf, 0x0, sizeof(qbuf));
1589 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1590 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1592 /* get memory node entry */
1593 m_node = ipp_find_mem_node(c_node, &qbuf);
1595 DRM_ERROR("empty memory node.\n");
1599 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1601 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1603 DRM_ERROR("failed to put m_node.\n");
1605 case IPP_CMD_OUTPUT:
1606 /* source memory list */
1607 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1609 m_node = list_first_entry(head,
1610 struct drm_exynos_ipp_mem_node, list);
1612 DRM_ERROR("empty memory node.\n");
1616 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1618 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1620 DRM_ERROR("failed to put m_node.\n");
1623 DRM_ERROR("invalid operations.\n");
/* sanity: the dequeued node should match the hw-reported dst buf_id */
1627 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1628 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1629 tbuf_id[1], buf_id[1], property->prop_id);
1632 * command node have event list of destination buffer
1633 * If destination buffer enqueue to mem list,
1634 * then we make event and link to event list tail.
1635 * so, we get first event for first enqueued buffer.
1637 e = list_first_entry(&c_node->event_list,
1638 struct drm_exynos_ipp_send_event, base.link);
1641 DRM_ERROR("empty event.\n");
/* stamp the event with completion time and identifying ids */
1645 do_gettimeofday(&now);
1646 DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
1647 , __func__, now.tv_sec, now.tv_usec);
1648 e->event.tv_sec = now.tv_sec;
1649 e->event.tv_usec = now.tv_usec;
1650 e->event.prop_id = property->prop_id;
1652 /* set buffer id about source destination */
1654 e->event.buf_id[i] = tbuf_id[i];
/* hand the event to the drm_file and wake poll()/read() waiters */
1656 spin_lock_irqsave(&drm_dev->event_lock, flags);
1657 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1658 wake_up_interruptible(&e->base.file_priv->event_wait);
1659 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1661 DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
1662 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
/*
 * Event work handler, run on the "ipp_event" workqueue when the driver
 * reports a finished buffer. Skips delivery if the command node is no
 * longer in START state, otherwise sends the event under event_lock and
 * completes start_complete for M2M so ipp_sched_cmd's wait can return.
 * NOTE(review): listing has gaps — the NULL-check conditions before the
 * error messages and `return` lines are elided.
 */
1667 void ipp_sched_event(struct work_struct *work)
1669 struct drm_exynos_ipp_event_work *event_work =
1670 (struct drm_exynos_ipp_event_work *)work;
1671 struct exynos_drm_ippdrv *ippdrv;
1672 struct drm_exynos_ipp_cmd_node *c_node;
1676 DRM_ERROR("failed to get event_work.\n");
1680 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1681 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1683 ippdrv = event_work->ippdrv;
1685 DRM_ERROR("failed to get ipp driver.\n");
1689 c_node = ippdrv->c_node;
1691 DRM_ERROR("failed to get command node.\n");
1696 * IPP supports command thread, event thread synchronization.
1697 * If IPP close immediately from user land, then IPP make
1698 * synchronization with command thread, so make complete event.
1699 * or going out operations.
1701 if (c_node->state != IPP_STATE_START) {
1702 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1703 __func__, c_node->state, c_node->property.prop_id);
1704 goto err_completion;
/* serialize event delivery for this command node */
1707 mutex_lock(&c_node->event_lock);
1709 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1711 DRM_ERROR("failed to send event.\n");
1712 goto err_completion;
/* M2M waits on start_complete in ipp_sched_cmd — signal it here */
1716 if (ipp_is_m2m_cmd(c_node->property.cmd))
1717 complete(&c_node->start_complete);
1719 mutex_unlock(&c_node->event_lock);
/*
 * Sub-driver probe: for every registered ippdrv, bind it to @drm_dev,
 * allocate an idr id, wire up the shared event workqueue/handler, init
 * its command list and attach the IOMMU. On failure, unwinds in reverse
 * (detach IOMMU) and destroys both idrs.
 * NOTE(review): listing has gaps — `int ret, count = 0;`, error-branch
 * conditions, `goto err_idr;` targets and the final `return` are elided.
 */
1722 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1724 struct ipp_context *ctx = get_ipp_context(dev);
1725 struct exynos_drm_ippdrv *ippdrv;
1728 /* get ipp driver entry */
1729 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1730 ippdrv->drm_dev = drm_dev;
/* hand out a unique ipp_id for this driver via the context idr */
1732 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1735 DRM_ERROR("failed to create id.\n");
1739 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1740 count++, (int)ippdrv, ippdrv->ipp_id);
/* id 0 is treated as invalid here */
1742 if (ippdrv->ipp_id == 0) {
1743 DRM_ERROR("failed to get ipp_id[%d]\n",
1748 /* store parent device for node */
1749 ippdrv->parent_dev = dev;
1751 /* store event work queue and handler */
1752 ippdrv->event_workq = ctx->event_workq;
1753 ippdrv->sched_event = ipp_sched_event;
1754 INIT_LIST_HEAD(&ippdrv->cmd_list);
1756 if (is_drm_iommu_supported(drm_dev)) {
1757 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1759 DRM_ERROR("failed to activate iommu\n");
/* error path: undo iommu attach for drivers processed so far */
1768 /* get ipp driver entry */
1769 list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
1770 if (is_drm_iommu_supported(drm_dev))
1771 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1774 idr_destroy(&ctx->ipp_idr);
1775 idr_destroy(&ctx->prop_idr);
/*
 * Sub-driver remove: detach the IOMMU from every ippdrv, clear its
 * drm_dev binding and unregister it from the global ippdrv list.
 */
1779 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1781 struct exynos_drm_ippdrv *ippdrv;
1783 /* get ipp driver entry */
1784 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1785 if (is_drm_iommu_supported(drm_dev))
1786 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1788 ippdrv->drm_dev = NULL;
1789 exynos_drm_ippdrv_unregister(ippdrv);
/*
 * Per-file open: allocate the per-client IPP private data, hang it off
 * the drm_file's driver_priv and initialize its event list.
 * NOTE(review): the -ENOMEM return after the failed-allocation message
 * and the final `return 0;` are elided in this listing.
 */
1793 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1794 struct drm_file *file)
1796 struct drm_exynos_file_private *file_priv = file->driver_priv;
1797 struct exynos_drm_ipp_private *priv;
1799 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1801 DRM_ERROR("failed to allocate priv.\n");
1805 file_priv->ipp_priv = priv;
1807 INIT_LIST_HEAD(&priv->event_list);
1809 DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
/*
 * Per-file close: walk every ippdrv's command list and tear down the
 * command nodes owned by this file's priv — stop them if still running,
 * clean them up, and drop the runtime-PM reference once a driver's list
 * becomes empty. This covers clients that were killed without issuing a
 * stop cmd ctrl themselves.
 * NOTE(review): listing has gaps — `int count = 0;`, a `continue;` for
 * empty cmd lists, kfree(priv) and some braces are elided.
 */
1814 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1815 struct drm_file *file)
1817 struct drm_exynos_file_private *file_priv = file->driver_priv;
1818 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1819 struct exynos_drm_ippdrv *ippdrv = NULL;
1820 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1823 DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
1825 if (list_empty(&exynos_drm_ippdrv_list)) {
1826 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
1830 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1831 if (list_empty(&ippdrv->cmd_list))
/* safe iteration: ipp_clean_cmd_node unlinks the visited node */
1834 list_for_each_entry_safe(c_node, tc_node,
1835 &ippdrv->cmd_list, list) {
1836 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
1837 __func__, count++, (int)ippdrv);
/* only tear down nodes owned by the closing file */
1839 if (c_node->priv == priv) {
1841 * userland goto unnormal state. process killed.
1842 * and close the file.
1843 * so, IPP didn't called stop cmd ctrl.
1844 * so, we are make stop operation in this state.
1846 if (c_node->state == IPP_STATE_START) {
1847 ipp_stop_property(drm_dev, ippdrv,
1849 c_node->state = IPP_STATE_STOP;
1852 ippdrv->dedicated = false;
1853 ipp_clean_cmd_node(c_node);
/* last node gone: balance the runtime-PM get taken at start */
1854 if (list_empty(&ippdrv->cmd_list))
1855 pm_runtime_put_sync(ippdrv->dev);
/*
 * Platform probe: allocate the ipp_context, init its locks and idrs,
 * create the event and command single-threaded workqueues, fill in the
 * exynos subdrv callbacks, and register with the exynos DRM core.
 * Unwinds the workqueues on failure via the labels at the bottom.
 * NOTE(review): listing has gaps — `int ret;`, the -ENOMEM returns,
 * `subdrv->dev = dev;`, `goto err_cmd_workq;`, the success `return 0;`
 * and the error labels themselves are elided.
 */
1865 static int ipp_probe(struct platform_device *pdev)
1867 struct device *dev = &pdev->dev;
1868 struct ipp_context *ctx;
1869 struct exynos_drm_subdrv *subdrv;
/* devm allocation: freed automatically on driver detach */
1872 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1876 mutex_init(&ctx->ipp_lock);
1877 mutex_init(&ctx->prop_lock);
1879 idr_init(&ctx->ipp_idr);
1880 idr_init(&ctx->prop_idr);
1883 * create single thread for ipp event
1884 * IPP supports event thread for IPP drivers.
1885 * IPP driver send event_work to this thread.
1886 * and IPP event thread send event to user process.
1888 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1889 if (!ctx->event_workq) {
1890 dev_err(dev, "failed to create event workqueue\n");
1895 * create single thread for ipp command
1896 * IPP supports command thread for user process.
1897 * user process make command node using set property ioctl.
1898 * and make start_work and send this work to command thread.
1899 * and then this command thread start property.
1901 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1902 if (!ctx->cmd_workq) {
1903 dev_err(dev, "failed to create cmd workqueue\n");
1905 goto err_event_workq;
1908 /* set sub driver informations */
1909 subdrv = &ctx->subdrv;
1911 subdrv->probe = ipp_subdrv_probe;
1912 subdrv->remove = ipp_subdrv_remove;
1913 subdrv->open = ipp_subdrv_open;
1914 subdrv->close = ipp_subdrv_close;
1916 platform_set_drvdata(pdev, ctx);
1918 ret = exynos_drm_subdrv_register(subdrv);
1920 DRM_ERROR("failed to register drm ipp device.\n");
1924 dev_info(dev, "drm ipp registered successfully.\n");
/* error unwind: cmd workqueue first, then event workqueue */
1929 destroy_workqueue(ctx->cmd_workq);
1931 destroy_workqueue(ctx->event_workq);
/*
 * Platform remove: reverse of ipp_probe — unregister the subdrv,
 * destroy both idrs and mutexes, and tear down the two workqueues.
 * (The ctx itself was devm-allocated, so no explicit free here.)
 */
1935 static int ipp_remove(struct platform_device *pdev)
1937 struct ipp_context *ctx = platform_get_drvdata(pdev);
1939 /* unregister sub driver */
1940 exynos_drm_subdrv_unregister(&ctx->subdrv);
1942 /* remove,destroy ipp idr */
1943 idr_destroy(&ctx->ipp_idr);
1944 idr_destroy(&ctx->prop_idr);
1946 mutex_destroy(&ctx->ipp_lock);
1947 mutex_destroy(&ctx->prop_lock);
1949 /* destroy command, event work queue */
1950 destroy_workqueue(ctx->cmd_workq);
1951 destroy_workqueue(ctx->event_workq);
/*
 * Power-control stub shared by the suspend/resume paths below; only
 * logs the request. Matches TODO item 7 in the file header ("need to
 * power_on implement power and sysmmu ctrl").
 */
1956 static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1958 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1963 #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: if runtime PM already suspended the device
 * there is nothing to do; otherwise power down via ipp_power_ctrl.
 * NOTE(review): the `return 0;` of the already-suspended branch is
 * elided in this listing.
 */
1964 static int ipp_suspend(struct device *dev)
1966 struct ipp_context *ctx = get_ipp_context(dev);
1968 if (pm_runtime_suspended(dev))
1971 return ipp_power_ctrl(ctx, false);
/*
 * System-sleep resume: power up only if the device was not left
 * runtime-suspended (in which case runtime PM will resume it on use).
 * NOTE(review): the trailing `return 0;` is elided in this listing.
 */
1974 static int ipp_resume(struct device *dev)
1976 struct ipp_context *ctx = get_ipp_context(dev);
1978 if (!pm_runtime_suspended(dev))
1979 return ipp_power_ctrl(ctx, true);
1985 #ifdef CONFIG_PM_RUNTIME
/* Runtime-PM suspend: delegate to the shared power-control helper. */
1986 static int ipp_runtime_suspend(struct device *dev)
1988 struct ipp_context *ctx = get_ipp_context(dev);
1990 return ipp_power_ctrl(ctx, false);
/* Runtime-PM resume: delegate to the shared power-control helper. */
1993 static int ipp_runtime_resume(struct device *dev)
1995 struct ipp_context *ctx = get_ipp_context(dev);
1997 return ipp_power_ctrl(ctx, true);
/*
 * PM ops table: system-sleep hooks and runtime hooks wired to the
 * handlers above (each set compiled in only under its config option).
 * NOTE(review): the closing `};` is elided in this listing.
 */
2001 static const struct dev_pm_ops ipp_pm_ops = {
2002 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
2003 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
2006 struct platform_driver ipp_driver = {
2008 .remove = ipp_remove,
2010 .name = "exynos-drm-ipp",
2011 .owner = THIS_MODULE,