drm/exynos/ipp: remove redundant messages
[cascardo/linux.git] / drivers / gpu / drm / exynos / exynos_drm_ipp.c
1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors:
4  *      Eunchul Kim <chulspro.kim@samsung.com>
5  *      Jinyoung Jeon <jy0.jeon@samsung.com>
6  *      Sangmin Lee <lsmin.lee@samsung.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  */
14 #include <linux/kernel.h>
15 #include <linux/platform_device.h>
16 #include <linux/types.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19
20 #include <drm/drmP.h>
21 #include <drm/exynos_drm.h>
22 #include "exynos_drm_drv.h"
23 #include "exynos_drm_gem.h"
24 #include "exynos_drm_ipp.h"
25 #include "exynos_drm_iommu.h"
26
27 /*
28  * IPP stands for Image Post Processing and
29  * supports image scaler/rotator and input/output DMA operations.
30  * using FIMC, GSC, Rotator, so on.
31  * IPP is integration device driver of same attribute h/w
32  */
33
34 /*
35  * TODO
36  * 1. expand command control id.
37  * 2. integrate property and config.
38  * 3. removed send_event id check routine.
39  * 4. compare send_event id if needed.
40  * 5. free subdrv_remove notifier callback list if needed.
41  * 6. need to check subdrv_open about multi-open.
42  * 7. need to power_on implement power and sysmmu ctrl.
43  */
44
45 #define get_ipp_context(dev)    platform_get_drvdata(to_platform_device(dev))
46 #define ipp_is_m2m_cmd(c)       (c == IPP_CMD_M2M)
47
48 /* platform device pointer for ipp device. */
49 static struct platform_device *exynos_drm_ipp_pdev;
50
51 /*
52  * A structure of event.
53  *
54  * @base: base of event.
55  * @event: ipp event.
56  */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;	/* drm core pending-event bookkeeping */
	struct drm_exynos_ipp_event	event;	/* payload copied out to userspace */
};
61
62 /*
63  * A structure of memory node.
64  *
65  * @list: list head to memory queue information.
66  * @ops_id: id of operations.
67  * @prop_id: id of property.
68  * @buf_id: id of buffer.
69  * @buf_info: gem objects and dma address, size.
70  * @filp: a pointer to drm_file.
71  */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;		/* link in c_node->mem_list[ops_id] */
	enum drm_exynos_ops_id	ops_id;		/* SRC or DST queue this node belongs to */
	u32	prop_id;			/* owning property id */
	u32	buf_id;				/* user-visible buffer id */
	struct drm_exynos_ipp_buf_info	buf_info;	/* gem handles and dma addresses */
	struct drm_file		*filp;		/* file the gem references were taken on */
};
80
81 /*
82  * A structure of ipp context.
83  *
84  * @subdrv: prepare initialization using subdrv.
85  * @ipp_lock: lock for synchronization of access to ipp_idr.
86  * @prop_lock: lock for synchronization of access to prop_idr.
87  * @ipp_idr: ipp driver idr.
88  * @prop_idr: property idr.
89  * @event_workq: event work queue.
90  * @cmd_workq: command work queue.
91  */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;		/* subdrv registration state */
	struct mutex	ipp_lock;	/* serializes ipp_idr access */
	struct mutex	prop_lock;	/* serializes prop_idr access */
	struct idr	ipp_idr;	/* ipp_id -> exynos_drm_ippdrv */
	struct idr	prop_idr;	/* prop_id -> drm_exynos_ipp_cmd_node */
	struct workqueue_struct *event_workq;	/* completion-event delivery */
	struct workqueue_struct *cmd_workq;	/* start/stop command execution */
};
101
102 static LIST_HEAD(exynos_drm_ippdrv_list);
103 static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
104 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
105
106 int exynos_platform_device_ipp_register(void)
107 {
108         struct platform_device *pdev;
109
110         if (exynos_drm_ipp_pdev)
111                 return -EEXIST;
112
113         pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
114         if (IS_ERR(pdev))
115                 return PTR_ERR(pdev);
116
117         exynos_drm_ipp_pdev = pdev;
118
119         return 0;
120 }
121
122 void exynos_platform_device_ipp_unregister(void)
123 {
124         if (exynos_drm_ipp_pdev) {
125                 platform_device_unregister(exynos_drm_ipp_pdev);
126                 exynos_drm_ipp_pdev = NULL;
127         }
128 }
129
130 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
131 {
132         mutex_lock(&exynos_drm_ippdrv_lock);
133         list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
134         mutex_unlock(&exynos_drm_ippdrv_lock);
135
136         return 0;
137 }
138
139 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
140 {
141         mutex_lock(&exynos_drm_ippdrv_lock);
142         list_del(&ippdrv->drv_list);
143         mutex_unlock(&exynos_drm_ippdrv_lock);
144
145         return 0;
146 }
147
148 static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
149                 u32 *idp)
150 {
151         int ret;
152
153         /* do the allocation under our mutexlock */
154         mutex_lock(lock);
155         ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
156         mutex_unlock(lock);
157         if (ret < 0)
158                 return ret;
159
160         *idp = ret;
161         return 0;
162 }
163
/* Release @id from @id_idr; @lock serializes idr access. */
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}
170
/*
 * Look up the object registered under @id in @id_idr.
 * Returns the object pointer or NULL when no such id exists.
 */
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}
181
182 static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
183                 enum drm_exynos_ipp_cmd cmd)
184 {
185         /*
186          * check dedicated flag and WB, OUTPUT operation with
187          * power on state.
188          */
189         if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
190             !pm_runtime_suspended(ippdrv->dev)))
191                 return true;
192
193         return false;
194 }
195
/*
 * Select the ipp driver that will execute @property.
 *
 * With a non-zero property->ipp_id the driver is looked up in the idr
 * and validated (not dedicated/busy, accepts the property).  With
 * ipp_id == 0 the global driver list is scanned for the first idle
 * driver whose check_property() accepts the request.
 *
 * Returns the driver, or ERR_PTR(-ENODEV/-EBUSY/-EINVAL) on failure.
 *
 * NOTE(review): the ipp_id == 0 path walks exynos_drm_ippdrv_list
 * without holding exynos_drm_ippdrv_lock — confirm drivers cannot
 * (un)register concurrently with this lookup.
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		/*
		 * WB, OUTPUT opertion not supported multi-operation.
		 * so, make dedicated state at set property ioctl.
		 * when ipp driver finished operations, clear dedicated flags.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find correct device in ipp drivers.
		 * ipp drivers have different abilities,
		 * so need to check property.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * This case is search all ipp driver for finding.
		 * user application don't set ipp_id in this case,
		 * so ipp subsystem search correct driver in driver list.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not support property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
261
262 static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
263 {
264         struct exynos_drm_ippdrv *ippdrv;
265         struct drm_exynos_ipp_cmd_node *c_node;
266         int count = 0;
267
268         DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
269
270         /*
271          * This case is search ipp driver by prop_id handle.
272          * sometimes, ipp subsystem find driver by prop_id.
273          * e.g PAUSE state, queue buf, command control.
274          */
275         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
276                 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
277
278                 mutex_lock(&ippdrv->cmd_lock);
279                 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
280                         if (c_node->property.prop_id == prop_id) {
281                                 mutex_unlock(&ippdrv->cmd_lock);
282                                 return ippdrv;
283                         }
284                 }
285                 mutex_unlock(&ippdrv->cmd_lock);
286         }
287
288         return ERR_PTR(-ENODEV);
289 }
290
291 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
292                 struct drm_file *file)
293 {
294         struct drm_exynos_file_private *file_priv = file->driver_priv;
295         struct device *dev = file_priv->ipp_dev;
296         struct ipp_context *ctx = get_ipp_context(dev);
297         struct drm_exynos_ipp_prop_list *prop_list = data;
298         struct exynos_drm_ippdrv *ippdrv;
299         int count = 0;
300
301         if (!ctx) {
302                 DRM_ERROR("invalid context.\n");
303                 return -EINVAL;
304         }
305
306         if (!prop_list) {
307                 DRM_ERROR("invalid property parameter.\n");
308                 return -EINVAL;
309         }
310
311         DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
312
313         if (!prop_list->ipp_id) {
314                 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
315                         count++;
316
317                 /*
318                  * Supports ippdrv list count for user application.
319                  * First step user application getting ippdrv count.
320                  * and second step getting ippdrv capability using ipp_id.
321                  */
322                 prop_list->count = count;
323         } else {
324                 /*
325                  * Getting ippdrv capability by ipp_id.
326                  * some device not supported wb, output interface.
327                  * so, user application detect correct ipp driver
328                  * using this ioctl.
329                  */
330                 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
331                                                 prop_list->ipp_id);
332                 if (!ippdrv) {
333                         DRM_ERROR("not found ipp%d driver.\n",
334                                         prop_list->ipp_id);
335                         return -ENODEV;
336                 }
337
338                 *prop_list = ippdrv->prop_list;
339         }
340
341         return 0;
342 }
343
344 static void ipp_print_property(struct drm_exynos_ipp_property *property,
345                 int idx)
346 {
347         struct drm_exynos_ipp_config *config = &property->config[idx];
348         struct drm_exynos_pos *pos = &config->pos;
349         struct drm_exynos_sz *sz = &config->sz;
350
351         DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
352                 property->prop_id, idx ? "dst" : "src", config->fmt);
353
354         DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
355                 pos->x, pos->y, pos->w, pos->h,
356                 sz->hsize, sz->vsize, config->flip, config->degree);
357 }
358
359 static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
360 {
361         struct exynos_drm_ippdrv *ippdrv;
362         struct drm_exynos_ipp_cmd_node *c_node;
363         u32 prop_id = property->prop_id;
364
365         DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
366
367         ippdrv = ipp_find_drv_by_handle(prop_id);
368         if (IS_ERR(ippdrv)) {
369                 DRM_ERROR("failed to get ipp driver.\n");
370                 return -EINVAL;
371         }
372
373         /*
374          * Find command node using command list in ippdrv.
375          * when we find this command no using prop_id.
376          * return property information set in this command node.
377          */
378         mutex_lock(&ippdrv->cmd_lock);
379         list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
380                 if ((c_node->property.prop_id == prop_id) &&
381                     (c_node->state == IPP_STATE_STOP)) {
382                         mutex_unlock(&ippdrv->cmd_lock);
383                         DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
384                                 property->cmd, (int)ippdrv);
385
386                         c_node->property = *property;
387                         return 0;
388                 }
389         }
390         mutex_unlock(&ippdrv->cmd_lock);
391
392         DRM_ERROR("failed to search property.\n");
393
394         return -EINVAL;
395 }
396
397 static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
398 {
399         struct drm_exynos_ipp_cmd_work *cmd_work;
400
401         cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
402         if (!cmd_work)
403                 return ERR_PTR(-ENOMEM);
404
405         INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
406
407         return cmd_work;
408 }
409
410 static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
411 {
412         struct drm_exynos_ipp_event_work *event_work;
413
414         event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
415         if (!event_work)
416                 return ERR_PTR(-ENOMEM);
417
418         INIT_WORK(&event_work->work, ipp_sched_event);
419
420         return event_work;
421 }
422
423 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
424                 struct drm_file *file)
425 {
426         struct drm_exynos_file_private *file_priv = file->driver_priv;
427         struct device *dev = file_priv->ipp_dev;
428         struct ipp_context *ctx = get_ipp_context(dev);
429         struct drm_exynos_ipp_property *property = data;
430         struct exynos_drm_ippdrv *ippdrv;
431         struct drm_exynos_ipp_cmd_node *c_node;
432         int ret, i;
433
434         if (!ctx) {
435                 DRM_ERROR("invalid context.\n");
436                 return -EINVAL;
437         }
438
439         if (!property) {
440                 DRM_ERROR("invalid property parameter.\n");
441                 return -EINVAL;
442         }
443
444         /*
445          * This is log print for user application property.
446          * user application set various property.
447          */
448         for_each_ipp_ops(i)
449                 ipp_print_property(property, i);
450
451         /*
452          * set property ioctl generated new prop_id.
453          * but in this case already asigned prop_id using old set property.
454          * e.g PAUSE state. this case supports find current prop_id and use it
455          * instead of allocation.
456          */
457         if (property->prop_id) {
458                 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
459                 return ipp_find_and_set_property(property);
460         }
461
462         /* find ipp driver using ipp id */
463         ippdrv = ipp_find_driver(ctx, property);
464         if (IS_ERR(ippdrv)) {
465                 DRM_ERROR("failed to get ipp driver.\n");
466                 return -EINVAL;
467         }
468
469         /* allocate command node */
470         c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
471         if (!c_node)
472                 return -ENOMEM;
473
474         /* create property id */
475         ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
476                 &property->prop_id);
477         if (ret) {
478                 DRM_ERROR("failed to create id.\n");
479                 goto err_clear;
480         }
481
482         DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
483                 property->prop_id, property->cmd, (int)ippdrv);
484
485         /* stored property information and ippdrv in private data */
486         c_node->dev = dev;
487         c_node->property = *property;
488         c_node->state = IPP_STATE_IDLE;
489
490         c_node->start_work = ipp_create_cmd_work();
491         if (IS_ERR(c_node->start_work)) {
492                 DRM_ERROR("failed to create start work.\n");
493                 goto err_remove_id;
494         }
495
496         c_node->stop_work = ipp_create_cmd_work();
497         if (IS_ERR(c_node->stop_work)) {
498                 DRM_ERROR("failed to create stop work.\n");
499                 goto err_free_start;
500         }
501
502         c_node->event_work = ipp_create_event_work();
503         if (IS_ERR(c_node->event_work)) {
504                 DRM_ERROR("failed to create event work.\n");
505                 goto err_free_stop;
506         }
507
508         mutex_init(&c_node->lock);
509         mutex_init(&c_node->mem_lock);
510         mutex_init(&c_node->event_lock);
511
512         init_completion(&c_node->start_complete);
513         init_completion(&c_node->stop_complete);
514
515         for_each_ipp_ops(i)
516                 INIT_LIST_HEAD(&c_node->mem_list[i]);
517
518         INIT_LIST_HEAD(&c_node->event_list);
519         mutex_lock(&ippdrv->cmd_lock);
520         list_add_tail(&c_node->list, &ippdrv->cmd_list);
521         mutex_unlock(&ippdrv->cmd_lock);
522
523         /* make dedicated state without m2m */
524         if (!ipp_is_m2m_cmd(property->cmd))
525                 ippdrv->dedicated = true;
526
527         return 0;
528
529 err_free_stop:
530         kfree(c_node->stop_work);
531 err_free_start:
532         kfree(c_node->start_work);
533 err_remove_id:
534         ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
535 err_clear:
536         kfree(c_node);
537         return ret;
538 }
539
/*
 * Release everything owned by @c_node: unlink it, free its prop_id,
 * destroy its mutexes and free the work items and the node itself.
 *
 * NOTE(review): list_del() on c_node->list presumes the caller holds
 * the owning ippdrv->cmd_lock — confirm at each call site.
 */
static void ipp_clean_cmd_node(struct ipp_context *ctx,
				struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
560
561 static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
562 {
563         switch (c_node->property.cmd) {
564         case IPP_CMD_WB:
565                 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
566         case IPP_CMD_OUTPUT:
567                 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
568         case IPP_CMD_M2M:
569         default:
570                 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
571                        !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
572         }
573 }
574
575 static struct drm_exynos_ipp_mem_node
576                 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
577                 struct drm_exynos_ipp_queue_buf *qbuf)
578 {
579         struct drm_exynos_ipp_mem_node *m_node;
580         struct list_head *head;
581         int count = 0;
582
583         DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
584
585         /* source/destination memory list */
586         head = &c_node->mem_list[qbuf->ops_id];
587
588         /* find memory node from memory list */
589         list_for_each_entry(m_node, head, list) {
590                 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
591
592                 /* compare buffer id */
593                 if (m_node->buf_id == qbuf->buf_id)
594                         return m_node;
595         }
596
597         return NULL;
598 }
599
600 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
601                 struct drm_exynos_ipp_cmd_node *c_node,
602                 struct drm_exynos_ipp_mem_node *m_node)
603 {
604         struct exynos_drm_ipp_ops *ops = NULL;
605         int ret = 0;
606
607         DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
608
609         if (!m_node) {
610                 DRM_ERROR("invalid queue node.\n");
611                 return -EFAULT;
612         }
613
614         DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
615
616         /* get operations callback */
617         ops = ippdrv->ops[m_node->ops_id];
618         if (!ops) {
619                 DRM_ERROR("not support ops.\n");
620                 return -EFAULT;
621         }
622
623         /* set address and enable irq */
624         if (ops->set_addr) {
625                 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
626                         m_node->buf_id, IPP_BUF_ENQUEUE);
627                 if (ret) {
628                         DRM_ERROR("failed to set addr.\n");
629                         return ret;
630                 }
631         }
632
633         return ret;
634 }
635
636 static struct drm_exynos_ipp_mem_node
637                 *ipp_get_mem_node(struct drm_device *drm_dev,
638                 struct drm_file *file,
639                 struct drm_exynos_ipp_cmd_node *c_node,
640                 struct drm_exynos_ipp_queue_buf *qbuf)
641 {
642         struct drm_exynos_ipp_mem_node *m_node;
643         struct drm_exynos_ipp_buf_info *buf_info;
644         int i;
645
646         m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
647         if (!m_node)
648                 return ERR_PTR(-ENOMEM);
649
650         buf_info = &m_node->buf_info;
651
652         /* operations, buffer id */
653         m_node->ops_id = qbuf->ops_id;
654         m_node->prop_id = qbuf->prop_id;
655         m_node->buf_id = qbuf->buf_id;
656
657         DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
658         DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
659
660         for_each_ipp_planar(i) {
661                 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
662
663                 /* get dma address by handle */
664                 if (qbuf->handle[i]) {
665                         dma_addr_t *addr;
666
667                         addr = exynos_drm_gem_get_dma_addr(drm_dev,
668                                         qbuf->handle[i], file);
669                         if (IS_ERR(addr)) {
670                                 DRM_ERROR("failed to get addr.\n");
671                                 goto err_clear;
672                         }
673
674                         buf_info->handles[i] = qbuf->handle[i];
675                         buf_info->base[i] = *addr;
676                         DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
677                                       buf_info->base[i], buf_info->handles[i]);
678                 }
679         }
680
681         m_node->filp = file;
682         mutex_lock(&c_node->mem_lock);
683         list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
684         mutex_unlock(&c_node->mem_lock);
685
686         return m_node;
687
688 err_clear:
689         kfree(m_node);
690         return ERR_PTR(-EFAULT);
691 }
692
693 static int ipp_put_mem_node(struct drm_device *drm_dev,
694                 struct drm_exynos_ipp_cmd_node *c_node,
695                 struct drm_exynos_ipp_mem_node *m_node)
696 {
697         int i;
698
699         DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
700
701         if (!m_node) {
702                 DRM_ERROR("invalid dequeue node.\n");
703                 return -EFAULT;
704         }
705
706         DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
707
708         /* put gem buffer */
709         for_each_ipp_planar(i) {
710                 unsigned long handle = m_node->buf_info.handles[i];
711                 if (handle)
712                         exynos_drm_gem_put_dma_addr(drm_dev, handle,
713                                                         m_node->filp);
714         }
715
716         /* delete list in queue */
717         list_del(&m_node->list);
718         kfree(m_node);
719
720         return 0;
721 }
722
/* drm_pending_event destroy callback; the event is plain kmalloc memory. */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
727
/*
 * Allocate a completion event for the destination buffer described by
 * @qbuf and queue it on the command node's event list; it is sent to
 * userspace when the operation finishes.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		/*
		 * NOTE(review): this refunds event_space that nothing in
		 * this file ever reserved (there is no matching decrement
		 * before the allocation).  Looks copied from a pattern
		 * where space is reserved first — verify against the drm
		 * core event accounting; as written it inflates the
		 * file's event budget on every failed allocation.
		 */
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
761
762 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
763                 struct drm_exynos_ipp_queue_buf *qbuf)
764 {
765         struct drm_exynos_ipp_send_event *e, *te;
766         int count = 0;
767
768         mutex_lock(&c_node->event_lock);
769         list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
770                 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
771
772                 /*
773                  * qbuf == NULL condition means all event deletion.
774                  * stop operations want to delete all event list.
775                  * another case delete only same buf id.
776                  */
777                 if (!qbuf) {
778                         /* delete list */
779                         list_del(&e->base.link);
780                         kfree(e);
781                 }
782
783                 /* compare buffer id */
784                 if (qbuf && (qbuf->buf_id ==
785                     e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
786                         /* delete list */
787                         list_del(&e->base.link);
788                         kfree(e);
789                         goto out_unlock;
790                 }
791         }
792
793 out_unlock:
794         mutex_unlock(&c_node->event_lock);
795         return;
796 }
797
/*
 * Fill @cmd_work with its target driver/node and queue it on the ipp
 * command workqueue; ipp_sched_cmd() runs it asynchronously.
 *
 * NOTE(review): the cast assumes the embedded work_struct sits at
 * offset 0 of drm_exynos_ipp_cmd_work — verify against its definition.
 */
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
809
/*
 * Enqueue @m_node and, when appropriate, kick off processing.
 *
 * For m2m commands a start work item is scheduled (the m2m engine is
 * (re)started from queue_buf); for WB/OUTPUT the buffer address is
 * programmed directly via ipp_set_mem_node().  A node queued while the
 * command is not in START state, or while the required buffers are not
 * yet complete, is left on the list for later.
 *
 * Returns 0 on success/bypass, -EFAULT for missing driver/ops, or the
 * set_addr error.
 */
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	/* not started yet: the buffer stays queued, nothing to do now */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	/* mem_lock held across the completeness check and the dispatch below */
	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If set destination buffer and enabled clock,
	 * then m2m operations need start operations at queue_buf
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}
867
868 static void ipp_clean_queue_buf(struct drm_device *drm_dev,
869                 struct drm_exynos_ipp_cmd_node *c_node,
870                 struct drm_exynos_ipp_queue_buf *qbuf)
871 {
872         struct drm_exynos_ipp_mem_node *m_node, *tm_node;
873
874         /* delete list */
875         mutex_lock(&c_node->mem_lock);
876         list_for_each_entry_safe(m_node, tm_node,
877                 &c_node->mem_list[qbuf->ops_id], list) {
878                 if (m_node->buf_id == qbuf->buf_id &&
879                     m_node->ops_id == qbuf->ops_id)
880                         ipp_put_mem_node(drm_dev, c_node, m_node);
881         }
882         mutex_unlock(&c_node->mem_lock);
883 }
884
/*
 * Ioctl handler: enqueue or dequeue one source/destination buffer on the
 * command node identified by qbuf->prop_id.  On enqueue of a destination
 * buffer this also registers a completion event and, for M2M commands,
 * kicks off the operation.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	/* ops_id indexes mem_list[]; reject out-of-range values */
	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * first step get event for destination buffer.
		 * and second step when M2M case run with destination buffer
		 * if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case run play control for streaming feature.
			 * other case set address and waiting.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		/* release all matching memory nodes */
		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	/* enqueue failed part-way: drop anything already queued for qbuf */
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
976
977 static bool exynos_drm_ipp_check_valid(struct device *dev,
978                 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
979 {
980         if (ctrl != IPP_CTRL_PLAY) {
981                 if (pm_runtime_suspended(dev)) {
982                         DRM_ERROR("pm:runtime_suspended.\n");
983                         goto err_status;
984                 }
985         }
986
987         switch (ctrl) {
988         case IPP_CTRL_PLAY:
989                 if (state != IPP_STATE_IDLE)
990                         goto err_status;
991                 break;
992         case IPP_CTRL_STOP:
993                 if (state == IPP_STATE_STOP)
994                         goto err_status;
995                 break;
996         case IPP_CTRL_PAUSE:
997                 if (state != IPP_STATE_START)
998                         goto err_status;
999                 break;
1000         case IPP_CTRL_RESUME:
1001                 if (state != IPP_STATE_STOP)
1002                         goto err_status;
1003                 break;
1004         default:
1005                 DRM_ERROR("invalid state.\n");
1006                 goto err_status;
1007         }
1008
1009         return true;
1010
1011 err_status:
1012         DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1013         return false;
1014 }
1015
/*
 * Ioctl handler: drive the command state machine (play/stop/pause/resume)
 * for the command node identified by cmd_ctrl->prop_id.  Work is handed
 * off to the command thread via ipp_handle_cmd_work(); stop and pause
 * wait for the thread to acknowledge through stop_complete.
 */
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	/* reject illegal state transitions up front */
	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		/* take a PM reference before starting the hardware */
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		/* wait for the command thread; log but carry on if it stalls */
		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		/* last command gone: drop the PM reference taken on PLAY */
		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		/* pause reuses the stop work/completion, but keeps the node */
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
1119
1120 int exynos_drm_ippnb_register(struct notifier_block *nb)
1121 {
1122         return blocking_notifier_chain_register(
1123                 &exynos_drm_ippnb_list, nb);
1124 }
1125
1126 int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1127 {
1128         return blocking_notifier_chain_unregister(
1129                 &exynos_drm_ippnb_list, nb);
1130 }
1131
1132 int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1133 {
1134         return blocking_notifier_call_chain(
1135                 &exynos_drm_ippnb_list, val, v);
1136 }
1137
1138 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1139                 struct drm_exynos_ipp_property *property)
1140 {
1141         struct exynos_drm_ipp_ops *ops = NULL;
1142         bool swap = false;
1143         int ret, i;
1144
1145         if (!property) {
1146                 DRM_ERROR("invalid property parameter.\n");
1147                 return -EINVAL;
1148         }
1149
1150         DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1151
1152         /* reset h/w block */
1153         if (ippdrv->reset &&
1154             ippdrv->reset(ippdrv->dev)) {
1155                 return -EINVAL;
1156         }
1157
1158         /* set source,destination operations */
1159         for_each_ipp_ops(i) {
1160                 struct drm_exynos_ipp_config *config =
1161                         &property->config[i];
1162
1163                 ops = ippdrv->ops[i];
1164                 if (!ops || !config) {
1165                         DRM_ERROR("not support ops and config.\n");
1166                         return -EINVAL;
1167                 }
1168
1169                 /* set format */
1170                 if (ops->set_fmt) {
1171                         ret = ops->set_fmt(ippdrv->dev, config->fmt);
1172                         if (ret)
1173                                 return ret;
1174                 }
1175
1176                 /* set transform for rotation, flip */
1177                 if (ops->set_transf) {
1178                         ret = ops->set_transf(ippdrv->dev, config->degree,
1179                                 config->flip, &swap);
1180                         if (ret)
1181                                 return ret;
1182                 }
1183
1184                 /* set size */
1185                 if (ops->set_size) {
1186                         ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1187                                 &config->sz);
1188                         if (ret)
1189                                 return ret;
1190                 }
1191         }
1192
1193         return 0;
1194 }
1195
/*
 * Bind @c_node to the driver, program the hardware with the node's
 * property, feed it the queued memory nodes appropriate for the command
 * type, and kick off the operation.  On any failure ippdrv->c_node is
 * cleared again and a negative error code is returned.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M consumes exactly one buffer per side per run */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* writeback streams into every queued destination buffer */
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* output streams from every queued source buffer */
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	/* NOTE: the -ENOMEM path above does not clear ippdrv->c_node
	 * before jumping here; it is cleared below for all error paths. */
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
1293
/*
 * Tear down a running command: drop pending events, release every queued
 * memory node for the command type, and finally call the driver's stop
 * hook.  Note that on success the switch falls straight through into the
 * err_clear label (it directly follows the switch), so unlock and stop
 * run on both the success and error paths.
 */
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	mutex_lock(&c_node->mem_lock);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	mutex_unlock(&c_node->mem_lock);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
1367
/*
 * Command-thread work handler: executes the control request stored in
 * the cmd_work (start/resume or stop/pause) under the node's lock.
 * Stop/pause signal stop_complete so the ioctl path waiting in
 * exynos_drm_ipp_cmd_ctrl() can proceed.  Note that on success the code
 * falls through the "done" debug message into the err_unlock label,
 * so the mutex is released on every path.
 */
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			/* start_complete is signalled by ipp_sched_event() */
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		/* wake the ioctl path waiting for the stop to finish */
		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
1439
/*
 * Completion path: retire the memory node(s) the hardware just finished
 * with (selection depends on the command type), then deliver the oldest
 * pending DRM event for this command node to user space with the
 * finished buffer ids and a timestamp.  Called with no locks held;
 * takes event_lock then mem_lock internally.
 */
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): property is the address of a member embedded in
	 * *c_node, so this check can never trigger; kept byte-identical.
	 */
	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		/* nobody asked for an event; nothing to deliver */
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* retire the head buffer on both the src and dst side */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* writeback: retire the specific dst buffer by id */
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* output: retire the oldest queued src buffer */
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	/* mismatch means the retired buffer was not the one reported done */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * command node have event list of destination buffer
	 * If destination buffer enqueue to mem list,
	 * then we make event and link to event list tail.
	 * so, we get first event for first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	/* hand the event to the owning file and wake any poller */
	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
1580
1581 void ipp_sched_event(struct work_struct *work)
1582 {
1583         struct drm_exynos_ipp_event_work *event_work =
1584                 (struct drm_exynos_ipp_event_work *)work;
1585         struct exynos_drm_ippdrv *ippdrv;
1586         struct drm_exynos_ipp_cmd_node *c_node;
1587         int ret;
1588
1589         if (!event_work) {
1590                 DRM_ERROR("failed to get event_work.\n");
1591                 return;
1592         }
1593
1594         DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1595
1596         ippdrv = event_work->ippdrv;
1597         if (!ippdrv) {
1598                 DRM_ERROR("failed to get ipp driver.\n");
1599                 return;
1600         }
1601
1602         c_node = ippdrv->c_node;
1603         if (!c_node) {
1604                 DRM_ERROR("failed to get command node.\n");
1605                 return;
1606         }
1607
1608         /*
1609          * IPP supports command thread, event thread synchronization.
1610          * If IPP close immediately from user land, then IPP make
1611          * synchronization with command thread, so make complete event.
1612          * or going out operations.
1613          */
1614         if (c_node->state != IPP_STATE_START) {
1615                 DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
1616                         c_node->state, c_node->property.prop_id);
1617                 goto err_completion;
1618         }
1619
1620         ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1621         if (ret) {
1622                 DRM_ERROR("failed to send event.\n");
1623                 goto err_completion;
1624         }
1625
1626 err_completion:
1627         if (ipp_is_m2m_cmd(c_node->property.cmd))
1628                 complete(&c_node->start_complete);
1629 }
1630
/*
 * Subdrv probe: register every driver on the global ippdrv list with
 * this drm device — allocate an ipp id, wire up the event workqueue and
 * attach the iommu.  On failure all previously initialised entries are
 * unwound in reverse order.
 */
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		u32 ipp_id;

		ippdrv->drm_dev = drm_dev;

		/* id 0 is reserved/invalid, treat it as a failure too */
		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
				    &ipp_id);
		if (ret || ipp_id == 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ipp_id);

		ippdrv->prop_list.ipp_id = ipp_id;

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* unwind the entries initialised before the failing one */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}
1688
1689 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1690 {
1691         struct exynos_drm_ippdrv *ippdrv;
1692         struct ipp_context *ctx = get_ipp_context(dev);
1693
1694         /* get ipp driver entry */
1695         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1696                 if (is_drm_iommu_supported(drm_dev))
1697                         drm_iommu_detach_device(drm_dev, ippdrv->dev);
1698
1699                 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1700                                 ippdrv->prop_list.ipp_id);
1701
1702                 ippdrv->drm_dev = NULL;
1703                 exynos_drm_ippdrv_unregister(ippdrv);
1704         }
1705 }
1706
1707 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1708                 struct drm_file *file)
1709 {
1710         struct drm_exynos_file_private *file_priv = file->driver_priv;
1711
1712         file_priv->ipp_dev = dev;
1713
1714         DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
1715
1716         return 0;
1717 }
1718
1719 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1720                 struct drm_file *file)
1721 {
1722         struct drm_exynos_file_private *file_priv = file->driver_priv;
1723         struct exynos_drm_ippdrv *ippdrv = NULL;
1724         struct ipp_context *ctx = get_ipp_context(dev);
1725         struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1726         int count = 0;
1727
1728         DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
1729
1730         list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1731                 mutex_lock(&ippdrv->cmd_lock);
1732                 list_for_each_entry_safe(c_node, tc_node,
1733                         &ippdrv->cmd_list, list) {
1734                         DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1735                                 count++, (int)ippdrv);
1736
1737                         if (c_node->dev == file_priv->ipp_dev) {
1738                                 /*
1739                                  * userland goto unnormal state. process killed.
1740                                  * and close the file.
1741                                  * so, IPP didn't called stop cmd ctrl.
1742                                  * so, we are make stop operation in this state.
1743                                  */
1744                                 if (c_node->state == IPP_STATE_START) {
1745                                         ipp_stop_property(drm_dev, ippdrv,
1746                                                 c_node);
1747                                         c_node->state = IPP_STATE_STOP;
1748                                 }
1749
1750                                 ippdrv->dedicated = false;
1751                                 ipp_clean_cmd_node(ctx, c_node);
1752                                 if (list_empty(&ippdrv->cmd_list))
1753                                         pm_runtime_put_sync(ippdrv->dev);
1754                         }
1755                 }
1756                 mutex_unlock(&ippdrv->cmd_lock);
1757         }
1758
1759         return;
1760 }
1761
1762 static int ipp_probe(struct platform_device *pdev)
1763 {
1764         struct device *dev = &pdev->dev;
1765         struct ipp_context *ctx;
1766         struct exynos_drm_subdrv *subdrv;
1767         int ret;
1768
1769         ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1770         if (!ctx)
1771                 return -ENOMEM;
1772
1773         mutex_init(&ctx->ipp_lock);
1774         mutex_init(&ctx->prop_lock);
1775
1776         idr_init(&ctx->ipp_idr);
1777         idr_init(&ctx->prop_idr);
1778
1779         /*
1780          * create single thread for ipp event
1781          * IPP supports event thread for IPP drivers.
1782          * IPP driver send event_work to this thread.
1783          * and IPP event thread send event to user process.
1784          */
1785         ctx->event_workq = create_singlethread_workqueue("ipp_event");
1786         if (!ctx->event_workq) {
1787                 dev_err(dev, "failed to create event workqueue\n");
1788                 return -EINVAL;
1789         }
1790
1791         /*
1792          * create single thread for ipp command
1793          * IPP supports command thread for user process.
1794          * user process make command node using set property ioctl.
1795          * and make start_work and send this work to command thread.
1796          * and then this command thread start property.
1797          */
1798         ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1799         if (!ctx->cmd_workq) {
1800                 dev_err(dev, "failed to create cmd workqueue\n");
1801                 ret = -EINVAL;
1802                 goto err_event_workq;
1803         }
1804
1805         /* set sub driver informations */
1806         subdrv = &ctx->subdrv;
1807         subdrv->dev = dev;
1808         subdrv->probe = ipp_subdrv_probe;
1809         subdrv->remove = ipp_subdrv_remove;
1810         subdrv->open = ipp_subdrv_open;
1811         subdrv->close = ipp_subdrv_close;
1812
1813         platform_set_drvdata(pdev, ctx);
1814
1815         ret = exynos_drm_subdrv_register(subdrv);
1816         if (ret < 0) {
1817                 DRM_ERROR("failed to register drm ipp device.\n");
1818                 goto err_cmd_workq;
1819         }
1820
1821         dev_info(dev, "drm ipp registered successfully.\n");
1822
1823         return 0;
1824
1825 err_cmd_workq:
1826         destroy_workqueue(ctx->cmd_workq);
1827 err_event_workq:
1828         destroy_workqueue(ctx->event_workq);
1829         return ret;
1830 }
1831
/*
 * ipp_remove - platform driver remove callback.
 *
 * Reverses ipp_probe() in order: unregister the sub-driver first (so
 * no new callbacks arrive), then release the id allocators and locks,
 * and finally destroy both work queues.  Always returns 0.
 */
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* release the ipp and property id allocators */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}
1852
/*
 * ipp_power_ctrl - power on/off hook used by the suspend/resume paths.
 *
 * Currently a stub that only logs the request and reports success; the
 * real power and sysmmu control is still to be implemented (see TODO
 * item 7 at the top of this file).
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}
1859
1860 #ifdef CONFIG_PM_SLEEP
1861 static int ipp_suspend(struct device *dev)
1862 {
1863         struct ipp_context *ctx = get_ipp_context(dev);
1864
1865         if (pm_runtime_suspended(dev))
1866                 return 0;
1867
1868         return ipp_power_ctrl(ctx, false);
1869 }
1870
1871 static int ipp_resume(struct device *dev)
1872 {
1873         struct ipp_context *ctx = get_ipp_context(dev);
1874
1875         if (!pm_runtime_suspended(dev))
1876                 return ipp_power_ctrl(ctx, true);
1877
1878         return 0;
1879 }
1880 #endif
1881
1882 #ifdef CONFIG_PM_RUNTIME
1883 static int ipp_runtime_suspend(struct device *dev)
1884 {
1885         struct ipp_context *ctx = get_ipp_context(dev);
1886
1887         return ipp_power_ctrl(ctx, false);
1888 }
1889
1890 static int ipp_runtime_resume(struct device *dev)
1891 {
1892         struct ipp_context *ctx = get_ipp_context(dev);
1893
1894         return ipp_power_ctrl(ctx, true);
1895 }
1896 #endif
1897
/* system sleep and runtime PM hooks for the ipp platform device */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
1902
/* platform driver bound to the "exynos-drm-ipp" device */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};
1912